(git:374b731)
Loading...
Searching...
No Matches
pw_methods.F
Go to the documentation of this file.
1!--------------------------------------------------------------------------------------------------!
2! CP2K: A general program to perform molecular dynamics simulations !
3! Copyright 2000-2024 CP2K developers group <https://cp2k.org> !
4! !
5! SPDX-License-Identifier: GPL-2.0-or-later !
6!--------------------------------------------------------------------------------------------------!
7
8! **************************************************************************************************
9!> \note
10!> If parallel mode is distributed certain combination of
11!> "in_use" and "in_space" can not be used.
12!> For performance reasons it would be better to have the loops
13!> over g-vectors in the gather/scatter routines in new subprograms
14!> with the actual arrays (also the addressing) in the parameter list
15!> \par History
16!> JGH (29-Dec-2000) : Changes for parallel use
17!> JGH (13-Mar-2001) : added timing calls
18!> JGH (26-Feb-2003) : OpenMP enabled
19!> JGH (17-Nov-2007) : Removed mass arrays
20!> JGH (01-Dec-2007) : Removed and renamed routines
21!> JGH (04-Jul-2019) : added pw_multiply routine
22!> 03.2008 [tlaino] : Splitting pw_types into pw_types and pw_methods
23!> \author apsi
24! **************************************************************************************************
26
27
30 USE fft_tools, ONLY: bwfft, &
31 fwfft, &
32 fft3d
35 USE kinds, ONLY: dp
36 USE machine, ONLY: m_memory
37 USE mathconstants, ONLY: z_zero
38 USE pw_copy_all, ONLY: pw_copy_match
39 USE pw_fpga, ONLY: pw_fpga_c1dr3d_3d_dp, &
44 USE pw_gpu, ONLY: pw_gpu_c1dr3d_3d, &
48 USE pw_grid_types, ONLY: halfspace, &
52 USE pw_types, ONLY: pw_r1d_rs_type
53 USE pw_types, ONLY: pw_r3d_rs_type
54 USE pw_types, ONLY: pw_c1d_rs_type
55 USE pw_types, ONLY: pw_c3d_rs_type
56 USE pw_types, ONLY: pw_r1d_gs_type
57 USE pw_types, ONLY: pw_r3d_gs_type
58 USE pw_types, ONLY: pw_c1d_gs_type
59 USE pw_types, ONLY: pw_c3d_gs_type
60#include "../base/base_uses.f90"
61
62 IMPLICIT NONE
63
64 PRIVATE
65
73 PUBLIC :: pw_set, pw_truncated
74 PUBLIC :: pw_scatter, pw_gather
76
77 CHARACTER(len=*), PARAMETER, PRIVATE :: moduleN = 'pw_methods'
78 LOGICAL, PARAMETER, PRIVATE :: debug_this_module = .false.
79 INTEGER, PARAMETER, PUBLIC :: do_accurate_sum = 0, &
81
82 INTERFACE pw_zero
83 MODULE PROCEDURE pw_zero_r1d_rs
84 MODULE PROCEDURE pw_zero_r3d_rs
85 MODULE PROCEDURE pw_zero_c1d_rs
86 MODULE PROCEDURE pw_zero_c3d_rs
87 MODULE PROCEDURE pw_zero_r1d_gs
88 MODULE PROCEDURE pw_zero_r3d_gs
89 MODULE PROCEDURE pw_zero_c1d_gs
90 MODULE PROCEDURE pw_zero_c3d_gs
91 END INTERFACE
92
93 INTERFACE pw_scale
94 MODULE PROCEDURE pw_scale_r1d_rs
95 MODULE PROCEDURE pw_scale_r3d_rs
96 MODULE PROCEDURE pw_scale_c1d_rs
97 MODULE PROCEDURE pw_scale_c3d_rs
98 MODULE PROCEDURE pw_scale_r1d_gs
99 MODULE PROCEDURE pw_scale_r3d_gs
100 MODULE PROCEDURE pw_scale_c1d_gs
101 MODULE PROCEDURE pw_scale_c3d_gs
102 END INTERFACE
103
104 INTERFACE pw_write
105 MODULE PROCEDURE pw_write_r1d_rs
106 MODULE PROCEDURE pw_write_r3d_rs
107 MODULE PROCEDURE pw_write_c1d_rs
108 MODULE PROCEDURE pw_write_c3d_rs
109 MODULE PROCEDURE pw_write_r1d_gs
110 MODULE PROCEDURE pw_write_r3d_gs
111 MODULE PROCEDURE pw_write_c1d_gs
112 MODULE PROCEDURE pw_write_c3d_gs
113 END INTERFACE
114
116 MODULE PROCEDURE pw_integrate_function_r1d_rs
117 MODULE PROCEDURE pw_integrate_function_r3d_rs
118 MODULE PROCEDURE pw_integrate_function_c1d_rs
119 MODULE PROCEDURE pw_integrate_function_c3d_rs
120 MODULE PROCEDURE pw_integrate_function_r1d_gs
121 MODULE PROCEDURE pw_integrate_function_r3d_gs
122 MODULE PROCEDURE pw_integrate_function_c1d_gs
123 MODULE PROCEDURE pw_integrate_function_c3d_gs
124 END INTERFACE
125
126 INTERFACE pw_set
127 MODULE PROCEDURE pw_set_value_r1d_rs
128 MODULE PROCEDURE pw_zero_r1d_rs
129 MODULE PROCEDURE pw_set_value_r3d_rs
130 MODULE PROCEDURE pw_zero_r3d_rs
131 MODULE PROCEDURE pw_set_value_c1d_rs
132 MODULE PROCEDURE pw_zero_c1d_rs
133 MODULE PROCEDURE pw_set_value_c3d_rs
134 MODULE PROCEDURE pw_zero_c3d_rs
135 MODULE PROCEDURE pw_set_value_r1d_gs
136 MODULE PROCEDURE pw_zero_r1d_gs
137 MODULE PROCEDURE pw_set_value_r3d_gs
138 MODULE PROCEDURE pw_zero_r3d_gs
139 MODULE PROCEDURE pw_set_value_c1d_gs
140 MODULE PROCEDURE pw_zero_c1d_gs
141 MODULE PROCEDURE pw_set_value_c3d_gs
142 MODULE PROCEDURE pw_zero_c3d_gs
143 END INTERFACE
144
145 INTERFACE pw_copy
146 MODULE PROCEDURE pw_copy_r1d_r1d_rs
147 MODULE PROCEDURE pw_copy_r1d_c1d_rs
148 MODULE PROCEDURE pw_copy_r3d_r3d_rs
149 MODULE PROCEDURE pw_copy_r3d_c3d_rs
150 MODULE PROCEDURE pw_copy_c1d_r1d_rs
151 MODULE PROCEDURE pw_copy_c1d_c1d_rs
152 MODULE PROCEDURE pw_copy_c3d_r3d_rs
153 MODULE PROCEDURE pw_copy_c3d_c3d_rs
154 MODULE PROCEDURE pw_copy_r1d_r1d_gs
155 MODULE PROCEDURE pw_copy_r1d_c1d_gs
156 MODULE PROCEDURE pw_copy_r3d_r3d_gs
157 MODULE PROCEDURE pw_copy_r3d_c3d_gs
158 MODULE PROCEDURE pw_copy_c1d_r1d_gs
159 MODULE PROCEDURE pw_copy_c1d_c1d_gs
160 MODULE PROCEDURE pw_copy_c3d_r3d_gs
161 MODULE PROCEDURE pw_copy_c3d_c3d_gs
162 END INTERFACE
163
164 INTERFACE pw_axpy
165 MODULE PROCEDURE pw_axpy_r1d_r1d_rs
166 MODULE PROCEDURE pw_axpy_r1d_c1d_rs
167 MODULE PROCEDURE pw_axpy_r3d_r3d_rs
168 MODULE PROCEDURE pw_axpy_r3d_c3d_rs
169 MODULE PROCEDURE pw_axpy_c1d_r1d_rs
170 MODULE PROCEDURE pw_axpy_c1d_c1d_rs
171 MODULE PROCEDURE pw_axpy_c3d_r3d_rs
172 MODULE PROCEDURE pw_axpy_c3d_c3d_rs
173 MODULE PROCEDURE pw_axpy_r1d_r1d_gs
174 MODULE PROCEDURE pw_axpy_r1d_c1d_gs
175 MODULE PROCEDURE pw_axpy_r3d_r3d_gs
176 MODULE PROCEDURE pw_axpy_r3d_c3d_gs
177 MODULE PROCEDURE pw_axpy_c1d_r1d_gs
178 MODULE PROCEDURE pw_axpy_c1d_c1d_gs
179 MODULE PROCEDURE pw_axpy_c3d_r3d_gs
180 MODULE PROCEDURE pw_axpy_c3d_c3d_gs
181 END INTERFACE
182
183 INTERFACE pw_multiply
184 MODULE PROCEDURE pw_multiply_r1d_r1d_rs
185 MODULE PROCEDURE pw_multiply_r1d_c1d_rs
186 MODULE PROCEDURE pw_multiply_r3d_r3d_rs
187 MODULE PROCEDURE pw_multiply_r3d_c3d_rs
188 MODULE PROCEDURE pw_multiply_c1d_r1d_rs
189 MODULE PROCEDURE pw_multiply_c1d_c1d_rs
190 MODULE PROCEDURE pw_multiply_c3d_r3d_rs
191 MODULE PROCEDURE pw_multiply_c3d_c3d_rs
192 MODULE PROCEDURE pw_multiply_r1d_r1d_gs
193 MODULE PROCEDURE pw_multiply_r1d_c1d_gs
194 MODULE PROCEDURE pw_multiply_r3d_r3d_gs
195 MODULE PROCEDURE pw_multiply_r3d_c3d_gs
196 MODULE PROCEDURE pw_multiply_c1d_r1d_gs
197 MODULE PROCEDURE pw_multiply_c1d_c1d_gs
198 MODULE PROCEDURE pw_multiply_c3d_r3d_gs
199 MODULE PROCEDURE pw_multiply_c3d_c3d_gs
200 END INTERFACE
201
203 MODULE PROCEDURE pw_multiply_with_r1d_r1d_rs
204 MODULE PROCEDURE pw_multiply_with_r1d_c1d_rs
205 MODULE PROCEDURE pw_multiply_with_r3d_r3d_rs
206 MODULE PROCEDURE pw_multiply_with_r3d_c3d_rs
207 MODULE PROCEDURE pw_multiply_with_c1d_r1d_rs
208 MODULE PROCEDURE pw_multiply_with_c1d_c1d_rs
209 MODULE PROCEDURE pw_multiply_with_c3d_r3d_rs
210 MODULE PROCEDURE pw_multiply_with_c3d_c3d_rs
211 MODULE PROCEDURE pw_multiply_with_r1d_r1d_gs
212 MODULE PROCEDURE pw_multiply_with_r1d_c1d_gs
213 MODULE PROCEDURE pw_multiply_with_r3d_r3d_gs
214 MODULE PROCEDURE pw_multiply_with_r3d_c3d_gs
215 MODULE PROCEDURE pw_multiply_with_c1d_r1d_gs
216 MODULE PROCEDURE pw_multiply_with_c1d_c1d_gs
217 MODULE PROCEDURE pw_multiply_with_c3d_r3d_gs
218 MODULE PROCEDURE pw_multiply_with_c3d_c3d_gs
219 END INTERFACE
220
222 MODULE PROCEDURE pw_integral_ab_r1d_r1d_rs
223 MODULE PROCEDURE pw_integral_ab_r1d_c1d_rs
224 MODULE PROCEDURE pw_integral_ab_r3d_r3d_rs
225 MODULE PROCEDURE pw_integral_ab_r3d_c3d_rs
226 MODULE PROCEDURE pw_integral_ab_c1d_r1d_rs
227 MODULE PROCEDURE pw_integral_ab_c1d_c1d_rs
228 MODULE PROCEDURE pw_integral_ab_c3d_r3d_rs
229 MODULE PROCEDURE pw_integral_ab_c3d_c3d_rs
230 MODULE PROCEDURE pw_integral_ab_r1d_r1d_gs
231 MODULE PROCEDURE pw_integral_ab_r1d_c1d_gs
232 MODULE PROCEDURE pw_integral_ab_r3d_r3d_gs
233 MODULE PROCEDURE pw_integral_ab_r3d_c3d_gs
234 MODULE PROCEDURE pw_integral_ab_c1d_r1d_gs
235 MODULE PROCEDURE pw_integral_ab_c1d_c1d_gs
236 MODULE PROCEDURE pw_integral_ab_c3d_r3d_gs
237 MODULE PROCEDURE pw_integral_ab_c3d_c3d_gs
238 END INTERFACE
239
241 MODULE PROCEDURE pw_integral_a2b_r1d_r1d
242 MODULE PROCEDURE pw_integral_a2b_r1d_c1d
243 MODULE PROCEDURE pw_integral_a2b_c1d_r1d
244 MODULE PROCEDURE pw_integral_a2b_c1d_c1d
245 END INTERFACE
246
247 INTERFACE pw_gather
248 MODULE PROCEDURE pw_gather_p_r1d
249 MODULE PROCEDURE pw_gather_p_c1d
250 MODULE PROCEDURE pw_gather_s_r1d_r3d
251 MODULE PROCEDURE pw_gather_s_r1d_c3d
252 MODULE PROCEDURE pw_gather_s_c1d_r3d
253 MODULE PROCEDURE pw_gather_s_c1d_c3d
254 END INTERFACE
255
256 INTERFACE pw_scatter
257 MODULE PROCEDURE pw_scatter_p_r1d
258 MODULE PROCEDURE pw_scatter_p_c1d
259 MODULE PROCEDURE pw_scatter_s_r1d_r3d
260 MODULE PROCEDURE pw_scatter_s_r1d_c3d
261 MODULE PROCEDURE pw_scatter_s_c1d_r3d
262 MODULE PROCEDURE pw_scatter_s_c1d_c3d
263 END INTERFACE
264
266 MODULE PROCEDURE pw_copy_to_array_r1d_r1d_rs
267 MODULE PROCEDURE pw_copy_to_array_r1d_c1d_rs
268 MODULE PROCEDURE pw_copy_to_array_r3d_r3d_rs
269 MODULE PROCEDURE pw_copy_to_array_r3d_c3d_rs
270 MODULE PROCEDURE pw_copy_to_array_c1d_r1d_rs
271 MODULE PROCEDURE pw_copy_to_array_c1d_c1d_rs
272 MODULE PROCEDURE pw_copy_to_array_c3d_r3d_rs
273 MODULE PROCEDURE pw_copy_to_array_c3d_c3d_rs
274 MODULE PROCEDURE pw_copy_to_array_r1d_r1d_gs
275 MODULE PROCEDURE pw_copy_to_array_r1d_c1d_gs
276 MODULE PROCEDURE pw_copy_to_array_r3d_r3d_gs
277 MODULE PROCEDURE pw_copy_to_array_r3d_c3d_gs
278 MODULE PROCEDURE pw_copy_to_array_c1d_r1d_gs
279 MODULE PROCEDURE pw_copy_to_array_c1d_c1d_gs
280 MODULE PROCEDURE pw_copy_to_array_c3d_r3d_gs
281 MODULE PROCEDURE pw_copy_to_array_c3d_c3d_gs
282 END INTERFACE
283
285 MODULE PROCEDURE pw_copy_from_array_r1d_r1d_rs
286 MODULE PROCEDURE pw_copy_from_array_r1d_c1d_rs
287 MODULE PROCEDURE pw_copy_from_array_r3d_r3d_rs
288 MODULE PROCEDURE pw_copy_from_array_r3d_c3d_rs
289 MODULE PROCEDURE pw_copy_from_array_c1d_r1d_rs
290 MODULE PROCEDURE pw_copy_from_array_c1d_c1d_rs
291 MODULE PROCEDURE pw_copy_from_array_c3d_r3d_rs
292 MODULE PROCEDURE pw_copy_from_array_c3d_c3d_rs
293 MODULE PROCEDURE pw_copy_from_array_r1d_r1d_gs
294 MODULE PROCEDURE pw_copy_from_array_r1d_c1d_gs
295 MODULE PROCEDURE pw_copy_from_array_r3d_r3d_gs
296 MODULE PROCEDURE pw_copy_from_array_r3d_c3d_gs
297 MODULE PROCEDURE pw_copy_from_array_c1d_r1d_gs
298 MODULE PROCEDURE pw_copy_from_array_c1d_c1d_gs
299 MODULE PROCEDURE pw_copy_from_array_c3d_r3d_gs
300 MODULE PROCEDURE pw_copy_from_array_c3d_c3d_gs
301 END INTERFACE
302
303 INTERFACE pw_transfer
304 MODULE PROCEDURE pw_copy_r1d_r1d_rs
305 MODULE PROCEDURE pw_copy_r1d_r1d_gs
306 MODULE PROCEDURE pw_gather_s_r1d_r3d_2
307 MODULE PROCEDURE pw_scatter_s_r1d_r3d_2
308 MODULE PROCEDURE pw_copy_r1d_c1d_rs
309 MODULE PROCEDURE pw_copy_r1d_c1d_gs
310 MODULE PROCEDURE pw_gather_s_r1d_c3d_2
311 MODULE PROCEDURE pw_scatter_s_r1d_c3d_2
312 MODULE PROCEDURE pw_copy_r3d_r3d_rs
313 MODULE PROCEDURE pw_copy_r3d_r3d_gs
314 MODULE PROCEDURE fft_wrap_pw1pw2_r3d_c1d_rs_gs
315 MODULE PROCEDURE pw_copy_r3d_c3d_rs
316 MODULE PROCEDURE pw_copy_r3d_c3d_gs
317 MODULE PROCEDURE fft_wrap_pw1pw2_r3d_c3d_rs_gs
318 MODULE PROCEDURE pw_copy_c1d_r1d_rs
319 MODULE PROCEDURE pw_copy_c1d_r1d_gs
320 MODULE PROCEDURE pw_gather_s_c1d_r3d_2
321 MODULE PROCEDURE pw_scatter_s_c1d_r3d_2
322 MODULE PROCEDURE fft_wrap_pw1pw2_c1d_r3d_gs_rs
323 MODULE PROCEDURE pw_copy_c1d_c1d_rs
324 MODULE PROCEDURE pw_copy_c1d_c1d_gs
325 MODULE PROCEDURE pw_gather_s_c1d_c3d_2
326 MODULE PROCEDURE pw_scatter_s_c1d_c3d_2
327 MODULE PROCEDURE fft_wrap_pw1pw2_c1d_c3d_gs_rs
328 MODULE PROCEDURE pw_copy_c3d_r3d_rs
329 MODULE PROCEDURE pw_copy_c3d_r3d_gs
330 MODULE PROCEDURE fft_wrap_pw1pw2_c3d_r3d_gs_rs
331 MODULE PROCEDURE fft_wrap_pw1pw2_c3d_c1d_rs_gs
332 MODULE PROCEDURE pw_copy_c3d_c3d_rs
333 MODULE PROCEDURE pw_copy_c3d_c3d_gs
334 MODULE PROCEDURE fft_wrap_pw1pw2_c3d_c3d_rs_gs
335 MODULE PROCEDURE fft_wrap_pw1pw2_c3d_c3d_gs_rs
336 END INTERFACE
337
338CONTAINS
339! **************************************************************************************************
340!> \brief Set values of a pw type to zero
341!> \param pw ...
342!> \par History
343!> none
344!> \author apsi
345! **************************************************************************************************
346 SUBROUTINE pw_zero_r1d_rs (pw)
347
348 TYPE(pw_r1d_rs_type), INTENT(INOUT) :: pw
349
350 CHARACTER(len=*), PARAMETER :: routineN = 'pw_zero'
351
352 INTEGER :: handle
353
354 CALL timeset(routinen, handle)
355
356!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
357 pw%array = 0.0_dp
358!$OMP END PARALLEL WORKSHARE
359
360 CALL timestop(handle)
361
362 END SUBROUTINE pw_zero_r1d_rs
363
364! **************************************************************************************************
365!> \brief multiplies pw coeffs with a number
366!> \param pw ...
367!> \param a ...
368!> \par History
369!> 11.2004 created [Joost VandeVondele]
370! **************************************************************************************************
371 SUBROUTINE pw_scale_r1d_rs (pw, a)
372
373 TYPE(pw_r1d_rs_type), INTENT(INOUT) :: pw
374 REAL(KIND=dp), INTENT(IN) :: a
375
376 CHARACTER(len=*), PARAMETER :: routineN = 'pw_scale'
377
378 INTEGER :: handle
379
380 CALL timeset(routinen, handle)
381
382!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(A, pw)
383 pw%array = a*pw%array
384!$OMP END PARALLEL WORKSHARE
385
386 CALL timestop(handle)
387
388 END SUBROUTINE pw_scale_r1d_rs
389
390! **************************************************************************************************
391!> \brief writes a small description of the actual grid
392!> (change to output the data as cube file, maybe with an
393!> optional long_description arg?)
394!> \param pw the pw data to output
395!> \param unit_nr the unit to output to
396!> \par History
397!> 08.2002 created [fawzi]
398!> \author Fawzi Mohamed
399! **************************************************************************************************
400 SUBROUTINE pw_write_r1d_rs (pw, unit_nr)
401
402 TYPE(pw_r1d_rs_type), INTENT(in) :: pw
403 INTEGER, INTENT(in) :: unit_nr
404
405 INTEGER :: iostatus
406
407 WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=iostatus)
408
409 WRITE (unit=unit_nr, fmt="(a)", iostat=iostatus) " in_use=r1d"
410 IF (ASSOCIATED(pw%array)) THEN
411 WRITE (unit=unit_nr, fmt="(' array=<r(',i8,':',i8,')>')") &
412 lbound(pw%array, 1), ubound(pw%array, 1)
413 ELSE
414 WRITE (unit=unit_nr, fmt="(' array=*null*')")
415 END IF
416
417 END SUBROUTINE pw_write_r1d_rs
418
419! **************************************************************************************************
420!> \brief ...
421!> \param fun ...
422!> \param isign ...
423!> \param oprt ...
424!> \return ...
425! **************************************************************************************************
426 FUNCTION pw_integrate_function_r1d_rs (fun, isign, oprt) RESULT(total_fun)
! Integrate a real-space 1d function over the grid: total_fun = dvol * sum(fun%array).
! oprt = "ABS"/"abs" integrates |fun| instead; any other value aborts.
! isign, when present, multiplies the result by sign(1, isign) (isign = 0 yields +1).
427
428 TYPE(pw_r1d_rs_type), INTENT(IN) :: fun
429 INTEGER, INTENT(IN), OPTIONAL :: isign
430 CHARACTER(len=*), INTENT(IN), OPTIONAL :: oprt
431 REAL(kind=dp) :: total_fun
432
433 INTEGER :: iop
434
! iop = 1 selects the absolute-value integrand, 0 the plain one
435 iop = 0
436
437 IF (PRESENT(oprt)) THEN
438 SELECT CASE (oprt)
439 CASE ("ABS", "abs")
440 iop = 1
441 CASE DEFAULT
442 cpabort("Unknown operator")
443 END SELECT
444 END IF
445
446 total_fun = 0.0_dp
447
448 ! do reduction using maximum accuracy
449 IF (iop == 1) THEN
450 total_fun = fun%pw_grid%dvol*accurate_sum(abs(fun%array))
451 ELSE
452 total_fun = fun%pw_grid%dvol*accurate_sum( fun%array)
453 END IF
454
! sum the per-rank partial integrals when the grid is distributed
455 IF (fun%pw_grid%para%mode /= pw_mode_local) THEN
456 CALL fun%pw_grid%para%group%sum(total_fun)
457 END IF
458
459 IF (PRESENT(isign)) THEN
460 total_fun = total_fun*sign(1._dp, real(isign, dp))
461 END IF
462
463 END FUNCTION pw_integrate_function_r1d_rs
464
465! **************************************************************************************************
466!> \brief ...
467!> \param pw ...
468!> \param value ...
469! **************************************************************************************************
470 SUBROUTINE pw_set_value_r1d_rs (pw, value)
471 TYPE(pw_r1d_rs_type), INTENT(IN) :: pw
472 REAL(KIND=dp), INTENT(IN) :: value
473
474 CHARACTER(len=*), PARAMETER :: routineN = 'pw_set_value'
475
476 INTEGER :: handle
477
478 CALL timeset(routinen, handle)
479
480!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw,value)
481 pw%array = value
482!$OMP END PARALLEL WORKSHARE
483
484 CALL timestop(handle)
485
486 END SUBROUTINE pw_set_value_r1d_rs
487! **************************************************************************************************
488!> \brief Set values of a pw type to zero
489!> \param pw ...
490!> \par History
491!> none
492!> \author apsi
493! **************************************************************************************************
494 SUBROUTINE pw_zero_r1d_gs (pw)
495
496 TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw
497
498 CHARACTER(len=*), PARAMETER :: routineN = 'pw_zero'
499
500 INTEGER :: handle
501
502 CALL timeset(routinen, handle)
503
504!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
505 pw%array = 0.0_dp
506!$OMP END PARALLEL WORKSHARE
507
508 CALL timestop(handle)
509
510 END SUBROUTINE pw_zero_r1d_gs
511
512! **************************************************************************************************
513!> \brief multiplies pw coeffs with a number
514!> \param pw ...
515!> \param a ...
516!> \par History
517!> 11.2004 created [Joost VandeVondele]
518! **************************************************************************************************
519 SUBROUTINE pw_scale_r1d_gs (pw, a)
520
521 TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw
522 REAL(KIND=dp), INTENT(IN) :: a
523
524 CHARACTER(len=*), PARAMETER :: routineN = 'pw_scale'
525
526 INTEGER :: handle
527
528 CALL timeset(routinen, handle)
529
530!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(A, pw)
531 pw%array = a*pw%array
532!$OMP END PARALLEL WORKSHARE
533
534 CALL timestop(handle)
535
536 END SUBROUTINE pw_scale_r1d_gs
537
538! **************************************************************************************************
539!> \brief writes a small description of the actual grid
540!> (change to output the data as cube file, maybe with an
541!> optional long_description arg?)
542!> \param pw the pw data to output
543!> \param unit_nr the unit to output to
544!> \par History
545!> 08.2002 created [fawzi]
546!> \author Fawzi Mohamed
547! **************************************************************************************************
548 SUBROUTINE pw_write_r1d_gs (pw, unit_nr)
549
550 TYPE(pw_r1d_gs_type), INTENT(in) :: pw
551 INTEGER, INTENT(in) :: unit_nr
552
553 INTEGER :: iostatus
554
555 WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=iostatus)
556
557 WRITE (unit=unit_nr, fmt="(a)", iostat=iostatus) " in_use=r1d"
558 IF (ASSOCIATED(pw%array)) THEN
559 WRITE (unit=unit_nr, fmt="(' array=<r(',i8,':',i8,')>')") &
560 lbound(pw%array, 1), ubound(pw%array, 1)
561 ELSE
562 WRITE (unit=unit_nr, fmt="(' array=*null*')")
563 END IF
564
565 END SUBROUTINE pw_write_r1d_gs
566
567! **************************************************************************************************
568!> \brief ...
569!> \param fun ...
570!> \param isign ...
571!> \param oprt ...
572!> \return ...
573! **************************************************************************************************
574 FUNCTION pw_integrate_function_r1d_gs (fun, isign, oprt) RESULT(total_fun)
! Integrate a g-space 1d function over real space. In reciprocal space the
! integral reduces to vol * coefficient of the G=0 component, which only the
! rank owning g0 contributes; the distributed sum propagates it to all ranks.
! oprt = "ABS"/"abs" is rejected here (not implemented in g-space).
! isign, when present, multiplies the result by sign(1, isign).
575
576 TYPE(pw_r1d_gs_type), INTENT(IN) :: fun
577 INTEGER, INTENT(IN), OPTIONAL :: isign
578 CHARACTER(len=*), INTENT(IN), OPTIONAL :: oprt
579 REAL(kind=dp) :: total_fun
580
581 INTEGER :: iop
582
583 iop = 0
584
585 IF (PRESENT(oprt)) THEN
586 SELECT CASE (oprt)
587 CASE ("ABS", "abs")
588 iop = 1
589 CASE DEFAULT
590 cpabort("Unknown operator")
591 END SELECT
592 END IF
593
594 total_fun = 0.0_dp
595
596 IF (iop == 1) &
597 cpabort("Operator ABS not implemented")
! first array element holds the G=0 coefficient on the rank that owns it
598 IF (fun%pw_grid%have_g0) total_fun = fun%pw_grid%vol* fun%array(1)
599
600 IF (fun%pw_grid%para%mode /= pw_mode_local) THEN
601 CALL fun%pw_grid%para%group%sum(total_fun)
602 END IF
603
604 IF (PRESENT(isign)) THEN
605 total_fun = total_fun*sign(1._dp, real(isign, dp))
606 END IF
607
608 END FUNCTION pw_integrate_function_r1d_gs
609
610! **************************************************************************************************
611!> \brief ...
612!> \param pw ...
613!> \param value ...
614! **************************************************************************************************
615 SUBROUTINE pw_set_value_r1d_gs (pw, value)
616 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw
617 REAL(KIND=dp), INTENT(IN) :: value
618
619 CHARACTER(len=*), PARAMETER :: routineN = 'pw_set_value'
620
621 INTEGER :: handle
622
623 CALL timeset(routinen, handle)
624
625!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw,value)
626 pw%array = value
627!$OMP END PARALLEL WORKSHARE
628
629 CALL timestop(handle)
630
631 END SUBROUTINE pw_set_value_r1d_gs
632
633! **************************************************************************************************
634!> \brief ...
635!> \param pw ...
636!> \param c ...
637!> \param scale ...
638! **************************************************************************************************
639 SUBROUTINE pw_gather_p_r1d (pw, c, scale)
! Gather the g-vector coefficients from the distributed FFT scratch array c
! into the 1d coefficient array pw%array, taking the real part of each entry
! and optionally multiplying by scale. Requires a distributed grid.
640
641 TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw
642 COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, INTENT(IN) :: c
643 REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale
644
645 CHARACTER(len=*), PARAMETER :: routineN = 'pw_gather_p'
646
647 INTEGER :: gpt, handle, l, m, mn, n
648
649 CALL timeset(routinen, handle)
650
651 IF (pw%pw_grid%para%mode /= pw_mode_distributed) THEN
652 cpabort("This grid type is not distributed")
653 END IF
654
! local aliases into the grid's index maps; ngpts = number of local g-vectors
655 associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
656 ngpts => SIZE(pw%pw_grid%gsq), ghat => pw%pw_grid%g_hat, yzq => pw%pw_grid%para%yzq)
657
! scaled and unscaled loops duplicated to keep the optional test out of the hot loop
658 IF (PRESENT(scale)) THEN
659!$OMP PARALLEL DO DEFAULT(NONE), &
660!$OMP PRIVATE(l, m, mn, n), &
661!$OMP SHARED(c, pw, scale)
662 DO gpt = 1, ngpts
! map the integer g-vector ghat(:,gpt) to its (l, mn) slot in c
663 l = mapl(ghat(1, gpt)) + 1
664 m = mapm(ghat(2, gpt)) + 1
665 n = mapn(ghat(3, gpt)) + 1
666 mn = yzq(m, n)
667 pw%array(gpt) = scale* real(c(l, mn), kind=dp)
668 END DO
669!$OMP END PARALLEL DO
670 ELSE
671!$OMP PARALLEL DO DEFAULT(NONE), &
672!$OMP PRIVATE(l, m, mn, n), &
673!$OMP SHARED(c, pw)
674 DO gpt = 1, ngpts
675 l = mapl(ghat(1, gpt)) + 1
676 m = mapm(ghat(2, gpt)) + 1
677 n = mapn(ghat(3, gpt)) + 1
678 mn = yzq(m, n)
679 pw%array(gpt) = real(c(l, mn), kind=dp)
680 END DO
681!$OMP END PARALLEL DO
682 END IF
683
684 END associate
685
686 CALL timestop(handle)
687
688 END SUBROUTINE pw_gather_p_r1d
689
690! **************************************************************************************************
691!> \brief ...
692!> \param pw ...
693!> \param c ...
694!> \param scale ...
695! **************************************************************************************************
696 SUBROUTINE pw_scatter_p_r1d (pw, c, scale)
! Scatter the 1d coefficient array pw%array into the distributed FFT scratch
! array c (as purely real complex values), optionally multiplied by scale.
! Without scale, c is zeroed first. For halfspace grids a second pass also
! fills the slots of the negative g-vectors. Requires a distributed grid.
697 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw
698 COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, INTENT(INOUT) :: c
699 REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale
700
701 CHARACTER(len=*), PARAMETER :: routineN = 'pw_scatter_p'
702
703 INTEGER :: gpt, handle, l, m, mn, n
704
705 CALL timeset(routinen, handle)
706
707 IF (pw%pw_grid%para%mode /= pw_mode_distributed) THEN
708 cpabort("This grid type is not distributed")
709 END IF
710
! positive-index maps: slot of each +g vector in c
711 associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
712 ghat => pw%pw_grid%g_hat, yzq => pw%pw_grid%para%yzq, ngpts => SIZE(pw%pw_grid%gsq))
713
! NOTE(review): c is only cleared when scale is absent — callers passing
! scale appear responsible for any required initialization of c; confirm.
714 IF (.NOT. PRESENT(scale)) c = z_zero
715
716 IF (PRESENT(scale)) THEN
717!$OMP PARALLEL DO DEFAULT(NONE), &
718!$OMP PRIVATE(l, m, mn, n), &
719!$OMP SHARED(c, pw, scale)
720 DO gpt = 1, ngpts
721 l = mapl(ghat(1, gpt)) + 1
722 m = mapm(ghat(2, gpt)) + 1
723 n = mapn(ghat(3, gpt)) + 1
724 mn = yzq(m, n)
725 c(l, mn) = cmplx(scale*pw%array(gpt), 0.0_dp, kind=dp)
726 END DO
727!$OMP END PARALLEL DO
728 ELSE
729!$OMP PARALLEL DO DEFAULT(NONE), &
730!$OMP PRIVATE(l, m, mn, n), &
731!$OMP SHARED(c, pw)
732 DO gpt = 1, ngpts
733 l = mapl(ghat(1, gpt)) + 1
734 m = mapm(ghat(2, gpt)) + 1
735 n = mapn(ghat(3, gpt)) + 1
736 mn = yzq(m, n)
737 c(l, mn) = cmplx(pw%array(gpt), 0.0_dp, kind=dp)
738 END DO
739!$OMP END PARALLEL DO
740 END IF
741
742 END associate
743
! halfspace grids store only half the sphere: replicate each coefficient
! into the slot of its negative counterpart (neg maps) as well
744 IF (pw%pw_grid%grid_span == halfspace) THEN
745
746 associate(mapm => pw%pw_grid%mapm%neg, mapn => pw%pw_grid%mapn%neg, mapl => pw%pw_grid%mapl%neg, &
747 ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq), yzq => pw%pw_grid%para%yzq)
748
749 IF (PRESENT(scale)) THEN
750!$OMP PARALLEL DO DEFAULT(NONE), &
751!$OMP PRIVATE(l, m, mn, n), &
752!$OMP SHARED(c, pw, scale)
753 DO gpt = 1, ngpts
754 l = mapl(ghat(1, gpt)) + 1
755 m = mapm(ghat(2, gpt)) + 1
756 n = mapn(ghat(3, gpt)) + 1
757 mn = yzq(m, n)
758 c(l, mn) = scale*( cmplx(pw%array(gpt), 0.0_dp, kind=dp))
759 END DO
760!$OMP END PARALLEL DO
761 ELSE
762!$OMP PARALLEL DO DEFAULT(NONE), &
763!$OMP PRIVATE(l, m, mn, n) &
764!$OMP SHARED(c, pw)
765 DO gpt = 1, ngpts
766 l = mapl(ghat(1, gpt)) + 1
767 m = mapm(ghat(2, gpt)) + 1
768 n = mapn(ghat(3, gpt)) + 1
769 mn = yzq(m, n)
770 c(l, mn) = ( cmplx(pw%array(gpt), 0.0_dp, kind=dp))
771 END DO
772!$OMP END PARALLEL DO
773 END IF
774 END associate
775 END IF
776
777 CALL timestop(handle)
778
779 END SUBROUTINE pw_scatter_p_r1d
780! **************************************************************************************************
781!> \brief Set values of a pw type to zero
782!> \param pw ...
783!> \par History
784!> none
785!> \author apsi
786! **************************************************************************************************
787 SUBROUTINE pw_zero_r3d_rs (pw)
788
789 TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw
790
791 CHARACTER(len=*), PARAMETER :: routineN = 'pw_zero'
792
793 INTEGER :: handle
794
795 CALL timeset(routinen, handle)
796
797!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
798 pw%array = 0.0_dp
799!$OMP END PARALLEL WORKSHARE
800
801 CALL timestop(handle)
802
803 END SUBROUTINE pw_zero_r3d_rs
804
805! **************************************************************************************************
806!> \brief multiplies pw coeffs with a number
807!> \param pw ...
808!> \param a ...
809!> \par History
810!> 11.2004 created [Joost VandeVondele]
811! **************************************************************************************************
812 SUBROUTINE pw_scale_r3d_rs (pw, a)
813
814 TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw
815 REAL(KIND=dp), INTENT(IN) :: a
816
817 CHARACTER(len=*), PARAMETER :: routineN = 'pw_scale'
818
819 INTEGER :: handle
820
821 CALL timeset(routinen, handle)
822
823!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(A, pw)
824 pw%array = a*pw%array
825!$OMP END PARALLEL WORKSHARE
826
827 CALL timestop(handle)
828
829 END SUBROUTINE pw_scale_r3d_rs
830
831! **************************************************************************************************
832!> \brief writes a small description of the actual grid
833!> (change to output the data as cube file, maybe with an
834!> optional long_description arg?)
835!> \param pw the pw data to output
836!> \param unit_nr the unit to output to
837!> \par History
838!> 08.2002 created [fawzi]
839!> \author Fawzi Mohamed
840! **************************************************************************************************
841 SUBROUTINE pw_write_r3d_rs (pw, unit_nr)
842
843 TYPE(pw_r3d_rs_type), INTENT(in) :: pw
844 INTEGER, INTENT(in) :: unit_nr
845
846 INTEGER :: iostatus
847
848 WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=iostatus)
849
850 WRITE (unit=unit_nr, fmt="(a)", iostat=iostatus) " in_use=r3d"
851 IF (ASSOCIATED(pw%array)) THEN
852 WRITE (unit=unit_nr, fmt="(' array=<r(',i8,':',i8,',',i8,':',i8,',',i8,':',i8,')>')") &
853 lbound(pw%array, 1), ubound(pw%array, 1), lbound(pw%array, 2), ubound(pw%array, 2), &
854 lbound(pw%array, 3), ubound(pw%array, 3)
855 ELSE
856 WRITE (unit=unit_nr, fmt="(' array=*null*')")
857 END IF
858
859 END SUBROUTINE pw_write_r3d_rs
860
861! **************************************************************************************************
862!> \brief ...
863!> \param fun ...
864!> \param isign ...
865!> \param oprt ...
866!> \return ...
867! **************************************************************************************************
868 FUNCTION pw_integrate_function_r3d_rs (fun, isign, oprt) RESULT(total_fun)
! Integrate a real-space 3d function over the grid: total_fun = dvol * sum(fun%array).
! oprt = "ABS"/"abs" integrates |fun| instead; any other value aborts.
! isign, when present, multiplies the result by sign(1, isign) (isign = 0 yields +1).
869
870 TYPE(pw_r3d_rs_type), INTENT(IN) :: fun
871 INTEGER, INTENT(IN), OPTIONAL :: isign
872 CHARACTER(len=*), INTENT(IN), OPTIONAL :: oprt
873 REAL(kind=dp) :: total_fun
874
875 INTEGER :: iop
876
! iop = 1 selects the absolute-value integrand, 0 the plain one
877 iop = 0
878
879 IF (PRESENT(oprt)) THEN
880 SELECT CASE (oprt)
881 CASE ("ABS", "abs")
882 iop = 1
883 CASE DEFAULT
884 cpabort("Unknown operator")
885 END SELECT
886 END IF
887
888 total_fun = 0.0_dp
889
890 ! do reduction using maximum accuracy
891 IF (iop == 1) THEN
892 total_fun = fun%pw_grid%dvol*accurate_sum(abs(fun%array))
893 ELSE
894 total_fun = fun%pw_grid%dvol*accurate_sum( fun%array)
895 END IF
896
! sum the per-rank partial integrals when the grid is distributed
897 IF (fun%pw_grid%para%mode /= pw_mode_local) THEN
898 CALL fun%pw_grid%para%group%sum(total_fun)
899 END IF
900
901 IF (PRESENT(isign)) THEN
902 total_fun = total_fun*sign(1._dp, real(isign, dp))
903 END IF
904
905 END FUNCTION pw_integrate_function_r3d_rs
906
907! **************************************************************************************************
908!> \brief ...
909!> \param pw ...
910!> \param value ...
911! **************************************************************************************************
912 SUBROUTINE pw_set_value_r3d_rs (pw, value)
913 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw
914 REAL(KIND=dp), INTENT(IN) :: value
915
916 CHARACTER(len=*), PARAMETER :: routineN = 'pw_set_value'
917
918 INTEGER :: handle
919
920 CALL timeset(routinen, handle)
921
922!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw,value)
923 pw%array = value
924!$OMP END PARALLEL WORKSHARE
925
926 CALL timestop(handle)
927
928 END SUBROUTINE pw_set_value_r3d_rs
929! **************************************************************************************************
930!> \brief Set values of a pw type to zero
931!> \param pw ...
932!> \par History
933!> none
934!> \author apsi
935! **************************************************************************************************
936 SUBROUTINE pw_zero_r3d_gs (pw)
937
938 TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw
939
940 CHARACTER(len=*), PARAMETER :: routineN = 'pw_zero'
941
942 INTEGER :: handle
943
944 CALL timeset(routinen, handle)
945
946!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
947 pw%array = 0.0_dp
948!$OMP END PARALLEL WORKSHARE
949
950 CALL timestop(handle)
951
952 END SUBROUTINE pw_zero_r3d_gs
953
954! **************************************************************************************************
955!> \brief multiplies pw coeffs with a number
956!> \param pw ...
957!> \param a ...
958!> \par History
959!> 11.2004 created [Joost VandeVondele]
960! **************************************************************************************************
961 SUBROUTINE pw_scale_r3d_gs (pw, a)
962
963 TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw
964 REAL(KIND=dp), INTENT(IN) :: a
965
966 CHARACTER(len=*), PARAMETER :: routineN = 'pw_scale'
967
968 INTEGER :: handle
969
970 CALL timeset(routinen, handle)
971
972!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(A, pw)
973 pw%array = a*pw%array
974!$OMP END PARALLEL WORKSHARE
975
976 CALL timestop(handle)
977
978 END SUBROUTINE pw_scale_r3d_gs
979
980! **************************************************************************************************
981!> \brief writes a small description of the actual grid
982!> (change to output the data as cube file, maybe with an
983!> optional long_description arg?)
984!> \param pw the pw data to output
985!> \param unit_nr the unit to output to
986!> \par History
987!> 08.2002 created [fawzi]
988!> \author Fawzi Mohamed
989! **************************************************************************************************
990 SUBROUTINE pw_write_r3d_gs (pw, unit_nr)
991
992 TYPE(pw_r3d_gs_type), INTENT(in) :: pw
993 INTEGER, INTENT(in) :: unit_nr
994
995 INTEGER :: iostatus
996
997 WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=iostatus)
998
999 WRITE (unit=unit_nr, fmt="(a)", iostat=iostatus) " in_use=r3d"
1000 IF (ASSOCIATED(pw%array)) THEN
1001 WRITE (unit=unit_nr, fmt="(' array=<r(',i8,':',i8,',',i8,':',i8,',',i8,':',i8,')>')") &
1002 lbound(pw%array, 1), ubound(pw%array, 1), lbound(pw%array, 2), ubound(pw%array, 2), &
1003 lbound(pw%array, 3), ubound(pw%array, 3)
1004 ELSE
1005 WRITE (unit=unit_nr, fmt="(' array=*null*')")
1006 END IF
1007
1008 END SUBROUTINE pw_write_r3d_gs
1009
1010! **************************************************************************************************
1011!> \brief ...
1012!> \param fun ...
1013!> \param isign ...
1014!> \param oprt ...
1015!> \return ...
1016! **************************************************************************************************
   FUNCTION pw_integrate_function_r3d_gs (fun, isign, oprt) RESULT(total_fun)

      !> Integration of a function stored as 3d reciprocal-space data.
      !> NOT IMPLEMENTED: this routine always aborts via the unconditional
      !> cpabort below; everything after it is unreachable template code.
      TYPE(pw_r3d_gs_type), INTENT(IN) :: fun
      INTEGER, INTENT(IN), OPTIONAL :: isign            ! only its sign would flip the result
      CHARACTER(len=*), INTENT(IN), OPTIONAL :: oprt    ! "ABS"/"abs" selects modulus integration
      REAL(kind=dp) :: total_fun

      INTEGER :: iop

      iop = 0

      IF (PRESENT(oprt)) THEN
         SELECT CASE (oprt)
         CASE ("ABS", "abs")
            iop = 1
         CASE DEFAULT
            cpabort("Unknown operator")
         END SELECT
      END IF

      total_fun = 0.0_dp

      IF (iop == 1) &
         cpabort("Operator ABS not implemented")
      ! unconditional abort: no reciprocal-space integration for 3d grids
      cpabort("Reciprocal space integration for 3D grids not implemented!")

      ! NOTE(review): unreachable after the abort above (kept for template symmetry)
      IF (fun%pw_grid%para%mode /= pw_mode_local) THEN
         CALL fun%pw_grid%para%group%sum(total_fun)
      END IF

      IF (PRESENT(isign)) THEN
         total_fun = total_fun*sign(1._dp, real(isign, dp))
      END IF

   END FUNCTION pw_integrate_function_r3d_gs
1052
1053! **************************************************************************************************
1054!> \brief ...
1055!> \param pw ...
1056!> \param value ...
1057! **************************************************************************************************
1058 SUBROUTINE pw_set_value_r3d_gs (pw, value)
1059 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw
1060 REAL(KIND=dp), INTENT(IN) :: value
1061
1062 CHARACTER(len=*), PARAMETER :: routineN = 'pw_set_value'
1063
1064 INTEGER :: handle
1065
1066 CALL timeset(routinen, handle)
1067
1068!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw,value)
1069 pw%array = value
1070!$OMP END PARALLEL WORKSHARE
1071
1072 CALL timestop(handle)
1073
1074 END SUBROUTINE pw_set_value_r3d_gs
1075
1076! **************************************************************************************************
1077!> \brief Set values of a pw type to zero
1078!> \param pw ...
1079!> \par History
1080!> none
1081!> \author apsi
1082! **************************************************************************************************
1083 SUBROUTINE pw_zero_c1d_rs (pw)
1084
1085 TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw
1086
1087 CHARACTER(len=*), PARAMETER :: routineN = 'pw_zero'
1088
1089 INTEGER :: handle
1090
1091 CALL timeset(routinen, handle)
1092
1093!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
1094 pw%array = cmplx(0.0_dp, 0.0_dp, kind=dp)
1095!$OMP END PARALLEL WORKSHARE
1096
1097 CALL timestop(handle)
1098
1099 END SUBROUTINE pw_zero_c1d_rs
1100
1101! **************************************************************************************************
1102!> \brief multiplies pw coeffs with a number
1103!> \param pw ...
1104!> \param a ...
1105!> \par History
1106!> 11.2004 created [Joost VandeVondele]
1107! **************************************************************************************************
1108 SUBROUTINE pw_scale_c1d_rs (pw, a)
1109
1110 TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw
1111 REAL(KIND=dp), INTENT(IN) :: a
1112
1113 CHARACTER(len=*), PARAMETER :: routineN = 'pw_scale'
1114
1115 INTEGER :: handle
1116
1117 CALL timeset(routinen, handle)
1118
1119!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(A, pw)
1120 pw%array = a*pw%array
1121!$OMP END PARALLEL WORKSHARE
1122
1123 CALL timestop(handle)
1124
1125 END SUBROUTINE pw_scale_c1d_rs
1126
1127! **************************************************************************************************
1128!> \brief writes a small description of the actual grid
1129!> (change to output the data as cube file, maybe with an
1130!> optional long_description arg?)
1131!> \param pw the pw data to output
1132!> \param unit_nr the unit to output to
1133!> \par History
1134!> 08.2002 created [fawzi]
1135!> \author Fawzi Mohamed
1136! **************************************************************************************************
1137 SUBROUTINE pw_write_c1d_rs (pw, unit_nr)
1138
1139 TYPE(pw_c1d_rs_type), INTENT(in) :: pw
1140 INTEGER, INTENT(in) :: unit_nr
1141
1142 INTEGER :: iostatus
1143
1144 WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=iostatus)
1145
1146 WRITE (unit=unit_nr, fmt="(a)", iostat=iostatus) " in_use=c1d"
1147 IF (ASSOCIATED(pw%array)) THEN
1148 WRITE (unit=unit_nr, fmt="(' array=<c(',i8,':',i8,')>')") &
1149 lbound(pw%array, 1), ubound(pw%array, 1)
1150 ELSE
1151 WRITE (unit=unit_nr, fmt="(' array=*null*')")
1152 END IF
1153
1154 END SUBROUTINE pw_write_c1d_rs
1155
1156! **************************************************************************************************
1157!> \brief ...
1158!> \param fun ...
1159!> \param isign ...
1160!> \param oprt ...
1161!> \return ...
1162! **************************************************************************************************
   FUNCTION pw_integrate_function_c1d_rs (fun, isign, oprt) RESULT(total_fun)

      !> Integrate a complex (1d-stored) real-space function over the cell:
      !> total_fun = dvol * sum(real(fun)), or dvol * sum(|fun|) when oprt="ABS".
      TYPE(pw_c1d_rs_type), INTENT(IN) :: fun
      INTEGER, INTENT(IN), OPTIONAL :: isign            ! only its sign is used to flip the result
      CHARACTER(len=*), INTENT(IN), OPTIONAL :: oprt    ! "ABS"/"abs" integrates the modulus
      REAL(kind=dp) :: total_fun

      INTEGER :: iop

      iop = 0

      IF (PRESENT(oprt)) THEN
         SELECT CASE (oprt)
         CASE ("ABS", "abs")
            iop = 1
         CASE DEFAULT
            cpabort("Unknown operator")
         END SELECT
      END IF

      total_fun = 0.0_dp

      ! do reduction using maximum accuracy
      IF (iop == 1) THEN
         total_fun = fun%pw_grid%dvol*accurate_sum(abs(fun%array))
      ELSE
         ! only the real part contributes to the integral
         total_fun = fun%pw_grid%dvol*accurate_sum( real(fun%array, kind=dp))
      END IF

      ! distributed grids: accumulate the partial sums over all ranks
      IF (fun%pw_grid%para%mode /= pw_mode_local) THEN
         CALL fun%pw_grid%para%group%sum(total_fun)
      END IF

      IF (PRESENT(isign)) THEN
         total_fun = total_fun*sign(1._dp, real(isign, dp))
      END IF

   END FUNCTION pw_integrate_function_c1d_rs
1201
1202! **************************************************************************************************
1203!> \brief ...
1204!> \param pw ...
1205!> \param value ...
1206! **************************************************************************************************
1207 SUBROUTINE pw_set_value_c1d_rs (pw, value)
1208 TYPE(pw_c1d_rs_type), INTENT(IN) :: pw
1209 REAL(KIND=dp), INTENT(IN) :: value
1210
1211 CHARACTER(len=*), PARAMETER :: routineN = 'pw_set_value'
1212
1213 INTEGER :: handle
1214
1215 CALL timeset(routinen, handle)
1216
1217!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw,value)
1218 pw%array = cmplx(value, 0.0_dp, kind=dp)
1219!$OMP END PARALLEL WORKSHARE
1220
1221 CALL timestop(handle)
1222
1223 END SUBROUTINE pw_set_value_c1d_rs
1224! **************************************************************************************************
1225!> \brief Set values of a pw type to zero
1226!> \param pw ...
1227!> \par History
1228!> none
1229!> \author apsi
1230! **************************************************************************************************
1231 SUBROUTINE pw_zero_c1d_gs (pw)
1232
1233 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw
1234
1235 CHARACTER(len=*), PARAMETER :: routineN = 'pw_zero'
1236
1237 INTEGER :: handle
1238
1239 CALL timeset(routinen, handle)
1240
1241!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
1242 pw%array = cmplx(0.0_dp, 0.0_dp, kind=dp)
1243!$OMP END PARALLEL WORKSHARE
1244
1245 CALL timestop(handle)
1246
1247 END SUBROUTINE pw_zero_c1d_gs
1248
1249! **************************************************************************************************
1250!> \brief multiplies pw coeffs with a number
1251!> \param pw ...
1252!> \param a ...
1253!> \par History
1254!> 11.2004 created [Joost VandeVondele]
1255! **************************************************************************************************
1256 SUBROUTINE pw_scale_c1d_gs (pw, a)
1257
1258 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw
1259 REAL(KIND=dp), INTENT(IN) :: a
1260
1261 CHARACTER(len=*), PARAMETER :: routineN = 'pw_scale'
1262
1263 INTEGER :: handle
1264
1265 CALL timeset(routinen, handle)
1266
1267!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(A, pw)
1268 pw%array = a*pw%array
1269!$OMP END PARALLEL WORKSHARE
1270
1271 CALL timestop(handle)
1272
1273 END SUBROUTINE pw_scale_c1d_gs
1274
1275! **************************************************************************************************
1276!> \brief writes a small description of the actual grid
1277!> (change to output the data as cube file, maybe with an
1278!> optional long_description arg?)
1279!> \param pw the pw data to output
1280!> \param unit_nr the unit to output to
1281!> \par History
1282!> 08.2002 created [fawzi]
1283!> \author Fawzi Mohamed
1284! **************************************************************************************************
1285 SUBROUTINE pw_write_c1d_gs (pw, unit_nr)
1286
1287 TYPE(pw_c1d_gs_type), INTENT(in) :: pw
1288 INTEGER, INTENT(in) :: unit_nr
1289
1290 INTEGER :: iostatus
1291
1292 WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=iostatus)
1293
1294 WRITE (unit=unit_nr, fmt="(a)", iostat=iostatus) " in_use=c1d"
1295 IF (ASSOCIATED(pw%array)) THEN
1296 WRITE (unit=unit_nr, fmt="(' array=<c(',i8,':',i8,')>')") &
1297 lbound(pw%array, 1), ubound(pw%array, 1)
1298 ELSE
1299 WRITE (unit=unit_nr, fmt="(' array=*null*')")
1300 END IF
1301
1302 END SUBROUTINE pw_write_c1d_gs
1303
1304! **************************************************************************************************
1305!> \brief ...
1306!> \param fun ...
1307!> \param isign ...
1308!> \param oprt ...
1309!> \return ...
1310! **************************************************************************************************
   FUNCTION pw_integrate_function_c1d_gs (fun, isign, oprt) RESULT(total_fun)

      !> Integrate a function given by its g-space coefficients.
      !> In reciprocal space the cell integral is vol * f(G=0), so only the
      !> rank that owns the G=0 coefficient (have_g0, stored at index 1) contributes.
      TYPE(pw_c1d_gs_type), INTENT(IN) :: fun
      INTEGER, INTENT(IN), OPTIONAL :: isign            ! only its sign is used to flip the result
      CHARACTER(len=*), INTENT(IN), OPTIONAL :: oprt    ! "ABS"/"abs" requested -> abort (unsupported in g-space)
      REAL(kind=dp) :: total_fun

      INTEGER :: iop

      iop = 0

      IF (PRESENT(oprt)) THEN
         SELECT CASE (oprt)
         CASE ("ABS", "abs")
            iop = 1
         CASE DEFAULT
            cpabort("Unknown operator")
         END SELECT
      END IF

      total_fun = 0.0_dp

      ! |f| integration cannot be done on reciprocal-space coefficients
      IF (iop == 1) &
         cpabort("Operator ABS not implemented")
      ! only the real part of the G=0 coefficient carries the integral
      IF (fun%pw_grid%have_g0) total_fun = fun%pw_grid%vol* real(fun%array(1), kind=dp)

      ! distribute the value to all ranks of the parallel group
      IF (fun%pw_grid%para%mode /= pw_mode_local) THEN
         CALL fun%pw_grid%para%group%sum(total_fun)
      END IF

      IF (PRESENT(isign)) THEN
         total_fun = total_fun*sign(1._dp, real(isign, dp))
      END IF

   END FUNCTION pw_integrate_function_c1d_gs
1346
1347! **************************************************************************************************
1348!> \brief ...
1349!> \param pw ...
1350!> \param value ...
1351! **************************************************************************************************
1352 SUBROUTINE pw_set_value_c1d_gs (pw, value)
1353 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
1354 REAL(KIND=dp), INTENT(IN) :: value
1355
1356 CHARACTER(len=*), PARAMETER :: routineN = 'pw_set_value'
1357
1358 INTEGER :: handle
1359
1360 CALL timeset(routinen, handle)
1361
1362!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw,value)
1363 pw%array = cmplx(value, 0.0_dp, kind=dp)
1364!$OMP END PARALLEL WORKSHARE
1365
1366 CALL timestop(handle)
1367
1368 END SUBROUTINE pw_set_value_c1d_gs
1369
1370! **************************************************************************************************
1371!> \brief ...
1372!> \param pw ...
1373!> \param c ...
1374!> \param scale ...
1375! **************************************************************************************************
   SUBROUTINE pw_gather_p_c1d (pw, c, scale)

      !> Gather the plane-wave coefficients from the distributed FFT scratch
      !> array c into the g-vector ordered 1d array pw%array, optionally
      !> multiplying by scale on the fly. Requires a distributed grid.
      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, INTENT(IN) :: c   ! (x index, packed yz-ray column)
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale                     ! optional factor applied while gathering

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_gather_p'

      INTEGER :: gpt, handle, l, m, mn, n

      CALL timeset(routinen, handle)

      ! the yzq ray map only exists for ray-distributed grids
      IF (pw%pw_grid%para%mode /= pw_mode_distributed) THEN
         cpabort("This grid type is not distributed")
      END IF

      ! mapl/mapm/mapn translate Miller indices (ghat) to local grid positions;
      ! yzq packs an (m,n) pair into the yz-ray column index of c
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ngpts => SIZE(pw%pw_grid%gsq), ghat => pw%pw_grid%g_hat, yzq => pw%pw_grid%para%yzq)

         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO DEFAULT(NONE), &
!$OMP             PRIVATE(l, m, mn, n), &
!$OMP             SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               mn = yzq(m, n)
               pw%array(gpt) = scale* c(l, mn)
            END DO
!$OMP END PARALLEL DO
         ELSE
!$OMP PARALLEL DO DEFAULT(NONE), &
!$OMP             PRIVATE(l, m, mn, n), &
!$OMP             SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               mn = yzq(m, n)
               pw%array(gpt) = c(l, mn)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      CALL timestop(handle)

   END SUBROUTINE pw_gather_p_c1d
1426
1427! **************************************************************************************************
1428!> \brief ...
1429!> \param pw ...
1430!> \param c ...
1431!> \param scale ...
1432! **************************************************************************************************
   SUBROUTINE pw_scatter_p_c1d (pw, c, scale)

      !> Scatter the g-vector ordered coefficients pw%array back into the
      !> distributed FFT scratch array c (inverse of pw_gather_p_c1d).
      !> For halfspace grids a second pass fills the -G entries with the
      !> complex conjugates. Requires a distributed grid.
      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, INTENT(INOUT) :: c   ! (x index, packed yz-ray column)
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale                        ! optional factor applied while scattering

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_scatter_p'

      INTEGER :: gpt, handle, l, m, mn, n

      CALL timeset(routinen, handle)

      IF (pw%pw_grid%para%mode /= pw_mode_distributed) THEN
         cpabort("This grid type is not distributed")
      END IF

      ! mapl/mapm/mapn translate Miller indices (ghat) to local grid positions;
      ! yzq packs an (m,n) pair into the yz-ray column index of c
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ghat => pw%pw_grid%g_hat, yzq => pw%pw_grid%para%yzq, ngpts => SIZE(pw%pw_grid%gsq))

         ! entries of c not addressed below stay zero; NOTE(review): when scale
         ! is present c is NOT pre-zeroed - presumably callers rely on this, confirm
         IF (.NOT. PRESENT(scale)) c = z_zero

         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO DEFAULT(NONE), &
!$OMP             PRIVATE(l, m, mn, n), &
!$OMP             SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               mn = yzq(m, n)
               c(l, mn) = scale*pw%array(gpt)
            END DO
!$OMP END PARALLEL DO
         ELSE
!$OMP PARALLEL DO DEFAULT(NONE), &
!$OMP             PRIVATE(l, m, mn, n), &
!$OMP             SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               mn = yzq(m, n)
               c(l, mn) = pw%array(gpt)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      ! halfspace grids store only +G; reconstruct the -G half via the
      ! negative-index maps and the conjugate symmetry of real functions
      IF (pw%pw_grid%grid_span == halfspace) THEN

         associate(mapm => pw%pw_grid%mapm%neg, mapn => pw%pw_grid%mapn%neg, mapl => pw%pw_grid%mapl%neg, &
                   ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq), yzq => pw%pw_grid%para%yzq)

            IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO DEFAULT(NONE), &
!$OMP             PRIVATE(l, m, mn, n), &
!$OMP             SHARED(c, pw, scale)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  mn = yzq(m, n)
                  c(l, mn) = scale*conjg( pw%array(gpt))
               END DO
!$OMP END PARALLEL DO
            ELSE
!$OMP PARALLEL DO DEFAULT(NONE), &
!$OMP             PRIVATE(l, m, mn, n) &
!$OMP             SHARED(c, pw)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  mn = yzq(m, n)
                  c(l, mn) = conjg( pw%array(gpt))
               END DO
!$OMP END PARALLEL DO
            END IF
         END associate
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_scatter_p_c1d
1517! **************************************************************************************************
1518!> \brief Set values of a pw type to zero
1519!> \param pw ...
1520!> \par History
1521!> none
1522!> \author apsi
1523! **************************************************************************************************
1524 SUBROUTINE pw_zero_c3d_rs (pw)
1525
1526 TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw
1527
1528 CHARACTER(len=*), PARAMETER :: routineN = 'pw_zero'
1529
1530 INTEGER :: handle
1531
1532 CALL timeset(routinen, handle)
1533
1534!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
1535 pw%array = cmplx(0.0_dp, 0.0_dp, kind=dp)
1536!$OMP END PARALLEL WORKSHARE
1537
1538 CALL timestop(handle)
1539
1540 END SUBROUTINE pw_zero_c3d_rs
1541
1542! **************************************************************************************************
1543!> \brief multiplies pw coeffs with a number
1544!> \param pw ...
1545!> \param a ...
1546!> \par History
1547!> 11.2004 created [Joost VandeVondele]
1548! **************************************************************************************************
1549 SUBROUTINE pw_scale_c3d_rs (pw, a)
1550
1551 TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw
1552 REAL(KIND=dp), INTENT(IN) :: a
1553
1554 CHARACTER(len=*), PARAMETER :: routineN = 'pw_scale'
1555
1556 INTEGER :: handle
1557
1558 CALL timeset(routinen, handle)
1559
1560!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(A, pw)
1561 pw%array = a*pw%array
1562!$OMP END PARALLEL WORKSHARE
1563
1564 CALL timestop(handle)
1565
1566 END SUBROUTINE pw_scale_c3d_rs
1567
1568! **************************************************************************************************
1569!> \brief writes a small description of the actual grid
1570!> (change to output the data as cube file, maybe with an
1571!> optional long_description arg?)
1572!> \param pw the pw data to output
1573!> \param unit_nr the unit to output to
1574!> \par History
1575!> 08.2002 created [fawzi]
1576!> \author Fawzi Mohamed
1577! **************************************************************************************************
1578 SUBROUTINE pw_write_c3d_rs (pw, unit_nr)
1579
1580 TYPE(pw_c3d_rs_type), INTENT(in) :: pw
1581 INTEGER, INTENT(in) :: unit_nr
1582
1583 INTEGER :: iostatus
1584
1585 WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=iostatus)
1586
1587 WRITE (unit=unit_nr, fmt="(a)", iostat=iostatus) " in_use=c3d"
1588 IF (ASSOCIATED(pw%array)) THEN
1589 WRITE (unit=unit_nr, fmt="(' array=<c(',i8,':',i8,',',i8,':',i8,',',i8,':',i8,')>')") &
1590 lbound(pw%array, 1), ubound(pw%array, 1), lbound(pw%array, 2), ubound(pw%array, 2), &
1591 lbound(pw%array, 3), ubound(pw%array, 3)
1592 ELSE
1593 WRITE (unit=unit_nr, fmt="(' array=*null*')")
1594 END IF
1595
1596 END SUBROUTINE pw_write_c3d_rs
1597
1598! **************************************************************************************************
1599!> \brief ...
1600!> \param fun ...
1601!> \param isign ...
1602!> \param oprt ...
1603!> \return ...
1604! **************************************************************************************************
   FUNCTION pw_integrate_function_c3d_rs (fun, isign, oprt) RESULT(total_fun)

      !> Integrate a complex (3d-stored) real-space function over the cell:
      !> total_fun = dvol * sum(real(fun)), or dvol * sum(|fun|) when oprt="ABS".
      TYPE(pw_c3d_rs_type), INTENT(IN) :: fun
      INTEGER, INTENT(IN), OPTIONAL :: isign            ! only its sign is used to flip the result
      CHARACTER(len=*), INTENT(IN), OPTIONAL :: oprt    ! "ABS"/"abs" integrates the modulus
      REAL(kind=dp) :: total_fun

      INTEGER :: iop

      iop = 0

      IF (PRESENT(oprt)) THEN
         SELECT CASE (oprt)
         CASE ("ABS", "abs")
            iop = 1
         CASE DEFAULT
            cpabort("Unknown operator")
         END SELECT
      END IF

      total_fun = 0.0_dp

      ! do reduction using maximum accuracy
      IF (iop == 1) THEN
         total_fun = fun%pw_grid%dvol*accurate_sum(abs(fun%array))
      ELSE
         ! only the real part contributes to the integral
         total_fun = fun%pw_grid%dvol*accurate_sum( real(fun%array, kind=dp))
      END IF

      ! distributed grids: accumulate the partial sums over all ranks
      IF (fun%pw_grid%para%mode /= pw_mode_local) THEN
         CALL fun%pw_grid%para%group%sum(total_fun)
      END IF

      IF (PRESENT(isign)) THEN
         total_fun = total_fun*sign(1._dp, real(isign, dp))
      END IF

   END FUNCTION pw_integrate_function_c3d_rs
1643
1644! **************************************************************************************************
1645!> \brief ...
1646!> \param pw ...
1647!> \param value ...
1648! **************************************************************************************************
1649 SUBROUTINE pw_set_value_c3d_rs (pw, value)
1650 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw
1651 REAL(KIND=dp), INTENT(IN) :: value
1652
1653 CHARACTER(len=*), PARAMETER :: routineN = 'pw_set_value'
1654
1655 INTEGER :: handle
1656
1657 CALL timeset(routinen, handle)
1658
1659!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw,value)
1660 pw%array = cmplx(value, 0.0_dp, kind=dp)
1661!$OMP END PARALLEL WORKSHARE
1662
1663 CALL timestop(handle)
1664
1665 END SUBROUTINE pw_set_value_c3d_rs
1666! **************************************************************************************************
1667!> \brief Set values of a pw type to zero
1668!> \param pw ...
1669!> \par History
1670!> none
1671!> \author apsi
1672! **************************************************************************************************
1673 SUBROUTINE pw_zero_c3d_gs (pw)
1674
1675 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw
1676
1677 CHARACTER(len=*), PARAMETER :: routineN = 'pw_zero'
1678
1679 INTEGER :: handle
1680
1681 CALL timeset(routinen, handle)
1682
1683!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw)
1684 pw%array = cmplx(0.0_dp, 0.0_dp, kind=dp)
1685!$OMP END PARALLEL WORKSHARE
1686
1687 CALL timestop(handle)
1688
1689 END SUBROUTINE pw_zero_c3d_gs
1690
1691! **************************************************************************************************
1692!> \brief multiplies pw coeffs with a number
1693!> \param pw ...
1694!> \param a ...
1695!> \par History
1696!> 11.2004 created [Joost VandeVondele]
1697! **************************************************************************************************
1698 SUBROUTINE pw_scale_c3d_gs (pw, a)
1699
1700 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw
1701 REAL(KIND=dp), INTENT(IN) :: a
1702
1703 CHARACTER(len=*), PARAMETER :: routineN = 'pw_scale'
1704
1705 INTEGER :: handle
1706
1707 CALL timeset(routinen, handle)
1708
1709!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(A, pw)
1710 pw%array = a*pw%array
1711!$OMP END PARALLEL WORKSHARE
1712
1713 CALL timestop(handle)
1714
1715 END SUBROUTINE pw_scale_c3d_gs
1716
1717! **************************************************************************************************
1718!> \brief writes a small description of the actual grid
1719!> (change to output the data as cube file, maybe with an
1720!> optional long_description arg?)
1721!> \param pw the pw data to output
1722!> \param unit_nr the unit to output to
1723!> \par History
1724!> 08.2002 created [fawzi]
1725!> \author Fawzi Mohamed
1726! **************************************************************************************************
1727 SUBROUTINE pw_write_c3d_gs (pw, unit_nr)
1728
1729 TYPE(pw_c3d_gs_type), INTENT(in) :: pw
1730 INTEGER, INTENT(in) :: unit_nr
1731
1732 INTEGER :: iostatus
1733
1734 WRITE (unit=unit_nr, fmt="('<pw>:{ ')", iostat=iostatus)
1735
1736 WRITE (unit=unit_nr, fmt="(a)", iostat=iostatus) " in_use=c3d"
1737 IF (ASSOCIATED(pw%array)) THEN
1738 WRITE (unit=unit_nr, fmt="(' array=<c(',i8,':',i8,',',i8,':',i8,',',i8,':',i8,')>')") &
1739 lbound(pw%array, 1), ubound(pw%array, 1), lbound(pw%array, 2), ubound(pw%array, 2), &
1740 lbound(pw%array, 3), ubound(pw%array, 3)
1741 ELSE
1742 WRITE (unit=unit_nr, fmt="(' array=*null*')")
1743 END IF
1744
1745 END SUBROUTINE pw_write_c3d_gs
1746
1747! **************************************************************************************************
1748!> \brief ...
1749!> \param fun ...
1750!> \param isign ...
1751!> \param oprt ...
1752!> \return ...
1753! **************************************************************************************************
   FUNCTION pw_integrate_function_c3d_gs (fun, isign, oprt) RESULT(total_fun)

      !> Integration of a function stored as 3d reciprocal-space data.
      !> NOT IMPLEMENTED: this routine always aborts via the unconditional
      !> cpabort below; everything after it is unreachable template code.
      TYPE(pw_c3d_gs_type), INTENT(IN) :: fun
      INTEGER, INTENT(IN), OPTIONAL :: isign            ! only its sign would flip the result
      CHARACTER(len=*), INTENT(IN), OPTIONAL :: oprt    ! "ABS"/"abs" selects modulus integration
      REAL(kind=dp) :: total_fun

      INTEGER :: iop

      iop = 0

      IF (PRESENT(oprt)) THEN
         SELECT CASE (oprt)
         CASE ("ABS", "abs")
            iop = 1
         CASE DEFAULT
            cpabort("Unknown operator")
         END SELECT
      END IF

      total_fun = 0.0_dp

      IF (iop == 1) &
         cpabort("Operator ABS not implemented")
      ! unconditional abort: no reciprocal-space integration for 3d grids
      cpabort("Reciprocal space integration for 3D grids not implemented!")

      ! NOTE(review): unreachable after the abort above (kept for template symmetry)
      IF (fun%pw_grid%para%mode /= pw_mode_local) THEN
         CALL fun%pw_grid%para%group%sum(total_fun)
      END IF

      IF (PRESENT(isign)) THEN
         total_fun = total_fun*sign(1._dp, real(isign, dp))
      END IF

   END FUNCTION pw_integrate_function_c3d_gs
1789
1790! **************************************************************************************************
1791!> \brief ...
1792!> \param pw ...
1793!> \param value ...
1794! **************************************************************************************************
1795 SUBROUTINE pw_set_value_c3d_gs (pw, value)
1796 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw
1797 REAL(KIND=dp), INTENT(IN) :: value
1798
1799 CHARACTER(len=*), PARAMETER :: routineN = 'pw_set_value'
1800
1801 INTEGER :: handle
1802
1803 CALL timeset(routinen, handle)
1804
1805!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw,value)
1806 pw%array = cmplx(value, 0.0_dp, kind=dp)
1807!$OMP END PARALLEL WORKSHARE
1808
1809 CALL timestop(handle)
1810
1811 END SUBROUTINE pw_set_value_c3d_gs
1812
1813
1814! **************************************************************************************************
1815!> \brief copy a pw type variable
1816!> \param pw1 ...
1817!> \param pw2 ...
1818!> \par History
1819!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
1820!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
1821!> JGH (21-Feb-2003) : Code for generalized reference grids
1822!> \author apsi
1823!> \note
1824!> Currently only copying of respective types allowed,
1825!> in order to avoid errors
1826! **************************************************************************************************
   SUBROUTINE pw_copy_r1d_r1d_rs (pw1, pw2)

      !> Copy the coefficients of pw1 into pw2 (r1d realspace data).
      !> Same grid: plain array copy. Different grids: the copy goes through the
      !> reference-grid index tables (gidx), truncating or zero-padding as needed.
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_rs_type), INTENT(INOUT) :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER :: handle
      INTEGER :: i, j, ng, ng1, ng2, ns

      CALL timeset(routinen, handle)

      ! sanity checks: realspace data can never live on spherical grids
      IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")
      IF (pw1%pw_grid%spherical) &
         cpabort("Spherical grids only exist in reciprocal space!")

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         ! NOTE(review): dead branch - a spherical pw1 grid aborts above,
         ! so this spherical-to-spherical copy can never be reached here
         IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
            IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
               ng1 = SIZE(pw1%array)
               ng2 = SIZE(pw2%array)
               ng = min(ng1, ng2)
               ! copy the overlapping range, zero-pad the remainder of pw2
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
               pw2%array(1:ng) = pw1%array(1:ng)
!$OMP END PARALLEL WORKSHARE
               IF (ng2 > ng) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
                  pw2%array(ng + 1:ng2) = 0.0_dp
!$OMP END PARALLEL WORKSHARE
               END IF
            ELSE
               cpabort("Copies between spherical grids require compatible grids!")
            END IF
         ELSE
            ng1 = SIZE(pw1%array)
            ng2 = SIZE(pw2%array)
            ! NOTE(review): ns is computed but never used in this routine
            ns = 2*max(ng1, ng2)

            ! one grid must reference the other; gidx maps the smaller
            ! (reference-derived) grid's indices into the larger grid
            IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
               IF (ng1 >= ng2) THEN
                  ! pw1 is the larger grid: gather into pw2
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  ! pw2 is the larger grid: zero it, then scatter pw1 into it
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
               ! same as above but the index table lives on pw1's grid
               IF (ng1 >= ng2) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               cpabort("Copy not implemented!")
            END IF

         END IF

      ELSE
         ! identical grid object: straight element-wise copy
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
         pw2%array = pw1%array
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_copy_r1d_r1d_rs
1915
1916! **************************************************************************************************
1917!> \brief ...
1918!> \param pw ...
1919!> \param array ...
1920! **************************************************************************************************
1921 SUBROUTINE pw_copy_to_array_r1d_r1d_rs (pw, array)
1922 TYPE(pw_r1d_rs_type), INTENT(IN) :: pw
1923 REAL(KIND=dp), DIMENSION(:), INTENT(INOUT) :: array
1924
1925 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
1926
1927 INTEGER :: handle
1928
1929 CALL timeset(routinen, handle)
1930
1931!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
1932 array(:) = pw%array(:)
1933!$OMP END PARALLEL WORKSHARE
1934
1935 CALL timestop(handle)
1936 END SUBROUTINE pw_copy_to_array_r1d_r1d_rs
1937
1938! **************************************************************************************************
1939!> \brief ...
1940!> \param pw ...
1941!> \param array ...
1942! **************************************************************************************************
1943 SUBROUTINE pw_copy_from_array_r1d_r1d_rs (pw, array)
1944 TYPE(pw_r1d_rs_type), INTENT(IN) :: pw
1945 REAL(KIND=dp), DIMENSION(:), INTENT(IN) :: array
1946
1947 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
1948
1949 INTEGER :: handle
1950
1951 CALL timeset(routinen, handle)
1952
1953!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
1954 pw%array = array
1955!$OMP END PARALLEL WORKSHARE
1956
1957 CALL timestop(handle)
1958 END SUBROUTINE pw_copy_from_array_r1d_r1d_rs
1959
! **************************************************************************************************
!> \brief pw2 = alpha*pw1 + beta*pw2
!>        alpha defaults to 1, beta defaults to 1
!> \param pw1 pw that is added (unchanged)
!> \param pw2 pw that is updated in place
!> \param alpha optional scaling factor for pw1 (default 1.0)
!> \param beta optional scaling factor applied to pw2 before the addition (default 1.0)
!> \param allow_noncompatible_grids if .TRUE., accept any grid pair for the
!>        truncated element-wise accumulation (default .FALSE.)
!> \par History
!>      JGH (21-Feb-2003) : added reference grid functionality
!>      JGH (01-Dec-2007) : rename and remove complex alpha
!> \author apsi
!> \note
!>      Currently only summing up of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_axpy_r1d_r1d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_r1d_rs_type), INTENT(IN)                   :: pw1
      TYPE(pw_r1d_rs_type), INTENT(INOUT)                :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL                :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL                      :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER                                            :: handle
      LOGICAL                                            :: my_allow_noncompatible_grids
      REAL(KIND=dp)                                      :: my_alpha, my_beta
      INTEGER                                            :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! first scale pw2 in place: pw2 <- beta*pw2 (beta == 0 zeroes without reading)
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! identical grid object: plain element-wise accumulation
         ! (alpha == 0 leaves pw2 unchanged, so both branches may be skipped)
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         IF (pw1%pw_grid%spherical) THEN
            ! spherical grids: the leading ng coefficients share the same ordering
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw1 lives on the reference grid of pw2: remap through pw2's gidx
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! pw2 lives on the reference grid of pw1: remap through pw1's gidx
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_r1d_r1d_rs
2124
! **************************************************************************************************
!> \brief pw_out = pw_out + alpha * pw1 * pw2
!>        alpha defaults to 1
!> \param pw_out accumulator pw, updated in place
!> \param pw1 first factor
!> \param pw2 second factor
!> \param alpha optional scaling of the product (default 1.0)
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_multiply_r1d_r1d_rs (pw_out, pw1, pw2, alpha)

      TYPE(pw_r1d_rs_type), INTENT(INOUT)                :: pw_out
      TYPE(pw_r1d_rs_type), INTENT(IN)                   :: pw1
      TYPE(pw_r1d_rs_type), INTENT(IN)                   :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL                :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER                                            :: handle
      REAL(KIND=dp)                                      :: fac

      CALL timeset(routineN, handle)

      ! all three pws must share the very same grid object
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      fac = 1.0_dp
      IF (PRESENT(alpha)) fac = alpha

      IF (fac == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(fac, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + fac*pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_r1d_r1d_rs
2167
! **************************************************************************************************
!> \brief In-place element-wise multiplication: pw1 = pw1 * pw2
!> \param pw1 pw that is scaled in place
!> \param pw2 multiplier (must live on the same grid object)
! **************************************************************************************************
   SUBROUTINE pw_multiply_with_r1d_r1d_rs (pw1, pw2)
      TYPE(pw_r1d_rs_type), INTENT(INOUT)                :: pw1
      TYPE(pw_r1d_rs_type), INTENT(IN)                   :: pw2

      CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'

      INTEGER                                            :: handle, ig

      CALL timeset(routineN, handle)

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Incompatible grids!")

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(pw1, pw2)
      DO ig = 1, SIZE(pw1%array)
         pw1%array(ig) = pw1%array(ig)*pw2%array(ig)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_with_r1d_r1d_rs
2193
! **************************************************************************************************
!> \brief Calculate integral over unit cell for functions in plane wave basis
!>        only returns the real part of it ......
!> \param pw1 first function
!> \param pw2 second function (must share pw1's grid object)
!> \param sumtype summation scheme (default do_accurate_sum)
!> \param just_sum if .TRUE., skip the volume-element scaling (default .FALSE.)
!> \param local_only if .TRUE., skip the parallel reduction (default .FALSE.)
!> \return the (real part of the) integral
!> \par History
!>      JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
!> \author apsi
! **************************************************************************************************
   FUNCTION pw_integral_ab_r1d_r1d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_r1d_rs_type), INTENT(IN)                   :: pw1
      TYPE(pw_r1d_rs_type), INTENT(IN)                   :: pw2
      INTEGER, INTENT(IN), OPTIONAL                      :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL                      :: just_sum, local_only
      REAL(kind=dp)                                      :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab'

      INTEGER                                            :: handle, my_sumtype
      LOGICAL                                            :: scale_by_volume, sum_over_ranks

      CALL timeset(routinen, handle)

      my_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) my_sumtype = sumtype

      sum_over_ranks = .TRUE.
      IF (PRESENT(local_only)) sum_over_ranks = .NOT. local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      scale_by_volume = .TRUE.
      IF (PRESENT(just_sum)) scale_by_volume = .NOT. just_sum

      IF (my_sumtype == do_standard_sum) THEN
         ! plain dot product
         integral_value = dot_product(pw1%array, pw2%array)
      ELSE
         ! compensated (accurate) summation
         integral_value = accurate_dot_product(pw1%array, pw2%array)
      END IF

      IF (scale_by_volume) THEN
         integral_value = integral_value*pw1%pw_grid%dvol
      END IF

      IF (pw1%pw_grid%grid_span == halfspace) THEN
         ! each stored coefficient stands for itself and its mirror image;
         ! the G=0 term has no mirror, so subtract its double counting
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   pw1%array(1)*pw2%array(1)
      END IF

      IF (sum_over_ranks .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_r1d_r1d_rs
! **************************************************************************************************
!> \brief copy a pw type variable
!> \param pw1 source pw
!> \param pw2 destination pw, overwritten
!> \par History
!>      JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
!>                         in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
!>      JGH (21-Feb-2003) : Code for generalized reference grids
!> \author apsi
!> \note
!>      Currently only copying of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_copy_r1d_r1d_gs (pw1, pw2)

      TYPE(pw_r1d_gs_type), INTENT(IN)                   :: pw1
      TYPE(pw_r1d_gs_type), INTENT(INOUT)                :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER                                            :: handle
      INTEGER                                            :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
            ! spherical grids order coefficients identically: copy the common
            ! leading part, zero-pad the rest of pw2
            IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
               ng1 = SIZE(pw1%array)
               ng2 = SIZE(pw2%array)
               ng = min(ng1, ng2)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
               pw2%array(1:ng) = pw1%array(1:ng)
!$OMP END PARALLEL WORKSHARE
               IF (ng2 > ng) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
                  pw2%array(ng + 1:ng2) = 0.0_dp
!$OMP END PARALLEL WORKSHARE
               END IF
            ELSE
               cpabort("Copies between spherical grids require compatible grids!")
            END IF
         ELSE
            ng1 = SIZE(pw1%array)
            ng2 = SIZE(pw2%array)

            IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
               ! pw1 lives on the reference grid of pw2: remap through pw2's gidx
               IF (ng1 >= ng2) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
               ! pw2 lives on the reference grid of pw1: remap through pw1's gidx
               IF (ng1 >= ng2) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               cpabort("Copy not implemented!")
            END IF

         END IF

      ELSE
         ! same grid object: straight copy
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
         pw2%array = pw1%array
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_copy_r1d_r1d_gs
2364
! **************************************************************************************************
!> \brief Copy the coefficient array of a g-space 1d real pw into a bare array
!> \param pw source pw whose data array is read
!> \param array destination; must be at least as large as pw%array
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_r1d_r1d_gs (pw, array)
      TYPE(pw_r1d_gs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), DIMENSION(:), INTENT(INOUT)         :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'

      INTEGER                                            :: handle, ig

      CALL timeset(routineN, handle)

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(array, pw)
      DO ig = 1, SIZE(pw%array)
         array(ig) = pw%array(ig)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)
   END SUBROUTINE pw_copy_to_array_r1d_r1d_gs
2386
! **************************************************************************************************
!> \brief Fill the coefficient array of a g-space 1d real pw from a bare array
!> \param pw pw whose data array is overwritten (array component is modified)
!> \param array source values; must conform to pw%array
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_r1d_r1d_gs (pw, array)
      TYPE(pw_r1d_gs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), DIMENSION(:), INTENT(IN)            :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER                                            :: handle, ig

      CALL timeset(routineN, handle)

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(array, pw)
      DO ig = 1, SIZE(pw%array)
         pw%array(ig) = array(ig)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)
   END SUBROUTINE pw_copy_from_array_r1d_r1d_gs
2408
! **************************************************************************************************
!> \brief pw2 = alpha*pw1 + beta*pw2
!>        alpha defaults to 1, beta defaults to 1
!> \param pw1 pw that is added (unchanged)
!> \param pw2 pw that is updated in place
!> \param alpha optional scaling factor for pw1 (default 1.0)
!> \param beta optional scaling factor applied to pw2 before the addition (default 1.0)
!> \param allow_noncompatible_grids if .TRUE., accept any grid pair for the
!>        truncated element-wise accumulation (default .FALSE.)
!> \par History
!>      JGH (21-Feb-2003) : added reference grid functionality
!>      JGH (01-Dec-2007) : rename and remove complex alpha
!> \author apsi
!> \note
!>      Currently only summing up of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_axpy_r1d_r1d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_r1d_gs_type), INTENT(IN)                   :: pw1
      TYPE(pw_r1d_gs_type), INTENT(INOUT)                :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL                :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL                      :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER                                            :: handle
      LOGICAL                                            :: my_allow_noncompatible_grids
      REAL(KIND=dp)                                      :: my_alpha, my_beta
      INTEGER                                            :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! first scale pw2 in place: pw2 <- beta*pw2 (beta == 0 zeroes without reading)
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! identical grid object: plain element-wise accumulation
         ! (alpha == 0 leaves pw2 unchanged, so both branches may be skipped)
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         IF (pw1%pw_grid%spherical) THEN
            ! spherical grids: the leading ng coefficients share the same ordering
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw1 lives on the reference grid of pw2: remap through pw2's gidx
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! pw2 lives on the reference grid of pw1: remap through pw1's gidx
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_r1d_r1d_gs
2573
! **************************************************************************************************
!> \brief pw_out = pw_out + alpha * pw1 * pw2
!>        alpha defaults to 1
!> \param pw_out accumulator pw, updated in place
!> \param pw1 first factor
!> \param pw2 second factor
!> \param alpha optional scaling of the product (default 1.0)
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_multiply_r1d_r1d_gs (pw_out, pw1, pw2, alpha)

      TYPE(pw_r1d_gs_type), INTENT(INOUT)                :: pw_out
      TYPE(pw_r1d_gs_type), INTENT(IN)                   :: pw1
      TYPE(pw_r1d_gs_type), INTENT(IN)                   :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL                :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER                                            :: handle
      REAL(KIND=dp)                                      :: fac

      CALL timeset(routineN, handle)

      ! all three pws must share the very same grid object
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      fac = 1.0_dp
      IF (PRESENT(alpha)) fac = alpha

      IF (fac == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(fac, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + fac*pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_r1d_r1d_gs
2616
! **************************************************************************************************
!> \brief In-place element-wise multiplication: pw1 = pw1 * pw2
!> \param pw1 pw that is scaled in place
!> \param pw2 multiplier (must live on the same grid object)
! **************************************************************************************************
   SUBROUTINE pw_multiply_with_r1d_r1d_gs (pw1, pw2)
      TYPE(pw_r1d_gs_type), INTENT(INOUT)                :: pw1
      TYPE(pw_r1d_gs_type), INTENT(IN)                   :: pw2

      CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'

      INTEGER                                            :: handle, ig

      CALL timeset(routineN, handle)

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Incompatible grids!")

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(pw1, pw2)
      DO ig = 1, SIZE(pw1%array)
         pw1%array(ig) = pw1%array(ig)*pw2%array(ig)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_with_r1d_r1d_gs
2642
! **************************************************************************************************
!> \brief Calculate integral over unit cell for functions in plane wave basis
!>        only returns the real part of it ......
!> \param pw1 first function
!> \param pw2 second function (must share pw1's grid object)
!> \param sumtype summation scheme (default do_accurate_sum)
!> \param just_sum if .TRUE., skip the volume scaling (default .FALSE.)
!> \param local_only if .TRUE., skip the parallel reduction (default .FALSE.)
!> \return the (real part of the) integral
!> \par History
!>      JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
!> \author apsi
! **************************************************************************************************
   FUNCTION pw_integral_ab_r1d_r1d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_r1d_gs_type), INTENT(IN)                   :: pw1
      TYPE(pw_r1d_gs_type), INTENT(IN)                   :: pw2
      INTEGER, INTENT(IN), OPTIONAL                      :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL                      :: just_sum, local_only
      REAL(kind=dp)                                      :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab'

      INTEGER                                            :: handle, my_sumtype
      LOGICAL                                            :: scale_by_volume, sum_over_ranks

      CALL timeset(routinen, handle)

      my_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) my_sumtype = sumtype

      sum_over_ranks = .TRUE.
      IF (PRESENT(local_only)) sum_over_ranks = .NOT. local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      scale_by_volume = .TRUE.
      IF (PRESENT(just_sum)) scale_by_volume = .NOT. just_sum

      IF (my_sumtype == do_standard_sum) THEN
         ! plain dot product
         integral_value = dot_product(pw1%array, pw2%array)
      ELSE
         ! compensated (accurate) summation
         integral_value = accurate_dot_product(pw1%array, pw2%array)
      END IF

      IF (scale_by_volume) THEN
         integral_value = integral_value*pw1%pw_grid%vol
      END IF

      IF (pw1%pw_grid%grid_span == halfspace) THEN
         ! each stored coefficient stands for itself and its mirror image;
         ! the G=0 term has no mirror, so subtract its double counting
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   pw1%array(1)*pw2%array(1)
      END IF

      IF (sum_over_ranks .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_r1d_r1d_gs
2714
! **************************************************************************************************
!> \brief Integral of the product of two g-space functions weighted by G^2
!> \param pw1 first function
!> \param pw2 second function (must share pw1's grid object)
!> \return the integral value
! **************************************************************************************************
   FUNCTION pw_integral_a2b_r1d_r1d (pw1, pw2) RESULT(integral_value)

      TYPE(pw_r1d_gs_type), INTENT(IN)                   :: pw1
      TYPE(pw_r1d_gs_type), INTENT(IN)                   :: pw2
      REAL(kind=dp)                                      :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_a2b'

      INTEGER                                            :: handle

      CALL timeset(routinen, handle)

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Grids incompatible")

      ! accumulate sum_G |G|^2 * a(G) * b(G) with compensated summation
      integral_value = accurate_sum(pw1%array*pw2%array*pw1%pw_grid%gsq)

      ! halfspace storage keeps only half of the G vectors
      ! (no G=0 correction here — presumably gsq vanishes at G=0; verify)
      IF (pw1%pw_grid%grid_span == halfspace) integral_value = 2.0_dp*integral_value

      integral_value = integral_value*pw1%pw_grid%vol

      IF (pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)
      CALL timestop(handle)

   END FUNCTION pw_integral_a2b_r1d_r1d
! **************************************************************************************************
!> \brief copy a pw type variable (real 1d data into complex 1d data,
!>        imaginary parts set to zero)
!> \param pw1 source pw (real coefficients)
!> \param pw2 destination pw (complex coefficients), overwritten
!> \par History
!>      JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
!>                         in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
!>      JGH (21-Feb-2003) : Code for generalized reference grids
!> \author apsi
!> \note
!>      Currently only copying of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_copy_r1d_c1d_rs (pw1, pw2)

      TYPE(pw_r1d_rs_type), INTENT(IN)                   :: pw1
      TYPE(pw_c1d_rs_type), INTENT(INOUT)                :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER                                            :: handle
      INTEGER                                            :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")
      IF (pw1%pw_grid%spherical) &
         cpabort("Spherical grids only exist in reciprocal space!")

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         ! NOTE(review): given the aborts above, neither grid can be spherical
         ! here, so the following branch looks unreachable; kept for symmetry
         ! with the other template instantiations — confirm before removing.
         IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
            IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
               ng1 = SIZE(pw1%array)
               ng2 = SIZE(pw2%array)
               ng = min(ng1, ng2)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
               pw2%array(1:ng) = cmplx(pw1%array(1:ng), 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
               IF (ng2 > ng) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
                  pw2%array(ng + 1:ng2) = cmplx(0.0_dp, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
               END IF
            ELSE
               cpabort("Copies between spherical grids require compatible grids!")
            END IF
         ELSE
            ng1 = SIZE(pw1%array)
            ng2 = SIZE(pw2%array)

            IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
               ! pw1 lives on the reference grid of pw2: remap through pw2's gidx
               IF (ng1 >= ng2) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
               ! pw2 lives on the reference grid of pw1: remap through pw1's gidx
               IF (ng1 >= ng2) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               cpabort("Copy not implemented!")
            END IF

         END IF

      ELSE
         ! same grid object: straight copy with zero imaginary part
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
         pw2%array = cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_copy_r1d_c1d_rs
2850
! **************************************************************************************************
!> \brief Copy the real coefficients of a pw into a complex array
!>        (imaginary parts are set to zero)
!> \param pw source pw whose data array is read
!> \param array destination; must be at least as large as pw%array
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_r1d_c1d_rs (pw, array)
      TYPE(pw_r1d_rs_type), INTENT(IN)                   :: pw
      COMPLEX(KIND=dp), DIMENSION(:), INTENT(INOUT)      :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'

      INTEGER                                            :: handle, ig

      CALL timeset(routineN, handle)

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(array, pw)
      DO ig = 1, SIZE(pw%array)
         array(ig) = cmplx(pw%array(ig), 0.0_dp, kind=dp)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)
   END SUBROUTINE pw_copy_to_array_r1d_c1d_rs
2872
! **************************************************************************************************
!> \brief Fill the real coefficients of a pw from a complex array
!>        (only the real parts are kept)
!> \param pw pw whose data array is overwritten (array component is modified)
!> \param array source values; must conform to pw%array
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_r1d_c1d_rs (pw, array)
      TYPE(pw_r1d_rs_type), INTENT(IN)                   :: pw
      COMPLEX(KIND=dp), DIMENSION(:), INTENT(IN)         :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER                                            :: handle, ig

      CALL timeset(routineN, handle)

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(array, pw)
      DO ig = 1, SIZE(pw%array)
         pw%array(ig) = real(array(ig), kind=dp)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)
   END SUBROUTINE pw_copy_from_array_r1d_c1d_rs
2894
2895! **************************************************************************************************
2896!> \brief pw2 = alpha*pw1 + beta*pw2
2897!> alpha defaults to 1, beta defaults to 1
2898!> \param pw1 ...
2899!> \param pw2 ...
2900!> \param alpha ...
2901!> \param beta ...
2902!> \param allow_noncompatible_grids ...
2903!> \par History
2904!> JGH (21-Feb-2003) : added reference grid functionality
2905!> JGH (01-Dec-2007) : rename and remove complex alpha
2906!> \author apsi
2907!> \note
2908!> Currently only summing up of respective types allowed,
2909!> in order to avoid errors
2910! **************************************************************************************************
   SUBROUTINE pw_axpy_r1d_c1d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      ! Accumulate pw2 = alpha*pw1 + beta*pw2 (alpha, beta default to 1),
      ! promoting the real data of pw1 to complex on the fly.
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta
      INTEGER :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      ! Resolve optional scaling factors; both default to plain accumulation.
      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! Pre-scale pw2 by beta. beta == 0 zeroes the array explicitly rather
      ! than multiplying, and beta == 1 skips the pass entirely.
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! Fast path: same grid object, whole-array update.
         ! alpha == 1 avoids the multiply; alpha == 0 skips the update.
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ! Different but compatible grids: operate on the common leading range.
         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         IF (pw1%pw_grid%spherical) THEN
            ! Spherical cutoff grids share the ordering of their first ng entries,
            ! so a direct index-to-index update suffices.
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + cmplx(pw1%array(i), 0.0_dp, kind=dp)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* cmplx(pw1%array(i), 0.0_dp, kind=dp)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw1's grid is the reference grid of pw2: remap through pw2's gidx
            ! table (presumably mapping pw2's local indices onto the reference
            ! grid -- TODO confirm); the gather/scatter direction depends on
            ! which of the two arrays is longer.
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! Mirror case: pw2's grid is the reference of pw1's grid, so the
            ! remapping goes through pw1's gidx table instead.
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            ! Compatible by test/flag, but with no usable reference relation.
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_r1d_c1d_rs
3059
3060! **************************************************************************************************
3061!> \brief pw_out = pw_out + alpha * pw1 * pw2
3062!> alpha defaults to 1
3063!> \param pw_out ...
3064!> \param pw1 ...
3065!> \param pw2 ...
3066!> \param alpha ...
3067!> \author JGH
3068! **************************************************************************************************
3069 SUBROUTINE pw_multiply_r1d_c1d_rs (pw_out, pw1, pw2, alpha)
3070
3071 TYPE(pw_r1d_rs_type), INTENT(INOUT) :: pw_out
3072 TYPE(pw_r1d_rs_type), INTENT(IN) :: pw1
3073 TYPE(pw_c1d_rs_type), INTENT(IN) :: pw2
3074 REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha
3075
3076 CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'
3077
3078 INTEGER :: handle
3079 REAL(KIND=dp) :: my_alpha
3080
3081 CALL timeset(routinen, handle)
3082
3083 my_alpha = 1.0_dp
3084 IF (PRESENT(alpha)) my_alpha = alpha
3085
3086 IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
3087 cpabort("pw_multiply not implemented for non-identical grids!")
3088
3089 IF (my_alpha == 1.0_dp) THEN
3090!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
3091 pw_out%array = pw_out%array + pw1%array* real(pw2%array, kind=dp)
3092!$OMP END PARALLEL WORKSHARE
3093 ELSE
3094!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
3095 pw_out%array = pw_out%array + my_alpha*pw1%array* real(pw2%array, kind=dp)
3096!$OMP END PARALLEL WORKSHARE
3097 END IF
3098
3099 CALL timestop(handle)
3100
3101 END SUBROUTINE pw_multiply_r1d_c1d_rs
3102
3103! **************************************************************************************************
3104!> \brief ...
3105!> \param pw1 ...
3106!> \param pw2 ...
3107! **************************************************************************************************
3108 SUBROUTINE pw_multiply_with_r1d_c1d_rs (pw1, pw2)
3109 TYPE(pw_r1d_rs_type), INTENT(INOUT) :: pw1
3110 TYPE(pw_c1d_rs_type), INTENT(IN) :: pw2
3111
3112 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
3113
3114 INTEGER :: handle
3115
3116 CALL timeset(routinen, handle)
3117
3118 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
3119 cpabort("Incompatible grids!")
3120
3121!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
3122 pw1%array = pw1%array* real(pw2%array, kind=dp)
3123!$OMP END PARALLEL WORKSHARE
3124
3125 CALL timestop(handle)
3126
3127 END SUBROUTINE pw_multiply_with_r1d_c1d_rs
3128
3129! **************************************************************************************************
3130!> \brief Calculate integral over unit cell for functions in plane wave basis
3131!> only returns the real part of it ......
3132!> \param pw1 ...
3133!> \param pw2 ...
3134!> \param sumtype ...
3135!> \param just_sum ...
3136!> \param local_only ...
3137!> \return ...
3138!> \par History
3139!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
3140!> \author apsi
3141! **************************************************************************************************
   FUNCTION pw_integral_ab_r1d_c1d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      ! Integral of pw1 * Re(pw2) over the unit cell; only the real part
      ! of the complex field contributes.
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype      ! do_standard_sum or do_accurate_sum (default)
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! Accurate (compensated) summation is the default.
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! do standard sum
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum

         integral_value = sum(pw1%array*real(pw2%array, kind=dp)) !? complex bit

      ELSE

         ! Do accurate sum
         integral_value = accurate_sum(pw1%array*real(pw2%array, kind=dp)) !? complex bit

      END IF

      ! Unless the caller wants the raw sum, convert it to an integral by
      ! weighting with the per-point volume element dvol.
      IF (.NOT. my_just_sum) THEN
         integral_value = integral_value*pw1%pw_grid%dvol
      END IF

      ! Half-space grids store only one of each +-G pair: double the sum and
      ! remove the G=0 contribution that must not be counted twice.
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   pw1%array(1)*real(pw2%array(1), kind=dp)
      END IF

      ! Reduce over all ranks holding a slice of the distributed grid,
      ! unless the caller asked for the node-local partial result.
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_r1d_c1d_rs
3200! **************************************************************************************************
3201!> \brief copy a pw type variable
3202!> \param pw1 ...
3203!> \param pw2 ...
3204!> \par History
3205!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
3206!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
3207!> JGH (21-Feb-2003) : Code for generalized reference grids
3208!> \author apsi
3209!> \note
3210!> Currently only copying of respective types allowed,
3211!> in order to avoid errors
3212! **************************************************************************************************
3213 SUBROUTINE pw_copy_r1d_c1d_gs (pw1, pw2)
3214
3215 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
3216 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2
3217
3218 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
3219
3220 INTEGER :: handle
3221 INTEGER :: i, j, ng, ng1, ng2, ns
3222
3223 CALL timeset(routinen, handle)
3224
3225 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
3226 cpabort("Both grids must be either spherical or non-spherical!")
3227
3228 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
3229 IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
3230 IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
3231 ng1 = SIZE(pw1%array)
3232 ng2 = SIZE(pw2%array)
3233 ng = min(ng1, ng2)
3234!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
3235 pw2%array(1:ng) = cmplx(pw1%array(1:ng), 0.0_dp, kind=dp)
3236!$OMP END PARALLEL WORKSHARE
3237 IF (ng2 > ng) THEN
3238!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
3239 pw2%array(ng + 1:ng2) = cmplx(0.0_dp, 0.0_dp, kind=dp)
3240!$OMP END PARALLEL WORKSHARE
3241 END IF
3242 ELSE
3243 cpabort("Copies between spherical grids require compatible grids!")
3244 END IF
3245 ELSE
3246 ng1 = SIZE(pw1%array)
3247 ng2 = SIZE(pw2%array)
3248 ns = 2*max(ng1, ng2)
3249
3250 IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
3251 IF (ng1 >= ng2) THEN
3252!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
3253 DO i = 1, ng2
3254 j = pw2%pw_grid%gidx(i)
3255 pw2%array(i) = cmplx(pw1%array(j), 0.0_dp, kind=dp)
3256 END DO
3257!$OMP END PARALLEL DO
3258 ELSE
3259 CALL pw_zero(pw2)
3260!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
3261 DO i = 1, ng1
3262 j = pw2%pw_grid%gidx(i)
3263 pw2%array(j) = cmplx(pw1%array(i), 0.0_dp, kind=dp)
3264 END DO
3265!$OMP END PARALLEL DO
3266 END IF
3267 ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
3268 IF (ng1 >= ng2) THEN
3269!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
3270 DO i = 1, ng2
3271 j = pw1%pw_grid%gidx(i)
3272 pw2%array(i) = cmplx(pw1%array(j), 0.0_dp, kind=dp)
3273 END DO
3274!$OMP END PARALLEL DO
3275 ELSE
3276 CALL pw_zero(pw2)
3277!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
3278 DO i = 1, ng1
3279 j = pw1%pw_grid%gidx(i)
3280 pw2%array(j) = cmplx(pw1%array(i), 0.0_dp, kind=dp)
3281 END DO
3282!$OMP END PARALLEL DO
3283 END IF
3284 ELSE
3285 cpabort("Copy not implemented!")
3286 END IF
3287
3288 END IF
3289
3290 ELSE
3291!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
3292 pw2%array = cmplx(pw1%array, 0.0_dp, kind=dp)
3293!$OMP END PARALLEL WORKSHARE
3294 END IF
3295
3296 CALL timestop(handle)
3297
3298 END SUBROUTINE pw_copy_r1d_c1d_gs
3299
3300! **************************************************************************************************
3301!> \brief ...
3302!> \param pw ...
3303!> \param array ...
3304! **************************************************************************************************
3305 SUBROUTINE pw_copy_to_array_r1d_c1d_gs (pw, array)
3306 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw
3307 COMPLEX(KIND=dp), DIMENSION(:), INTENT(INOUT) :: array
3308
3309 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
3310
3311 INTEGER :: handle
3312
3313 CALL timeset(routinen, handle)
3314
3315!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
3316 array(:) = cmplx(pw%array(:), 0.0_dp, kind=dp)
3317!$OMP END PARALLEL WORKSHARE
3318
3319 CALL timestop(handle)
3320 END SUBROUTINE pw_copy_to_array_r1d_c1d_gs
3321
3322! **************************************************************************************************
3323!> \brief ...
3324!> \param pw ...
3325!> \param array ...
3326! **************************************************************************************************
3327 SUBROUTINE pw_copy_from_array_r1d_c1d_gs (pw, array)
3328 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw
3329 COMPLEX(KIND=dp), DIMENSION(:), INTENT(IN) :: array
3330
3331 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
3332
3333 INTEGER :: handle
3334
3335 CALL timeset(routinen, handle)
3336
3337!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
3338 pw%array = real(array, kind=dp)
3339!$OMP END PARALLEL WORKSHARE
3340
3341 CALL timestop(handle)
3342 END SUBROUTINE pw_copy_from_array_r1d_c1d_gs
3343
3344! **************************************************************************************************
3345!> \brief pw2 = alpha*pw1 + beta*pw2
3346!> alpha defaults to 1, beta defaults to 1
3347!> \param pw1 ...
3348!> \param pw2 ...
3349!> \param alpha ...
3350!> \param beta ...
3351!> \param allow_noncompatible_grids ...
3352!> \par History
3353!> JGH (21-Feb-2003) : added reference grid functionality
3354!> JGH (01-Dec-2007) : rename and remove complex alpha
3355!> \author apsi
3356!> \note
3357!> Currently only summing up of respective types allowed,
3358!> in order to avoid errors
3359! **************************************************************************************************
   SUBROUTINE pw_axpy_r1d_c1d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      ! Accumulate pw2 = alpha*pw1 + beta*pw2 (alpha, beta default to 1),
      ! promoting the real data of pw1 to complex on the fly.
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta
      INTEGER :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      ! Resolve optional scaling factors; both default to plain accumulation.
      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! Pre-scale pw2 by beta. beta == 0 zeroes the array explicitly rather
      ! than multiplying, and beta == 1 skips the pass entirely.
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! Fast path: same grid object, whole-array update.
         ! alpha == 1 avoids the multiply; alpha == 0 skips the update.
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* cmplx(pw1%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ! Different but compatible grids: operate on the common leading range.
         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         IF (pw1%pw_grid%spherical) THEN
            ! Spherical cutoff grids share the ordering of their first ng entries,
            ! so a direct index-to-index update suffices.
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + cmplx(pw1%array(i), 0.0_dp, kind=dp)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* cmplx(pw1%array(i), 0.0_dp, kind=dp)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw1's grid is the reference grid of pw2: remap through pw2's gidx
            ! table (presumably mapping pw2's local indices onto the reference
            ! grid -- TODO confirm); the gather/scatter direction depends on
            ! which of the two arrays is longer.
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! Mirror case: pw2's grid is the reference of pw1's grid, so the
            ! remapping goes through pw1's gidx table instead.
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* cmplx(pw1%array(j), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* cmplx(pw1%array(i), 0.0_dp, kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            ! Compatible by test/flag, but with no usable reference relation.
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_r1d_c1d_gs
3508
3509! **************************************************************************************************
3510!> \brief pw_out = pw_out + alpha * pw1 * pw2
3511!> alpha defaults to 1
3512!> \param pw_out ...
3513!> \param pw1 ...
3514!> \param pw2 ...
3515!> \param alpha ...
3516!> \author JGH
3517! **************************************************************************************************
3518 SUBROUTINE pw_multiply_r1d_c1d_gs (pw_out, pw1, pw2, alpha)
3519
3520 TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw_out
3521 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
3522 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2
3523 REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha
3524
3525 CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'
3526
3527 INTEGER :: handle
3528 REAL(KIND=dp) :: my_alpha
3529
3530 CALL timeset(routinen, handle)
3531
3532 my_alpha = 1.0_dp
3533 IF (PRESENT(alpha)) my_alpha = alpha
3534
3535 IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
3536 cpabort("pw_multiply not implemented for non-identical grids!")
3537
3538 IF (my_alpha == 1.0_dp) THEN
3539!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
3540 pw_out%array = pw_out%array + pw1%array* real(pw2%array, kind=dp)
3541!$OMP END PARALLEL WORKSHARE
3542 ELSE
3543!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
3544 pw_out%array = pw_out%array + my_alpha*pw1%array* real(pw2%array, kind=dp)
3545!$OMP END PARALLEL WORKSHARE
3546 END IF
3547
3548 CALL timestop(handle)
3549
3550 END SUBROUTINE pw_multiply_r1d_c1d_gs
3551
3552! **************************************************************************************************
3553!> \brief ...
3554!> \param pw1 ...
3555!> \param pw2 ...
3556! **************************************************************************************************
3557 SUBROUTINE pw_multiply_with_r1d_c1d_gs (pw1, pw2)
3558 TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw1
3559 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2
3560
3561 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
3562
3563 INTEGER :: handle
3564
3565 CALL timeset(routinen, handle)
3566
3567 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
3568 cpabort("Incompatible grids!")
3569
3570!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
3571 pw1%array = pw1%array* real(pw2%array, kind=dp)
3572!$OMP END PARALLEL WORKSHARE
3573
3574 CALL timestop(handle)
3575
3576 END SUBROUTINE pw_multiply_with_r1d_c1d_gs
3577
3578! **************************************************************************************************
3579!> \brief Calculate integral over unit cell for functions in plane wave basis
3580!> only returns the real part of it ......
3581!> \param pw1 ...
3582!> \param pw2 ...
3583!> \param sumtype ...
3584!> \param just_sum ...
3585!> \param local_only ...
3586!> \return ...
3587!> \par History
3588!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
3589!> \author apsi
3590! **************************************************************************************************
   FUNCTION pw_integral_ab_r1d_c1d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      ! Integral of pw1 * Re(pw2) over the unit cell; only the real part
      ! of the complex field contributes.
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype      ! do_standard_sum or do_accurate_sum (default)
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! Accurate (compensated) summation is the default.
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! do standard sum
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum

         integral_value = sum(pw1%array*real(pw2%array, kind=dp)) !? complex bit

      ELSE

         ! Do accurate sum
         integral_value = accurate_sum(pw1%array*real(pw2%array, kind=dp)) !? complex bit

      END IF

      ! Unless the caller wants the raw sum, scale by the cell volume
      ! (g-space convention; contrast dvol used in the real-space variant).
      IF (.NOT. my_just_sum) THEN
         integral_value = integral_value*pw1%pw_grid%vol
      END IF

      ! Half-space grids store only one of each +-G pair: double the sum and
      ! remove the G=0 contribution that must not be counted twice.
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   pw1%array(1)*real(pw2%array(1), kind=dp)
      END IF

      ! Reduce over all ranks holding a slice of the distributed grid,
      ! unless the caller asked for the node-local partial result.
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_r1d_c1d_gs
3649
3650! **************************************************************************************************
3651!> \brief ...
3652!> \param pw1 ...
3653!> \param pw2 ...
3654!> \return ...
3655! **************************************************************************************************
3656 FUNCTION pw_integral_a2b_r1d_c1d (pw1, pw2) RESULT(integral_value)
3657
3658 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
3659 TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2
3660 REAL(kind=dp) :: integral_value
3661
3662 CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_a2b'
3663
3664 INTEGER :: handle
3665
3666 CALL timeset(routinen, handle)
3667
3668 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
3669 cpabort("Grids incompatible")
3670 END IF
3671
3672 integral_value = accurate_sum(pw1%array*real(pw2%array, kind=dp)*pw1%pw_grid%gsq)
3673 IF (pw1%pw_grid%grid_span == halfspace) THEN
3674 integral_value = 2.0_dp*integral_value
3675 END IF
3676
3677 integral_value = integral_value*pw1%pw_grid%vol
3678
3679 IF (pw1%pw_grid%para%mode == pw_mode_distributed) &
3680 CALL pw1%pw_grid%para%group%sum(integral_value)
3681 CALL timestop(handle)
3682
3683 END FUNCTION pw_integral_a2b_r1d_c1d
3684! **************************************************************************************************
3685!> \brief copy a pw type variable
3686!> \param pw1 ...
3687!> \param pw2 ...
3688!> \par History
3689!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
3690!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
3691!> JGH (21-Feb-2003) : Code for generalized reference grids
3692!> \author apsi
3693!> \note
3694!> Currently only copying of respective types allowed,
3695!> in order to avoid errors
3696! **************************************************************************************************
3697 SUBROUTINE pw_copy_r3d_r3d_rs (pw1, pw2)
3698
3699 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
3700 TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw2
3701
3702 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
3703
3704 INTEGER :: handle
3705
3706 CALL timeset(routinen, handle)
3707
3708 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
3709 cpabort("Both grids must be either spherical or non-spherical!")
3710 IF (pw1%pw_grid%spherical) &
3711 cpabort("Spherical grids only exist in reciprocal space!")
3712
3713 IF (any(shape(pw2%array) /= shape(pw1%array))) &
3714 cpabort("3D grids must be compatible!")
3715 IF (pw1%pw_grid%spherical) &
3716 cpabort("3D grids must not be spherical!")
3717!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
3718 pw2%array(:, :, :) = pw1%array(:, :, :)
3719!$OMP END PARALLEL WORKSHARE
3720
3721 CALL timestop(handle)
3722
3723 END SUBROUTINE pw_copy_r3d_r3d_rs
3724
3725! **************************************************************************************************
3726!> \brief ...
3727!> \param pw ...
3728!> \param array ...
3729! **************************************************************************************************
3730 SUBROUTINE pw_copy_to_array_r3d_r3d_rs (pw, array)
3731 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw
3732 REAL(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array
3733
3734 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
3735
3736 INTEGER :: handle
3737
3738 CALL timeset(routinen, handle)
3739
3740!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
3741 array(:, :, :) = pw%array(:, :, :)
3742!$OMP END PARALLEL WORKSHARE
3743
3744 CALL timestop(handle)
3745 END SUBROUTINE pw_copy_to_array_r3d_r3d_rs
3746
3747! **************************************************************************************************
3748!> \brief ...
3749!> \param pw ...
3750!> \param array ...
3751! **************************************************************************************************
3752 SUBROUTINE pw_copy_from_array_r3d_r3d_rs (pw, array)
3753 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw
3754 REAL(KIND=dp), DIMENSION(:, :, :), INTENT(IN) :: array
3755
3756 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
3757
3758 INTEGER :: handle
3759
3760 CALL timeset(routinen, handle)
3761
3762!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
3763 pw%array = array
3764!$OMP END PARALLEL WORKSHARE
3765
3766 CALL timestop(handle)
3767 END SUBROUTINE pw_copy_from_array_r3d_r3d_rs
3768
3769! **************************************************************************************************
3770!> \brief pw2 = alpha*pw1 + beta*pw2
3771!> alpha defaults to 1, beta defaults to 1
3772!> \param pw1 ...
3773!> \param pw2 ...
3774!> \param alpha ...
3775!> \param beta ...
3776!> \param allow_noncompatible_grids ...
3777!> \par History
3778!> JGH (21-Feb-2003) : added reference grid functionality
3779!> JGH (01-Dec-2007) : rename and remove complex alpha
3780!> \author apsi
3781!> \note
3782!> Currently only summing up of respective types allowed,
3783!> in order to avoid errors
3784! **************************************************************************************************
   SUBROUTINE pw_axpy_r3d_r3d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      ! Accumulate pw2 = alpha*pw1 + beta*pw2 (alpha, beta default to 1)
      ! on 3D real-space grids of identical shape.
      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta

      CALL timeset(routinen, handle)

      ! Resolve optional scaling factors; both default to plain accumulation.
      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! Pre-scale pw2 by beta. beta == 0 zeroes the array explicitly rather
      ! than multiplying, and beta == 1 skips the pass entirely.
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         ! Same grid object: whole-array update.
         ! alpha == 1 avoids the multiply; alpha == 0 skips the update.
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2, my_alpha)
            pw2%array = pw2%array + my_alpha* pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ! Distinct grid objects are accepted only when the 3D arrays conform.
         IF (any(shape(pw1%array) /= shape(pw2%array))) &
            cpabort("Noncommensurate grids not implemented for 3D grids!")

         ! NOTE: unlike the branch above, alpha == 0 here still performs the
         ! (no-op) scaled addition rather than skipping it.
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2,my_alpha)
            pw2%array = pw2%array + my_alpha* pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_r3d_r3d_rs
3854
3855! **************************************************************************************************
3856!> \brief pw_out = pw_out + alpha * pw1 * pw2
3857!> alpha defaults to 1
3858!> \param pw_out ...
3859!> \param pw1 ...
3860!> \param pw2 ...
3861!> \param alpha ...
3862!> \author JGH
3863! **************************************************************************************************
3864 SUBROUTINE pw_multiply_r3d_r3d_rs (pw_out, pw1, pw2, alpha)
3865
3866 TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw_out
3867 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
3868 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw2
3869 REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha
3870
3871 CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'
3872
3873 INTEGER :: handle
3874 REAL(KIND=dp) :: my_alpha
3875
3876 CALL timeset(routinen, handle)
3877
3878 my_alpha = 1.0_dp
3879 IF (PRESENT(alpha)) my_alpha = alpha
3880
3881 IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
3882 cpabort("pw_multiply not implemented for non-identical grids!")
3883
3884 IF (my_alpha == 1.0_dp) THEN
3885!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
3886 pw_out%array = pw_out%array + pw1%array* pw2%array
3887!$OMP END PARALLEL WORKSHARE
3888 ELSE
3889!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
3890 pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
3891!$OMP END PARALLEL WORKSHARE
3892 END IF
3893
3894 CALL timestop(handle)
3895
3896 END SUBROUTINE pw_multiply_r3d_r3d_rs
3897
3898! **************************************************************************************************
3899!> \brief ...
3900!> \param pw1 ...
3901!> \param pw2 ...
3902! **************************************************************************************************
3903 SUBROUTINE pw_multiply_with_r3d_r3d_rs (pw1, pw2)
3904 TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw1
3905 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw2
3906
3907 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
3908
3909 INTEGER :: handle
3910
3911 CALL timeset(routinen, handle)
3912
3913 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
3914 cpabort("Incompatible grids!")
3915
3916!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
3917 pw1%array = pw1%array* pw2%array
3918!$OMP END PARALLEL WORKSHARE
3919
3920 CALL timestop(handle)
3921
3922 END SUBROUTINE pw_multiply_with_r3d_r3d_rs
3923
3924! **************************************************************************************************
3925!> \brief Calculate integral over unit cell for functions in plane wave basis
3926!> only returns the real part of it ......
3927!> \param pw1 ...
3928!> \param pw2 ...
3929!> \param sumtype ...
3930!> \param just_sum ...
3931!> \param local_only ...
3932!> \return ...
3933!> \par History
3934!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
3935!> \author apsi
3936! **************************************************************************************************
3937 FUNCTION pw_integral_ab_r3d_r3d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)
3938
3939 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
3940 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw2
3941 INTEGER, INTENT(IN), OPTIONAL :: sumtype
3942 LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
3943 REAL(kind=dp) :: integral_value
3944
3945 CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab'
3946
3947 INTEGER :: handle, loc_sumtype
3948 LOGICAL :: my_just_sum, my_local_only
3949
3950 CALL timeset(routinen, handle)
3951
3952 loc_sumtype = do_accurate_sum
3953 IF (PRESENT(sumtype)) loc_sumtype = sumtype
3954
3955 my_local_only = .false.
3956 IF (PRESENT(local_only)) my_local_only = local_only
3957
3958 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
3959 cpabort("Grids incompatible")
3960 END IF
3961
3962 my_just_sum = .false.
3963 IF (PRESENT(just_sum)) my_just_sum = just_sum
3964
3965 ! do standard sum
3966 IF (loc_sumtype == do_standard_sum) THEN
3967
3968 ! Do standard sum
3969
3970 integral_value = sum(pw1%array*pw2%array)
3971
3972 ELSE
3973
3974 ! Do accurate sum
3975 integral_value = accurate_dot_product(pw1%array, pw2%array)
3976
3977 END IF
3978
3979 IF (.NOT. my_just_sum) THEN
3980 integral_value = integral_value*pw1%pw_grid%dvol
3981 END IF
3982
3983
3984 IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
3985 CALL pw1%pw_grid%para%group%sum(integral_value)
3986
3987 CALL timestop(handle)
3988
3989 END FUNCTION pw_integral_ab_r3d_r3d_rs
3990! **************************************************************************************************
3991!> \brief copy a pw type variable
3992!> \param pw1 ...
3993!> \param pw2 ...
3994!> \par History
3995!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
3996!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
3997!> JGH (21-Feb-2003) : Code for generalized reference grids
3998!> \author apsi
3999!> \note
4000!> Currently only copying of respective types allowed,
4001!> in order to avoid errors
4002! **************************************************************************************************
4003 SUBROUTINE pw_copy_r3d_r3d_gs (pw1, pw2)
4004
4005 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
4006 TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw2
4007
4008 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
4009
4010 INTEGER :: handle
4011
4012 CALL timeset(routinen, handle)
4013
4014 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
4015 cpabort("Both grids must be either spherical or non-spherical!")
4016
4017 IF (any(shape(pw2%array) /= shape(pw1%array))) &
4018 cpabort("3D grids must be compatible!")
4019 IF (pw1%pw_grid%spherical) &
4020 cpabort("3D grids must not be spherical!")
4021!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
4022 pw2%array(:, :, :) = pw1%array(:, :, :)
4023!$OMP END PARALLEL WORKSHARE
4024
4025 CALL timestop(handle)
4026
4027 END SUBROUTINE pw_copy_r3d_r3d_gs
4028
4029! **************************************************************************************************
4030!> \brief ...
4031!> \param pw ...
4032!> \param array ...
4033! **************************************************************************************************
4034 SUBROUTINE pw_copy_to_array_r3d_r3d_gs (pw, array)
4035 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw
4036 REAL(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array
4037
4038 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
4039
4040 INTEGER :: handle
4041
4042 CALL timeset(routinen, handle)
4043
4044!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
4045 array(:, :, :) = pw%array(:, :, :)
4046!$OMP END PARALLEL WORKSHARE
4047
4048 CALL timestop(handle)
4049 END SUBROUTINE pw_copy_to_array_r3d_r3d_gs
4050
4051! **************************************************************************************************
4052!> \brief ...
4053!> \param pw ...
4054!> \param array ...
4055! **************************************************************************************************
4056 SUBROUTINE pw_copy_from_array_r3d_r3d_gs (pw, array)
4057 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw
4058 REAL(KIND=dp), DIMENSION(:, :, :), INTENT(IN) :: array
4059
4060 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
4061
4062 INTEGER :: handle
4063
4064 CALL timeset(routinen, handle)
4065
4066!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
4067 pw%array = array
4068!$OMP END PARALLEL WORKSHARE
4069
4070 CALL timestop(handle)
4071 END SUBROUTINE pw_copy_from_array_r3d_r3d_gs
4072
4073! **************************************************************************************************
4074!> \brief pw2 = alpha*pw1 + beta*pw2
4075!> alpha defaults to 1, beta defaults to 1
4076!> \param pw1 ...
4077!> \param pw2 ...
4078!> \param alpha ...
4079!> \param beta ...
4080!> \param allow_noncompatible_grids ...
4081!> \par History
4082!> JGH (21-Feb-2003) : added reference grid functionality
4083!> JGH (01-Dec-2007) : rename and remove complex alpha
4084!> \author apsi
4085!> \note
4086!> Currently only summing up of respective types allowed,
4087!> in order to avoid errors
4088! **************************************************************************************************
4089 SUBROUTINE pw_axpy_r3d_r3d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)
4090
4091 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
4092 TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw2
4093 REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
4094 LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids
4095
4096 CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'
4097
4098 INTEGER :: handle
4099 LOGICAL :: my_allow_noncompatible_grids
4100 REAL(KIND=dp) :: my_alpha, my_beta
4101
4102 CALL timeset(routinen, handle)
4103
4104 my_alpha = 1.0_dp
4105 IF (PRESENT(alpha)) my_alpha = alpha
4106
4107 my_beta = 1.0_dp
4108 IF (PRESENT(beta)) my_beta = beta
4109
4110 my_allow_noncompatible_grids = .false.
4111 IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids
4112
4113 IF (my_beta /= 1.0_dp) THEN
4114 IF (my_beta == 0.0_dp) THEN
4115 CALL pw_zero(pw2)
4116 ELSE
4117!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
4118 pw2%array = pw2%array*my_beta
4119!$OMP END PARALLEL WORKSHARE
4120 END IF
4121 END IF
4122
4123 IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
4124 IF (my_alpha == 1.0_dp) THEN
4125!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
4126 pw2%array = pw2%array + pw1%array
4127!$OMP END PARALLEL WORKSHARE
4128 ELSE IF (my_alpha /= 0.0_dp) THEN
4129!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2, my_alpha)
4130 pw2%array = pw2%array + my_alpha* pw1%array
4131!$OMP END PARALLEL WORKSHARE
4132 END IF
4133
4134 ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN
4135
4136 IF (any(shape(pw1%array) /= shape(pw2%array))) &
4137 cpabort("Noncommensurate grids not implemented for 3D grids!")
4138
4139 IF (my_alpha == 1.0_dp) THEN
4140!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
4141 pw2%array = pw2%array + pw1%array
4142!$OMP END PARALLEL WORKSHARE
4143 ELSE
4144!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2,my_alpha)
4145 pw2%array = pw2%array + my_alpha* pw1%array
4146!$OMP END PARALLEL WORKSHARE
4147 END IF
4148
4149 ELSE
4150
4151 cpabort("Grids not compatible")
4152
4153 END IF
4154
4155 CALL timestop(handle)
4156
4157 END SUBROUTINE pw_axpy_r3d_r3d_gs
4158
4159! **************************************************************************************************
4160!> \brief pw_out = pw_out + alpha * pw1 * pw2
4161!> alpha defaults to 1
4162!> \param pw_out ...
4163!> \param pw1 ...
4164!> \param pw2 ...
4165!> \param alpha ...
4166!> \author JGH
4167! **************************************************************************************************
4168 SUBROUTINE pw_multiply_r3d_r3d_gs (pw_out, pw1, pw2, alpha)
4169
4170 TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw_out
4171 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
4172 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw2
4173 REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha
4174
4175 CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'
4176
4177 INTEGER :: handle
4178 REAL(KIND=dp) :: my_alpha
4179
4180 CALL timeset(routinen, handle)
4181
4182 my_alpha = 1.0_dp
4183 IF (PRESENT(alpha)) my_alpha = alpha
4184
4185 IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
4186 cpabort("pw_multiply not implemented for non-identical grids!")
4187
4188 IF (my_alpha == 1.0_dp) THEN
4189!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
4190 pw_out%array = pw_out%array + pw1%array* pw2%array
4191!$OMP END PARALLEL WORKSHARE
4192 ELSE
4193!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
4194 pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
4195!$OMP END PARALLEL WORKSHARE
4196 END IF
4197
4198 CALL timestop(handle)
4199
4200 END SUBROUTINE pw_multiply_r3d_r3d_gs
4201
4202! **************************************************************************************************
4203!> \brief ...
4204!> \param pw1 ...
4205!> \param pw2 ...
4206! **************************************************************************************************
4207 SUBROUTINE pw_multiply_with_r3d_r3d_gs (pw1, pw2)
4208 TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw1
4209 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw2
4210
4211 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
4212
4213 INTEGER :: handle
4214
4215 CALL timeset(routinen, handle)
4216
4217 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
4218 cpabort("Incompatible grids!")
4219
4220!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
4221 pw1%array = pw1%array* pw2%array
4222!$OMP END PARALLEL WORKSHARE
4223
4224 CALL timestop(handle)
4225
4226 END SUBROUTINE pw_multiply_with_r3d_r3d_gs
4227
4228! **************************************************************************************************
4229!> \brief Calculate integral over unit cell for functions in plane wave basis
4230!> only returns the real part of it ......
4231!> \param pw1 ...
4232!> \param pw2 ...
4233!> \param sumtype ...
4234!> \param just_sum ...
4235!> \param local_only ...
4236!> \return ...
4237!> \par History
4238!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
4239!> \author apsi
4240! **************************************************************************************************
4241 FUNCTION pw_integral_ab_r3d_r3d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)
4242
4243 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
4244 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw2
4245 INTEGER, INTENT(IN), OPTIONAL :: sumtype
4246 LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
4247 REAL(kind=dp) :: integral_value
4248
4249 CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab'
4250
4251 INTEGER :: handle, loc_sumtype
4252 LOGICAL :: my_just_sum, my_local_only
4253
4254 CALL timeset(routinen, handle)
4255
4256 loc_sumtype = do_accurate_sum
4257 IF (PRESENT(sumtype)) loc_sumtype = sumtype
4258
4259 my_local_only = .false.
4260 IF (PRESENT(local_only)) my_local_only = local_only
4261
4262 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
4263 cpabort("Grids incompatible")
4264 END IF
4265
4266 my_just_sum = .false.
4267 IF (PRESENT(just_sum)) my_just_sum = just_sum
4268
4269 ! do standard sum
4270 IF (loc_sumtype == do_standard_sum) THEN
4271
4272 ! Do standard sum
4273
4274 integral_value = sum(pw1%array*pw2%array)
4275
4276 ELSE
4277
4278 ! Do accurate sum
4279 integral_value = accurate_dot_product(pw1%array, pw2%array)
4280
4281 END IF
4282
4283 IF (.NOT. my_just_sum) THEN
4284 integral_value = integral_value*pw1%pw_grid%vol
4285 END IF
4286
4287
4288 IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
4289 CALL pw1%pw_grid%para%group%sum(integral_value)
4290
4291 CALL timestop(handle)
4292
4293 END FUNCTION pw_integral_ab_r3d_r3d_gs
4294
4295! **************************************************************************************************
4296!> \brief copy a pw type variable
4297!> \param pw1 ...
4298!> \param pw2 ...
4299!> \par History
4300!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
4301!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
4302!> JGH (21-Feb-2003) : Code for generalized reference grids
4303!> \author apsi
4304!> \note
4305!> Currently only copying of respective types allowed,
4306!> in order to avoid errors
4307! **************************************************************************************************
4308 SUBROUTINE pw_copy_r3d_c3d_rs (pw1, pw2)
4309
4310 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
4311 TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw2
4312
4313 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
4314
4315 INTEGER :: handle
4316
4317 CALL timeset(routinen, handle)
4318
4319 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
4320 cpabort("Both grids must be either spherical or non-spherical!")
4321 IF (pw1%pw_grid%spherical) &
4322 cpabort("Spherical grids only exist in reciprocal space!")
4323
4324 IF (any(shape(pw2%array) /= shape(pw1%array))) &
4325 cpabort("3D grids must be compatible!")
4326 IF (pw1%pw_grid%spherical) &
4327 cpabort("3D grids must not be spherical!")
4328!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
4329 pw2%array(:, :, :) = cmplx(pw1%array(:, :, :), 0.0_dp, kind=dp)
4330!$OMP END PARALLEL WORKSHARE
4331
4332 CALL timestop(handle)
4333
4334 END SUBROUTINE pw_copy_r3d_c3d_rs
4335
4336! **************************************************************************************************
4337!> \brief ...
4338!> \param pw ...
4339!> \param array ...
4340! **************************************************************************************************
4341 SUBROUTINE pw_copy_to_array_r3d_c3d_rs (pw, array)
4342 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw
4343 COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array
4344
4345 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
4346
4347 INTEGER :: handle
4348
4349 CALL timeset(routinen, handle)
4350
4351!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
4352 array(:, :, :) = cmplx(pw%array(:, :, :), 0.0_dp, kind=dp)
4353!$OMP END PARALLEL WORKSHARE
4354
4355 CALL timestop(handle)
4356 END SUBROUTINE pw_copy_to_array_r3d_c3d_rs
4357
4358! **************************************************************************************************
4359!> \brief ...
4360!> \param pw ...
4361!> \param array ...
4362! **************************************************************************************************
4363 SUBROUTINE pw_copy_from_array_r3d_c3d_rs (pw, array)
4364 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw
4365 COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(IN) :: array
4366
4367 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
4368
4369 INTEGER :: handle
4370
4371 CALL timeset(routinen, handle)
4372
4373!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
4374 pw%array = real(array, kind=dp)
4375!$OMP END PARALLEL WORKSHARE
4376
4377 CALL timestop(handle)
4378 END SUBROUTINE pw_copy_from_array_r3d_c3d_rs
4379
4380! **************************************************************************************************
4381!> \brief pw2 = alpha*pw1 + beta*pw2
4382!> alpha defaults to 1, beta defaults to 1
4383!> \param pw1 ...
4384!> \param pw2 ...
4385!> \param alpha ...
4386!> \param beta ...
4387!> \param allow_noncompatible_grids ...
4388!> \par History
4389!> JGH (21-Feb-2003) : added reference grid functionality
4390!> JGH (01-Dec-2007) : rename and remove complex alpha
4391!> \author apsi
4392!> \note
4393!> Currently only summing up of respective types allowed,
4394!> in order to avoid errors
4395! **************************************************************************************************
4396 SUBROUTINE pw_axpy_r3d_c3d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)
4397
4398 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
4399 TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw2
4400 REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
4401 LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids
4402
4403 CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'
4404
4405 INTEGER :: handle
4406 LOGICAL :: my_allow_noncompatible_grids
4407 REAL(KIND=dp) :: my_alpha, my_beta
4408
4409 CALL timeset(routinen, handle)
4410
4411 my_alpha = 1.0_dp
4412 IF (PRESENT(alpha)) my_alpha = alpha
4413
4414 my_beta = 1.0_dp
4415 IF (PRESENT(beta)) my_beta = beta
4416
4417 my_allow_noncompatible_grids = .false.
4418 IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids
4419
4420 IF (my_beta /= 1.0_dp) THEN
4421 IF (my_beta == 0.0_dp) THEN
4422 CALL pw_zero(pw2)
4423 ELSE
4424!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
4425 pw2%array = pw2%array*my_beta
4426!$OMP END PARALLEL WORKSHARE
4427 END IF
4428 END IF
4429
4430 IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
4431 IF (my_alpha == 1.0_dp) THEN
4432!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
4433 pw2%array = pw2%array + cmplx(pw1%array, 0.0_dp, kind=dp)
4434!$OMP END PARALLEL WORKSHARE
4435 ELSE IF (my_alpha /= 0.0_dp) THEN
4436!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2, my_alpha)
4437 pw2%array = pw2%array + my_alpha* cmplx(pw1%array, 0.0_dp, kind=dp)
4438!$OMP END PARALLEL WORKSHARE
4439 END IF
4440
4441 ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN
4442
4443 IF (any(shape(pw1%array) /= shape(pw2%array))) &
4444 cpabort("Noncommensurate grids not implemented for 3D grids!")
4445
4446 IF (my_alpha == 1.0_dp) THEN
4447!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
4448 pw2%array = pw2%array + cmplx(pw1%array, 0.0_dp, kind=dp)
4449!$OMP END PARALLEL WORKSHARE
4450 ELSE
4451!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2,my_alpha)
4452 pw2%array = pw2%array + my_alpha* cmplx(pw1%array, 0.0_dp, kind=dp)
4453!$OMP END PARALLEL WORKSHARE
4454 END IF
4455
4456 ELSE
4457
4458 cpabort("Grids not compatible")
4459
4460 END IF
4461
4462 CALL timestop(handle)
4463
4464 END SUBROUTINE pw_axpy_r3d_c3d_rs
4465
4466! **************************************************************************************************
4467!> \brief pw_out = pw_out + alpha * pw1 * pw2
4468!> alpha defaults to 1
4469!> \param pw_out ...
4470!> \param pw1 ...
4471!> \param pw2 ...
4472!> \param alpha ...
4473!> \author JGH
4474! **************************************************************************************************
4475 SUBROUTINE pw_multiply_r3d_c3d_rs (pw_out, pw1, pw2, alpha)
4476
4477 TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw_out
4478 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
4479 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw2
4480 REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha
4481
4482 CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'
4483
4484 INTEGER :: handle
4485 REAL(KIND=dp) :: my_alpha
4486
4487 CALL timeset(routinen, handle)
4488
4489 my_alpha = 1.0_dp
4490 IF (PRESENT(alpha)) my_alpha = alpha
4491
4492 IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
4493 cpabort("pw_multiply not implemented for non-identical grids!")
4494
4495 IF (my_alpha == 1.0_dp) THEN
4496!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
4497 pw_out%array = pw_out%array + pw1%array* real(pw2%array, kind=dp)
4498!$OMP END PARALLEL WORKSHARE
4499 ELSE
4500!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
4501 pw_out%array = pw_out%array + my_alpha*pw1%array* real(pw2%array, kind=dp)
4502!$OMP END PARALLEL WORKSHARE
4503 END IF
4504
4505 CALL timestop(handle)
4506
4507 END SUBROUTINE pw_multiply_r3d_c3d_rs
4508
4509! **************************************************************************************************
4510!> \brief ...
4511!> \param pw1 ...
4512!> \param pw2 ...
4513! **************************************************************************************************
4514 SUBROUTINE pw_multiply_with_r3d_c3d_rs (pw1, pw2)
4515 TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw1
4516 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw2
4517
4518 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
4519
4520 INTEGER :: handle
4521
4522 CALL timeset(routinen, handle)
4523
4524 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
4525 cpabort("Incompatible grids!")
4526
4527!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
4528 pw1%array = pw1%array* real(pw2%array, kind=dp)
4529!$OMP END PARALLEL WORKSHARE
4530
4531 CALL timestop(handle)
4532
4533 END SUBROUTINE pw_multiply_with_r3d_c3d_rs
4534
4535! **************************************************************************************************
4536!> \brief Calculate integral over unit cell for functions in plane wave basis
4537!> only returns the real part of it ......
4538!> \param pw1 ...
4539!> \param pw2 ...
4540!> \param sumtype ...
4541!> \param just_sum ...
4542!> \param local_only ...
4543!> \return ...
4544!> \par History
4545!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
4546!> \author apsi
4547! **************************************************************************************************
4548 FUNCTION pw_integral_ab_r3d_c3d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)
4549
4550 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
4551 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw2
4552 INTEGER, INTENT(IN), OPTIONAL :: sumtype
4553 LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
4554 REAL(kind=dp) :: integral_value
4555
4556 CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab'
4557
4558 INTEGER :: handle, loc_sumtype
4559 LOGICAL :: my_just_sum, my_local_only
4560
4561 CALL timeset(routinen, handle)
4562
4563 loc_sumtype = do_accurate_sum
4564 IF (PRESENT(sumtype)) loc_sumtype = sumtype
4565
4566 my_local_only = .false.
4567 IF (PRESENT(local_only)) my_local_only = local_only
4568
4569 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
4570 cpabort("Grids incompatible")
4571 END IF
4572
4573 my_just_sum = .false.
4574 IF (PRESENT(just_sum)) my_just_sum = just_sum
4575
4576 ! do standard sum
4577 IF (loc_sumtype == do_standard_sum) THEN
4578
4579 ! Do standard sum
4580
4581 integral_value = sum(pw1%array*real(pw2%array, kind=dp)) !? complex bit
4582
4583 ELSE
4584
4585 ! Do accurate sum
4586 integral_value = accurate_sum(pw1%array*real(pw2%array, kind=dp)) !? complex bit
4587
4588 END IF
4589
4590 IF (.NOT. my_just_sum) THEN
4591 integral_value = integral_value*pw1%pw_grid%dvol
4592 END IF
4593
4594
4595 IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
4596 CALL pw1%pw_grid%para%group%sum(integral_value)
4597
4598 CALL timestop(handle)
4599
4600 END FUNCTION pw_integral_ab_r3d_c3d_rs
4601! **************************************************************************************************
4602!> \brief copy a pw type variable
4603!> \param pw1 ...
4604!> \param pw2 ...
4605!> \par History
4606!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
4607!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
4608!> JGH (21-Feb-2003) : Code for generalized reference grids
4609!> \author apsi
4610!> \note
4611!> Currently only copying of respective types allowed,
4612!> in order to avoid errors
4613! **************************************************************************************************
4614 SUBROUTINE pw_copy_r3d_c3d_gs (pw1, pw2)
4615
4616 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
4617 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2
4618
4619 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
4620
4621 INTEGER :: handle
4622
4623 CALL timeset(routinen, handle)
4624
4625 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
4626 cpabort("Both grids must be either spherical or non-spherical!")
4627
4628 IF (any(shape(pw2%array) /= shape(pw1%array))) &
4629 cpabort("3D grids must be compatible!")
4630 IF (pw1%pw_grid%spherical) &
4631 cpabort("3D grids must not be spherical!")
4632!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
4633 pw2%array(:, :, :) = cmplx(pw1%array(:, :, :), 0.0_dp, kind=dp)
4634!$OMP END PARALLEL WORKSHARE
4635
4636 CALL timestop(handle)
4637
4638 END SUBROUTINE pw_copy_r3d_c3d_gs
4639
4640! **************************************************************************************************
4641!> \brief ...
4642!> \param pw ...
4643!> \param array ...
4644! **************************************************************************************************
4645 SUBROUTINE pw_copy_to_array_r3d_c3d_gs (pw, array)
4646 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw
4647 COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array
4648
4649 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
4650
4651 INTEGER :: handle
4652
4653 CALL timeset(routinen, handle)
4654
4655!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
4656 array(:, :, :) = cmplx(pw%array(:, :, :), 0.0_dp, kind=dp)
4657!$OMP END PARALLEL WORKSHARE
4658
4659 CALL timestop(handle)
4660 END SUBROUTINE pw_copy_to_array_r3d_c3d_gs
4661
4662! **************************************************************************************************
4663!> \brief ...
4664!> \param pw ...
4665!> \param array ...
4666! **************************************************************************************************
4667 SUBROUTINE pw_copy_from_array_r3d_c3d_gs (pw, array)
4668 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw
4669 COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(IN) :: array
4670
4671 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
4672
4673 INTEGER :: handle
4674
4675 CALL timeset(routinen, handle)
4676
4677!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
4678 pw%array = real(array, kind=dp)
4679!$OMP END PARALLEL WORKSHARE
4680
4681 CALL timestop(handle)
4682 END SUBROUTINE pw_copy_from_array_r3d_c3d_gs
4683
4684! **************************************************************************************************
4685!> \brief pw2 = alpha*pw1 + beta*pw2
4686!> alpha defaults to 1, beta defaults to 1
4687!> \param pw1 ...
4688!> \param pw2 ...
4689!> \param alpha ...
4690!> \param beta ...
4691!> \param allow_noncompatible_grids ...
4692!> \par History
4693!> JGH (21-Feb-2003) : added reference grid functionality
4694!> JGH (01-Dec-2007) : rename and remove complex alpha
4695!> \author apsi
4696!> \note
4697!> Currently only summing up of respective types allowed,
4698!> in order to avoid errors
4699! **************************************************************************************************
4700 SUBROUTINE pw_axpy_r3d_c3d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)
4701
4702 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
4703 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2
4704 REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
4705 LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids
4706
4707 CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'
4708
4709 INTEGER :: handle
4710 LOGICAL :: my_allow_noncompatible_grids
4711 REAL(KIND=dp) :: my_alpha, my_beta
4712
4713 CALL timeset(routinen, handle)
4714
4715 my_alpha = 1.0_dp
4716 IF (PRESENT(alpha)) my_alpha = alpha
4717
4718 my_beta = 1.0_dp
4719 IF (PRESENT(beta)) my_beta = beta
4720
4721 my_allow_noncompatible_grids = .false.
4722 IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids
4723
4724 IF (my_beta /= 1.0_dp) THEN
4725 IF (my_beta == 0.0_dp) THEN
4726 CALL pw_zero(pw2)
4727 ELSE
4728!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
4729 pw2%array = pw2%array*my_beta
4730!$OMP END PARALLEL WORKSHARE
4731 END IF
4732 END IF
4733
4734 IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
4735 IF (my_alpha == 1.0_dp) THEN
4736!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
4737 pw2%array = pw2%array + cmplx(pw1%array, 0.0_dp, kind=dp)
4738!$OMP END PARALLEL WORKSHARE
4739 ELSE IF (my_alpha /= 0.0_dp) THEN
4740!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2, my_alpha)
4741 pw2%array = pw2%array + my_alpha* cmplx(pw1%array, 0.0_dp, kind=dp)
4742!$OMP END PARALLEL WORKSHARE
4743 END IF
4744
4745 ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN
4746
4747 IF (any(shape(pw1%array) /= shape(pw2%array))) &
4748 cpabort("Noncommensurate grids not implemented for 3D grids!")
4749
4750 IF (my_alpha == 1.0_dp) THEN
4751!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
4752 pw2%array = pw2%array + cmplx(pw1%array, 0.0_dp, kind=dp)
4753!$OMP END PARALLEL WORKSHARE
4754 ELSE
4755!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2,my_alpha)
4756 pw2%array = pw2%array + my_alpha* cmplx(pw1%array, 0.0_dp, kind=dp)
4757!$OMP END PARALLEL WORKSHARE
4758 END IF
4759
4760 ELSE
4761
4762 cpabort("Grids not compatible")
4763
4764 END IF
4765
4766 CALL timestop(handle)
4767
4768 END SUBROUTINE pw_axpy_r3d_c3d_gs
4769
! **************************************************************************************************
!> \brief pw_out = pw_out + alpha * pw1 * pw2
!>        alpha defaults to 1
!> \param pw_out accumulator field (real 3d), updated in place
!> \param pw1 first factor (real 3d)
!> \param pw2 second factor (complex 3d); only its real part contributes
!> \param alpha optional scale factor, defaults to 1
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_multiply_r3d_c3d_gs (pw_out, pw1, pw2, alpha)

      TYPE(pw_r3d_gs_type), INTENT(INOUT)                :: pw_out
      TYPE(pw_r3d_gs_type), INTENT(IN)                   :: pw1
      TYPE(pw_c3d_gs_type), INTENT(IN)                   :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL                :: alpha

      CHARACTER(len=*), PARAMETER                        :: routineN = 'pw_multiply'

      INTEGER                                            :: handle
      REAL(KIND=dp)                                      :: fac

      CALL timeset(routineN, handle)

      fac = 1.0_dp
      IF (PRESENT(alpha)) fac = alpha

      ! the element-wise product is only defined on one and the same grid
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      IF (fac == 1.0_dp) THEN
         ! avoid the multiplication with alpha in the common default case
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array*REAL(pw2%array, KIND=dp)
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(fac, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + fac*pw1%array*REAL(pw2%array, KIND=dp)
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_r3d_c3d_gs
4812
! **************************************************************************************************
!> \brief In-place product: pw1 = pw1 * REAL(pw2)
!> \param pw1 field scaled in place (real 3d)
!> \param pw2 field supplying the multiplier (complex 3d); only its real part is used
! **************************************************************************************************
   SUBROUTINE pw_multiply_with_r3d_c3d_gs (pw1, pw2)
      TYPE(pw_r3d_gs_type), INTENT(INOUT)                :: pw1
      TYPE(pw_c3d_gs_type), INTENT(IN)                   :: pw2

      CHARACTER(LEN=*), PARAMETER                        :: routineN = 'pw_multiply_with'

      INTEGER                                            :: handle

      CALL timeset(routineN, handle)

      ! both fields must live on the identical grid object
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Incompatible grids!")

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
      pw1%array = pw1%array*REAL(pw2%array, KIND=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_with_r3d_c3d_gs
4838
! **************************************************************************************************
!> \brief Calculate integral over unit cell for functions in plane wave basis;
!>        only the real part of pw2 enters the result.
!> \param pw1 first factor (real 3d)
!> \param pw2 second factor (complex 3d)
!> \param sumtype do_standard_sum or do_accurate_sum (default: accurate)
!> \param just_sum if true, return the bare sum without the volume factor
!> \param local_only if true, skip the parallel reduction over ranks
!> \return the (real part of the) integral
!> \par History
!>      JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
!> \author apsi
! **************************************************************************************************
   FUNCTION pw_integral_ab_r3d_c3d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_r3d_gs_type), INTENT(IN)                   :: pw1
      TYPE(pw_c3d_gs_type), INTENT(IN)                   :: pw2
      INTEGER, INTENT(IN), OPTIONAL                      :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL                      :: just_sum, local_only
      REAL(KIND=dp)                                      :: integral_value

      CHARACTER(len=*), PARAMETER                        :: routineN = 'pw_integral_ab'

      INTEGER                                            :: handle, my_sumtype
      LOGICAL                                            :: only_local, skip_volume

      CALL timeset(routineN, handle)

      my_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) my_sumtype = sumtype

      only_local = .FALSE.
      IF (PRESENT(local_only)) only_local = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      skip_volume = .FALSE.
      IF (PRESENT(just_sum)) skip_volume = just_sum

      IF (my_sumtype == do_standard_sum) THEN
         ! plain intrinsic SUM
         integral_value = SUM(pw1%array*REAL(pw2%array, KIND=dp))
      ELSE
         ! compensated summation for improved numerical accuracy
         integral_value = accurate_sum(pw1%array*REAL(pw2%array, KIND=dp))
      END IF

      ! scale by the cell volume unless the caller asked for the bare sum
      IF (.NOT. skip_volume) integral_value = integral_value*pw1%pw_grid%vol

      ! reduce over all ranks that hold a slice of a distributed grid
      IF (.NOT. only_local .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_r3d_c3d_gs
4905
! **************************************************************************************************
!> \brief copy a pw type variable: pw2 = REAL(pw1)
!> \param pw1 source field (complex 1d, real space)
!> \param pw2 destination field (real 1d, real space); overwritten
!> \par History
!>      JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
!>                         in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
!>      JGH (21-Feb-2003) : Code for generalized reference grids
!> \author apsi
!> \note
!>      Currently only copying of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_copy_c1d_r1d_rs (pw1, pw2)

      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_rs_type), INTENT(INOUT) :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER :: handle
      INTEGER :: i, j, ng, ng1, ng2, ns

      CALL timeset(routinen, handle)

      ! real-space instance: spherical grids are rejected outright below
      IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")
      IF (pw1%pw_grid%spherical) &
         cpabort("Spherical grids only exist in reciprocal space!")

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         ! NOTE(review): this spherical branch is unreachable in this (real-space)
         ! instance because spherical grids abort above; presumably kept for symmetry
         ! with the reciprocal-space template instance - confirm against the template.
         IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
            IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
               ! copy the common leading part and zero-pad the remainder of pw2
               ng1 = SIZE(pw1%array)
               ng2 = SIZE(pw2%array)
               ng = min(ng1, ng2)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
               pw2%array(1:ng) = real(pw1%array(1:ng), kind=dp)
!$OMP END PARALLEL WORKSHARE
               IF (ng2 > ng) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
                  pw2%array(ng + 1:ng2) = 0.0_dp
!$OMP END PARALLEL WORKSHARE
               END IF
            ELSE
               cpabort("Copies between spherical grids require compatible grids!")
            END IF
         ELSE
            ng1 = SIZE(pw1%array)
            ng2 = SIZE(pw2%array)
            ! NOTE(review): ns is computed but never used in this routine
            ns = 2*max(ng1, ng2)

            IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
               ! pw1 lives on the reference grid of pw2: map via pw2's gidx table
               IF (ng1 >= ng2) THEN
                  ! gather: every pw2 element has a source in pw1
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  ! scatter: zero pw2 first, then place the available pw1 elements
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
               ! pw2 lives on the reference grid of pw1: map via pw1's gidx table
               IF (ng1 >= ng2) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               cpabort("Copy not implemented!")
            END IF

         END IF

      ELSE
         ! identical grid objects: plain whole-array copy of the real part
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
         pw2%array = real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_copy_c1d_r1d_rs
5007
! **************************************************************************************************
!> \brief Export the real part of a complex pw field into a plain array.
!> \param pw source field (complex 1d)
!> \param array destination; overwritten with REAL(pw%array)
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_c1d_r1d_rs (pw, array)
      TYPE(pw_c1d_rs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), DIMENSION(:), INTENT(INOUT)         :: array

      CHARACTER(len=*), PARAMETER                        :: routineN = 'pw_copy_to_array'

      INTEGER                                            :: handle

      CALL timeset(routineN, handle)

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(array, pw)
      array(:) = REAL(pw%array(:), KIND=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)
   END SUBROUTINE pw_copy_to_array_c1d_r1d_rs
5029
! **************************************************************************************************
!> \brief Import a real array into a complex pw field (imaginary part set to 0).
!> \param pw destination field (complex 1d); its data array is overwritten
!> \param array real-valued source data
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_c1d_r1d_rs (pw, array)
      TYPE(pw_c1d_rs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), DIMENSION(:), INTENT(IN)            :: array

      CHARACTER(len=*), PARAMETER                        :: routineN = 'pw_copy_from_array'

      INTEGER                                            :: handle

      CALL timeset(routineN, handle)

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(array, pw)
      pw%array = CMPLX(array, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)
   END SUBROUTINE pw_copy_from_array_c1d_r1d_rs
5051
! **************************************************************************************************
!> \brief pw2 = alpha*pw1 + beta*pw2
!>        alpha defaults to 1, beta defaults to 1
!> \param pw1 source field (complex 1d); only its real part enters pw2
!> \param pw2 accumulator field (real 1d), updated in place
!> \param alpha optional scale applied to pw1, default 1
!> \param beta optional scale applied to pw2 first, default 1
!> \param allow_noncompatible_grids allow grids that fail pw_compatible, default false
!> \par History
!>      JGH (21-Feb-2003) : added reference grid functionality
!>      JGH (01-Dec-2007) : rename and remove complex alpha
!> \author apsi
!> \note
!>      Currently only summing up of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_axpy_c1d_r1d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_rs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta
      INTEGER :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! step 1: pw2 = beta*pw2 (skipped for the no-op beta == 1)
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      ! step 2: pw2 = pw2 + alpha*REAL(pw1)
      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! identical grid objects: plain whole-array update
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ! different but compatible grids: operate on the common leading part
         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         IF (pw1%pw_grid%spherical) THEN
            ! spherical cutoff grids: elements are index-aligned, add 1:ng directly
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + real(pw1%array(i), kind=dp)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* real(pw1%array(i), kind=dp)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw1 lives on pw2's reference grid: translate indices via pw2's gidx
            IF (ng1 >= ng2) THEN
               ! gather from the larger pw1 into each pw2 element
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               ! scatter the smaller pw1 into the mapped positions of pw2
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! pw2 lives on pw1's reference grid: translate indices via pw1's gidx
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            ! neither grid references the other: no index mapping available
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_c1d_r1d_rs
5216
! **************************************************************************************************
!> \brief pw_out = pw_out + alpha * pw1 * pw2
!>        alpha defaults to 1
!> \param pw_out accumulator field (complex 1d), updated in place
!> \param pw1 first factor (complex 1d)
!> \param pw2 second factor (real 1d), promoted to complex with zero imaginary part
!> \param alpha optional scale factor, defaults to 1
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_multiply_c1d_r1d_rs (pw_out, pw1, pw2, alpha)

      TYPE(pw_c1d_rs_type), INTENT(INOUT)                :: pw_out
      TYPE(pw_c1d_rs_type), INTENT(IN)                   :: pw1
      TYPE(pw_r1d_rs_type), INTENT(IN)                   :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL                :: alpha

      CHARACTER(len=*), PARAMETER                        :: routineN = 'pw_multiply'

      INTEGER                                            :: handle
      REAL(KIND=dp)                                      :: fac

      CALL timeset(routineN, handle)

      fac = 1.0_dp
      IF (PRESENT(alpha)) fac = alpha

      ! the element-wise product is only defined on one and the same grid
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      IF (fac == 1.0_dp) THEN
         ! avoid the multiplication with alpha in the common default case
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array*CMPLX(pw2%array, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(fac, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + fac*pw1%array*CMPLX(pw2%array, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_c1d_r1d_rs
5259
! **************************************************************************************************
!> \brief In-place product: pw1 = pw1 * pw2 (pw2 promoted to complex)
!> \param pw1 field scaled in place (complex 1d)
!> \param pw2 field supplying the multiplier (real 1d)
! **************************************************************************************************
   SUBROUTINE pw_multiply_with_c1d_r1d_rs (pw1, pw2)
      TYPE(pw_c1d_rs_type), INTENT(INOUT)                :: pw1
      TYPE(pw_r1d_rs_type), INTENT(IN)                   :: pw2

      CHARACTER(LEN=*), PARAMETER                        :: routineN = 'pw_multiply_with'

      INTEGER                                            :: handle

      CALL timeset(routineN, handle)

      ! both fields must live on the identical grid object
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Incompatible grids!")

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
      pw1%array = pw1%array*CMPLX(pw2%array, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_with_c1d_r1d_rs
5285
! **************************************************************************************************
!> \brief Calculate integral over unit cell for functions in plane wave basis;
!>        only the real part of the product enters the result.
!> \param pw1 first factor (complex 1d)
!> \param pw2 second factor (real 1d)
!> \param sumtype do_standard_sum or do_accurate_sum (default: accurate)
!> \param just_sum if true, return the bare sum without the dvol factor
!> \param local_only if true, skip the parallel reduction over ranks
!> \return the (real part of the) integral
!> \par History
!>      JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
!> \author apsi
! **************************************************************************************************
   FUNCTION pw_integral_ab_c1d_r1d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_rs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! do standard sum
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum
         integral_value = sum(real(pw1%array, kind=dp)*pw2%array) !? complex bit

      ELSE

         ! Do accurate sum (compensated summation)
         integral_value = accurate_sum(real(pw1%array, kind=dp)*pw2%array) !? complex bit

      END IF

      ! scale by the volume element per grid point unless the bare sum was requested
      IF (.NOT. my_just_sum) THEN
         integral_value = integral_value*pw1%pw_grid%dvol
      END IF

      ! halfspace grids store only half the g-vectors: double the sum, then
      ! remove the double-counted g=0 term if this rank holds it
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   REAL(conjg(pw1%array(1))*pw2%array(1), kind=dp)
      END IF

      ! reduce over all ranks that hold a slice of a distributed grid
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_c1d_r1d_rs
! **************************************************************************************************
!> \brief copy a pw type variable: pw2 = REAL(pw1)
!> \param pw1 source field (complex 1d, reciprocal space)
!> \param pw2 destination field (real 1d, reciprocal space); overwritten
!> \par History
!>      JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
!>                         in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
!>      JGH (21-Feb-2003) : Code for generalized reference grids
!> \author apsi
!> \note
!>      Currently only copying of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_copy_c1d_r1d_gs (pw1, pw2)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER :: handle
      INTEGER :: i, j, ng, ng1, ng2, ns

      CALL timeset(routinen, handle)

      ! a spherical grid can only be copied to/from another spherical grid
      IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
            ! spherical cutoff grids: copy the common leading part, zero-pad the rest
            IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
               ng1 = SIZE(pw1%array)
               ng2 = SIZE(pw2%array)
               ng = min(ng1, ng2)
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
               pw2%array(1:ng) = real(pw1%array(1:ng), kind=dp)
!$OMP END PARALLEL WORKSHARE
               IF (ng2 > ng) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
                  pw2%array(ng + 1:ng2) = 0.0_dp
!$OMP END PARALLEL WORKSHARE
               END IF
            ELSE
               cpabort("Copies between spherical grids require compatible grids!")
            END IF
         ELSE
            ng1 = SIZE(pw1%array)
            ng2 = SIZE(pw2%array)
            ! NOTE(review): ns is computed but never used in this routine
            ns = 2*max(ng1, ng2)

            IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
               ! pw1 lives on the reference grid of pw2: map via pw2's gidx table
               IF (ng1 >= ng2) THEN
                  ! gather: every pw2 element has a source in pw1
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  ! scatter: zero pw2 first, then place the available pw1 elements
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
               ! pw2 lives on the reference grid of pw1: map via pw1's gidx table
               IF (ng1 >= ng2) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               cpabort("Copy not implemented!")
            END IF

         END IF

      ELSE
         ! identical grid objects: plain whole-array copy of the real part
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
         pw2%array = real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_copy_c1d_r1d_gs
5456
! **************************************************************************************************
!> \brief Export the real part of a complex pw field into a plain array.
!> \param pw source field (complex 1d)
!> \param array destination; overwritten with REAL(pw%array)
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_c1d_r1d_gs (pw, array)
      TYPE(pw_c1d_gs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), DIMENSION(:), INTENT(INOUT)         :: array

      CHARACTER(len=*), PARAMETER                        :: routineN = 'pw_copy_to_array'

      INTEGER                                            :: handle

      CALL timeset(routineN, handle)

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(array, pw)
      array(:) = REAL(pw%array(:), KIND=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)
   END SUBROUTINE pw_copy_to_array_c1d_r1d_gs
5478
! **************************************************************************************************
!> \brief Import a real array into a complex pw field (imaginary part set to 0).
!> \param pw destination field (complex 1d); its data array is overwritten
!> \param array real-valued source data
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_c1d_r1d_gs (pw, array)
      TYPE(pw_c1d_gs_type), INTENT(IN)                   :: pw
      REAL(KIND=dp), DIMENSION(:), INTENT(IN)            :: array

      CHARACTER(len=*), PARAMETER                        :: routineN = 'pw_copy_from_array'

      INTEGER                                            :: handle

      CALL timeset(routineN, handle)

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(array, pw)
      pw%array = CMPLX(array, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)
   END SUBROUTINE pw_copy_from_array_c1d_r1d_gs
5500
! **************************************************************************************************
!> \brief pw2 = alpha*pw1 + beta*pw2
!>        alpha defaults to 1, beta defaults to 1
!> \param pw1 source field (complex 1d); only its real part enters pw2
!> \param pw2 accumulator field (real 1d), updated in place
!> \param alpha optional scale applied to pw1, default 1
!> \param beta optional scale applied to pw2 first, default 1
!> \param allow_noncompatible_grids allow grids that fail pw_compatible, default false
!> \par History
!>      JGH (21-Feb-2003) : added reference grid functionality
!>      JGH (01-Dec-2007) : rename and remove complex alpha
!> \author apsi
!> \note
!>      Currently only summing up of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_axpy_c1d_r1d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta
      INTEGER :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! step 1: pw2 = beta*pw2 (skipped for the no-op beta == 1)
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      ! step 2: pw2 = pw2 + alpha*REAL(pw1)
      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! identical grid objects: plain whole-array update
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ! different but compatible grids: operate on the common leading part
         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         IF (pw1%pw_grid%spherical) THEN
            ! spherical cutoff grids: elements are index-aligned, add 1:ng directly
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + real(pw1%array(i), kind=dp)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* real(pw1%array(i), kind=dp)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw1 lives on pw2's reference grid: translate indices via pw2's gidx
            IF (ng1 >= ng2) THEN
               ! gather from the larger pw1 into each pw2 element
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               ! scatter the smaller pw1 into the mapped positions of pw2
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! pw2 lives on pw1's reference grid: translate indices via pw1's gidx
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* real(pw1%array(j), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* real(pw1%array(i), kind=dp)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            ! neither grid references the other: no index mapping available
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_c1d_r1d_gs
5665
! **************************************************************************************************
!> \brief pw_out = pw_out + alpha * pw1 * pw2
!>        alpha defaults to 1
!> \param pw_out accumulator field (complex 1d), updated in place
!> \param pw1 first factor (complex 1d)
!> \param pw2 second factor (real 1d), promoted to complex with zero imaginary part
!> \param alpha optional scale factor, defaults to 1
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_multiply_c1d_r1d_gs (pw_out, pw1, pw2, alpha)

      TYPE(pw_c1d_gs_type), INTENT(INOUT)                :: pw_out
      TYPE(pw_c1d_gs_type), INTENT(IN)                   :: pw1
      TYPE(pw_r1d_gs_type), INTENT(IN)                   :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL                :: alpha

      CHARACTER(len=*), PARAMETER                        :: routineN = 'pw_multiply'

      INTEGER                                            :: handle
      REAL(KIND=dp)                                      :: fac

      CALL timeset(routineN, handle)

      fac = 1.0_dp
      IF (PRESENT(alpha)) fac = alpha

      ! the element-wise product is only defined on one and the same grid
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      IF (fac == 1.0_dp) THEN
         ! avoid the multiplication with alpha in the common default case
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array*CMPLX(pw2%array, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE
      ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(fac, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + fac*pw1%array*CMPLX(pw2%array, 0.0_dp, KIND=dp)
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_c1d_r1d_gs
5708
! **************************************************************************************************
!> \brief In-place pointwise product: pw1 = pw1 * pw2
!> \param pw1 complex function, overwritten by the product
!> \param pw2 real function, promoted to complex with zero imaginary part
! **************************************************************************************************
   SUBROUTINE pw_multiply_with_c1d_r1d_gs (pw1, pw2)
      TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw2

      CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'

      INTEGER :: handle

      CALL timeset(routineN, handle)

      ! both functions must live on the very same grid object
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Incompatible grids!")

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
      pw1%array = cmplx(pw2%array, 0.0_dp, kind=dp)*pw1%array
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_with_c1d_r1d_gs
5734
! **************************************************************************************************
!> \brief Calculate integral over unit cell for functions in plane wave basis
!>        only returns the real part of it ......
!> \param pw1 complex coefficients in reciprocal space
!> \param pw2 real coefficients on the same grid
!> \param sumtype summation scheme: do_accurate_sum (default) or do_standard_sum
!> \param just_sum if true, skip the multiplication by the cell volume
!> \param local_only if true, skip the reduction over the parallel group
!> \return real part of the integral
!> \par History
!>      JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
!> \author apsi
! **************************************************************************************************
   FUNCTION pw_integral_ab_c1d_r1d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! compensated (accurate) summation is the default
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! do standard sum
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum

         ! only the real part of pw1 enters the product
         integral_value = sum(real(pw1%array, kind=dp)*pw2%array) !? complex bit

      ELSE

         ! Do accurate sum
         integral_value = accurate_sum(real(pw1%array, kind=dp)*pw2%array) !? complex bit

      END IF

      IF (.NOT. my_just_sum) THEN
         ! turn the plain coefficient sum into an integral over the cell
         integral_value = integral_value*pw1%pw_grid%vol
      END IF

      ! half-space grids store only half of the G vectors: double the sum and
      ! correct for the G=0 component, which has no mirror image
      ! NOTE(review): the subtracted G=0 term is not scaled by vol — confirm intended
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   REAL(conjg(pw1%array(1))*pw2%array(1), kind=dp)
      END IF

      ! each rank holds only its local coefficients: reduce over the group
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_c1d_r1d_gs
5806
! **************************************************************************************************
!> \brief G^2-weighted overlap of a complex and a real function in reciprocal space
!> \param pw1 complex coefficients
!> \param pw2 real coefficients on the same grid
!> \return real part of the weighted integral
! **************************************************************************************************
   FUNCTION pw_integral_a2b_c1d_r1d (pw1, pw2) RESULT(integral_value)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw2
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_a2b'

      INTEGER :: handle

      CALL timeset(routinen, handle)

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      ! accumulate Re(conjg(pw1)*pw2)*|G|^2 with compensated summation
      integral_value = accurate_sum(real(conjg(pw1%array), kind=dp)*pw2%array*pw1%pw_grid%gsq)

      ! on half-space grids each stored G vector also represents its mirror image
      IF (pw1%pw_grid%grid_span == halfspace) integral_value = 2.0_dp*integral_value

      integral_value = integral_value*pw1%pw_grid%vol

      ! combine the partial sums of all ranks holding parts of the grid
      IF (pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)
      CALL timestop(handle)

   END FUNCTION pw_integral_a2b_c1d_r1d
! **************************************************************************************************
!> \brief copy a pw type variable
!> \param pw1 source function
!> \param pw2 destination function (overwritten)
!> \par History
!>      JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
!>                         in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
!>      JGH (21-Feb-2003) : Code for generalized reference grids
!> \author apsi
!> \note
!>      Currently only copying of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_copy_c1d_c1d_rs (pw1, pw2)

      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER :: handle
      INTEGER :: i, j, ng, ng1, ng2, ns

      CALL timeset(routinen, handle)

      IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")
      IF (pw1%pw_grid%spherical) &
         cpabort("Spherical grids only exist in reciprocal space!")

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         ! NOTE(review): this spherical branch is unreachable in this real-space
         ! instance (spherical grids abort above); kept from the template
         IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
            IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
               ng1 = SIZE(pw1%array)
               ng2 = SIZE(pw2%array)
               ng = min(ng1, ng2)
               ! copy the common leading part ...
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
               pw2%array(1:ng) = pw1%array(1:ng)
!$OMP END PARALLEL WORKSHARE
               ! ... and zero-pad the remainder of the larger destination
               IF (ng2 > ng) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
                  pw2%array(ng + 1:ng2) = cmplx(0.0_dp, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
               END IF
            ELSE
               cpabort("Copies between spherical grids require compatible grids!")
            END IF
         ELSE
            ng1 = SIZE(pw1%array)
            ng2 = SIZE(pw2%array)
            ! ns is computed but not used below — template leftover
            ns = 2*max(ng1, ng2)

            IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
               ! pw1 lives on the reference grid of pw2
               IF (ng1 >= ng2) THEN
                  ! gather: pick from the larger grid via pw2's index map
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  ! scatter: zero the destination, then place each source value
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
               ! pw2 lives on the reference grid of pw1: use pw1's index map
               IF (ng1 >= ng2) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               ! no reference-grid relation between the two grids
               cpabort("Copy not implemented!")
            END IF

         END IF

      ELSE
         ! identical grid object: plain element-wise copy
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
         pw2%array = pw1%array
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_copy_c1d_c1d_rs
5942
! **************************************************************************************************
!> \brief Export the coefficients of a pw function into a plain Fortran array
!> \param pw source pw function
!> \param array destination buffer of matching size
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_c1d_c1d_rs (pw, array)
      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw
      COMPLEX(KIND=dp), DIMENSION(:), INTENT(INOUT) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'

      INTEGER :: handle

      CALL timeset(routineN, handle)

      ! threaded element-wise copy
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(array, pw)
      array(:) = pw%array(:)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)
   END SUBROUTINE pw_copy_to_array_c1d_c1d_rs
5964
! **************************************************************************************************
!> \brief Import the coefficients of a pw function from a plain Fortran array
!> \param pw destination pw function (its data array is overwritten)
!> \param array source buffer of matching size
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_c1d_c1d_rs (pw, array)
      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw
      COMPLEX(KIND=dp), DIMENSION(:), INTENT(IN) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER :: handle

      CALL timeset(routineN, handle)

      ! threaded element-wise copy into the pw data array
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(array, pw)
      pw%array(:) = array
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)
   END SUBROUTINE pw_copy_from_array_c1d_c1d_rs
5986
! **************************************************************************************************
!> \brief pw2 = alpha*pw1 + beta*pw2
!>        alpha defaults to 1, beta defaults to 1
!> \param pw1 function to be added (unchanged)
!> \param pw2 accumulator, updated in place
!> \param alpha optional scale for pw1 (default 1)
!> \param beta optional scale applied to pw2 before the addition (default 1)
!> \param allow_noncompatible_grids if true, proceed even when pw_compatible fails
!> \par History
!>      JGH (21-Feb-2003) : added reference grid functionality
!>      JGH (01-Dec-2007) : rename and remove complex alpha
!> \author apsi
!> \note
!>      Currently only summing up of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_axpy_c1d_c1d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta
      INTEGER :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! scale the accumulator first; beta == 0 zeroes it explicitly (no multiply)
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! identical grid object: whole-array update; alpha == 0 is a no-op
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         IF (pw1%pw_grid%spherical) THEN
            ! spherical grids: coefficients align index-by-index on the overlap
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw1 lives on the reference grid of pw2: use pw2's index map
            IF (ng1 >= ng2) THEN
               ! gather from the larger source grid
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               ! scatter into the larger destination grid
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! pw2 lives on the reference grid of pw1: use pw1's index map
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            ! no reference-grid relation between the two grids
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_c1d_c1d_rs
6151
! **************************************************************************************************
!> \brief pw_out = pw_out + alpha * pw1 * pw2
!>        alpha defaults to 1
!> \param pw_out accumulated result, updated in place
!> \param pw1 first factor
!> \param pw2 second factor
!> \param alpha optional scale applied to the product (default 1)
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_multiply_c1d_c1d_rs (pw_out, pw1, pw2, alpha)

      TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: handle
      REAL(KIND=dp) :: fac

      CALL timeset(routineN, handle)

      fac = 1.0_dp
      IF (PRESENT(alpha)) fac = alpha

      ! the update is only defined when all three functions share the grid object
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      IF (fac /= 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(fac, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + fac*pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      ELSE
         ! skip the multiplication by alpha when it is exactly one
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_c1d_c1d_rs
6194
! **************************************************************************************************
!> \brief In-place pointwise product: pw1 = pw1 * pw2
!> \param pw1 function overwritten by the product
!> \param pw2 second factor (unchanged)
! **************************************************************************************************
   SUBROUTINE pw_multiply_with_c1d_c1d_rs (pw1, pw2)
      TYPE(pw_c1d_rs_type), INTENT(INOUT) :: pw1
      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw2

      CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'

      INTEGER :: handle

      CALL timeset(routineN, handle)

      ! both functions must live on the very same grid object
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Incompatible grids!")

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
      pw1%array = pw2%array*pw1%array
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_with_c1d_c1d_rs
6220
! **************************************************************************************************
!> \brief Calculate integral over unit cell for functions in plane wave basis
!>        only returns the real part of it ......
!> \param pw1 first complex function
!> \param pw2 second complex function on the same grid
!> \param sumtype summation scheme: do_accurate_sum (default) or do_standard_sum
!> \param just_sum if true, skip the multiplication by the grid-point volume
!> \param local_only if true, skip the reduction over the parallel group
!> \return real part of the integral
!> \par History
!>      JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
!> \author apsi
! **************************************************************************************************
   FUNCTION pw_integral_ab_c1d_c1d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_rs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab'

      INTEGER :: handle, loc_sumtype
      LOGICAL :: my_just_sum, my_local_only

      CALL timeset(routinen, handle)

      ! compensated (accurate) summation is the default
      loc_sumtype = do_accurate_sum
      IF (PRESENT(sumtype)) loc_sumtype = sumtype

      my_local_only = .false.
      IF (PRESENT(local_only)) my_local_only = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      my_just_sum = .false.
      IF (PRESENT(just_sum)) my_just_sum = just_sum

      ! do standard sum
      IF (loc_sumtype == do_standard_sum) THEN

         ! Do standard sum

         ! Re(conjg(pw1)*pw2), summed in array order
         integral_value = sum(real(conjg(pw1%array) &
                                   *pw2%array, kind=dp)) !? complex bit

      ELSE

         ! Do accurate sum
         integral_value = accurate_sum(real(conjg(pw1%array) &
                                            *pw2%array, kind=dp)) !? complex bit

      END IF

      IF (.NOT. my_just_sum) THEN
         ! real-space quadrature: scale by the volume element per grid point
         integral_value = integral_value*pw1%pw_grid%dvol
      END IF

      ! half-space correction; NOTE(review): template-generated for the
      ! real-space case too — confirm grid_span can be halfspace here
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   REAL(conjg(pw1%array(1))*pw2%array(1), kind=dp)
      END IF

      ! each rank holds only its local grid points: reduce over the group
      IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_c1d_c1d_rs
! **************************************************************************************************
!> \brief copy a pw type variable
!> \param pw1 source function
!> \param pw2 destination function (overwritten)
!> \par History
!>      JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
!>                         in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
!>      JGH (21-Feb-2003) : Code for generalized reference grids
!> \author apsi
!> \note
!>      Currently only copying of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_copy_c1d_c1d_gs (pw1, pw2)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER :: handle
      INTEGER :: i, j, ng, ng1, ng2, ns

      CALL timeset(routinen, handle)

      ! mixed spherical/non-spherical copies are not supported
      IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%spherical .AND. pw2%pw_grid%spherical) THEN
            IF (pw_compatible(pw1%pw_grid, pw2%pw_grid)) THEN
               ng1 = SIZE(pw1%array)
               ng2 = SIZE(pw2%array)
               ng = min(ng1, ng2)
               ! copy the common leading part ...
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, pw1, pw2)
               pw2%array(1:ng) = pw1%array(1:ng)
!$OMP END PARALLEL WORKSHARE
               ! ... and zero-pad the remainder of the larger destination
               IF (ng2 > ng) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(ng, ng2, pw2)
                  pw2%array(ng + 1:ng2) = cmplx(0.0_dp, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
               END IF
            ELSE
               cpabort("Copies between spherical grids require compatible grids!")
            END IF
         ELSE
            ng1 = SIZE(pw1%array)
            ng2 = SIZE(pw2%array)
            ! ns is computed but not used below — template leftover
            ns = 2*max(ng1, ng2)

            IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
               ! pw1 lives on the reference grid of pw2
               IF (ng1 >= ng2) THEN
                  ! gather: pick from the larger grid via pw2's index map
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  ! scatter: zero the destination, then place each source value
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
               ! pw2 lives on the reference grid of pw1: use pw1's index map
               IF (ng1 >= ng2) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng2, pw1, pw2)
                  DO i = 1, ng2
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE
                  CALL pw_zero(pw2)
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng1, pw1, pw2)
                  DO i = 1, ng1
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               ! unrelated grids: fall back to the general G-vector matching copy
               CALL pw_copy_match(pw1, pw2)
            END IF

         END IF

      ELSE
         ! identical grid object: plain element-wise copy
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
         pw2%array = pw1%array
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_copy_c1d_c1d_gs
6393
! **************************************************************************************************
!> \brief Export the coefficients of a pw function into a plain Fortran array
!> \param pw source pw function
!> \param array destination buffer of matching size
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_c1d_c1d_gs (pw, array)
      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
      COMPLEX(KIND=dp), DIMENSION(:), INTENT(INOUT) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'

      INTEGER :: handle

      CALL timeset(routineN, handle)

      ! threaded element-wise copy
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(array, pw)
      array(:) = pw%array(:)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)
   END SUBROUTINE pw_copy_to_array_c1d_c1d_gs
6415
! **************************************************************************************************
!> \brief Import the coefficients of a pw function from a plain Fortran array
!> \param pw destination pw function (its data array is overwritten)
!> \param array source buffer of matching size
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_c1d_c1d_gs (pw, array)
      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
      COMPLEX(KIND=dp), DIMENSION(:), INTENT(IN) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER :: handle

      CALL timeset(routineN, handle)

      ! threaded element-wise copy into the pw data array
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(array, pw)
      pw%array(:) = array
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)
   END SUBROUTINE pw_copy_from_array_c1d_c1d_gs
6437
! **************************************************************************************************
!> \brief pw2 = alpha*pw1 + beta*pw2
!>        alpha defaults to 1, beta defaults to 1
!> \param pw1 function to be added (unchanged)
!> \param pw2 accumulator, updated in place
!> \param alpha optional scale for pw1 (default 1)
!> \param beta optional scale applied to pw2 before the addition (default 1)
!> \param allow_noncompatible_grids if true, proceed even when pw_compatible fails
!> \par History
!>      JGH (21-Feb-2003) : added reference grid functionality
!>      JGH (01-Dec-2007) : rename and remove complex alpha
!> \author apsi
!> \note
!>      Currently only summing up of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_axpy_c1d_c1d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: my_allow_noncompatible_grids
      REAL(KIND=dp) :: my_alpha, my_beta
      INTEGER :: i, j, ng, ng1, ng2

      CALL timeset(routinen, handle)

      my_alpha = 1.0_dp
      IF (PRESENT(alpha)) my_alpha = alpha

      my_beta = 1.0_dp
      IF (PRESENT(beta)) my_beta = beta

      my_allow_noncompatible_grids = .false.
      IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids

      ! scale the accumulator first; beta == 0 zeroes it explicitly (no multiply)
      IF (my_beta /= 1.0_dp) THEN
         IF (my_beta == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
            pw2%array = pw2%array*my_beta
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN

         ! identical grid object: whole-array update; alpha == 0 is a no-op
         IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha)
            pw2%array = pw2%array + my_alpha* pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF

      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN

         ng1 = SIZE(pw1%array)
         ng2 = SIZE(pw2%array)
         ng = min(ng1, ng2)

         IF (pw1%pw_grid%spherical) THEN
            ! spherical grids: coefficients align index-by-index on the overlap
            IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(ng, pw1, pw2)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i) DEFAULT(NONE) SHARED(pw2,pw1,my_alpha,ng)
               DO i = 1, ng
                  pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(i)
               END DO
!$OMP END PARALLEL DO
            END IF
         ELSE IF ((pw1%pw_grid%id_nr == pw2%pw_grid%reference)) THEN
            ! pw1 lives on the reference grid of pw2: use pw2's index map
            IF (ng1 >= ng2) THEN
               ! gather from the larger source grid
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               ! scatter into the larger destination grid
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw2%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE IF ((pw2%pw_grid%id_nr == pw1%pw_grid%reference)) THEN
            ! pw2 lives on the reference grid of pw1: use pw1's index map
            IF (ng1 >= ng2) THEN
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(i) = pw2%array(i) + my_alpha* pw1%array(j)
                  END DO
!$OMP END PARALLEL DO
               END IF
            ELSE
               IF (my_alpha == 1.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               ELSE IF (my_alpha /= 0.0_dp) THEN
!$OMP PARALLEL DO PRIVATE(i,j) DEFAULT(NONE) SHARED(my_alpha, ng, pw1, pw2)
                  DO i = 1, ng
                     j = pw1%pw_grid%gidx(i)
                     pw2%array(j) = pw2%array(j) + my_alpha* pw1%array(i)
                  END DO
!$OMP END PARALLEL DO
               END IF
            END IF
         ELSE
            ! no reference-grid relation between the two grids
            cpabort("Grids not compatible")
         END IF

      ELSE

         cpabort("Grids not compatible")

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_c1d_c1d_gs
6602
! **************************************************************************************************
!> \brief pw_out = pw_out + alpha * pw1 * pw2
!>        alpha defaults to 1
!> \param pw_out accumulated result, updated in place
!> \param pw1 first factor
!> \param pw2 second factor
!> \param alpha optional scale applied to the product (default 1)
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_multiply_c1d_c1d_gs (pw_out, pw1, pw2, alpha)

      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: handle
      REAL(KIND=dp) :: fac

      CALL timeset(routineN, handle)

      fac = 1.0_dp
      IF (PRESENT(alpha)) fac = alpha

      ! the update is only defined when all three functions share the grid object
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      IF (fac /= 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(fac, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + fac*pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      ELSE
         ! skip the multiplication by alpha when it is exactly one
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_c1d_c1d_gs
6645
! **************************************************************************************************
!> \brief In-place pointwise product: pw1 = pw1 * pw2
!> \param pw1 function overwritten by the product
!> \param pw2 second factor (unchanged)
! **************************************************************************************************
   SUBROUTINE pw_multiply_with_c1d_c1d_gs (pw1, pw2)
      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2

      CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'

      INTEGER :: handle

      CALL timeset(routineN, handle)

      ! both functions must live on the very same grid object
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Incompatible grids!")

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
      pw1%array = pw2%array*pw1%array
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_with_c1d_c1d_gs
6671
! **************************************************************************************************
!> \brief Calculate integral over unit cell for functions in plane wave basis,
!>        only the real part of conjg(pw1)*pw2 is returned
!> \param pw1 first function (complex coefficients)
!> \param pw2 second function (complex coefficients)
!> \param sumtype do_standard_sum or do_accurate_sum (default: accurate)
!> \param just_sum if true, skip the volume scaling (default: false)
!> \param local_only if true, skip the parallel reduction (default: false)
!> \return the (real part of the) integral
!> \par History
!>      JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
!> \author apsi
! **************************************************************************************************
   FUNCTION pw_integral_ab_c1d_c1d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab'

      INTEGER :: handle, sum_method
      LOGICAL :: skip_vol_scaling, sum_locally

      CALL timeset(routinen, handle)

      sum_method = do_accurate_sum
      IF (PRESENT(sumtype)) sum_method = sumtype

      sum_locally = .FALSE.
      IF (PRESENT(local_only)) sum_locally = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      skip_vol_scaling = .FALSE.
      IF (PRESENT(just_sum)) skip_vol_scaling = just_sum

      IF (sum_method == do_standard_sum) THEN
         ! plain intrinsic SUM
         integral_value = sum(real(conjg(pw1%array) &
                                   *pw2%array, kind=dp))
      ELSE
         ! compensated summation for reduced round-off
         integral_value = accurate_sum(real(conjg(pw1%array) &
                                            *pw2%array, kind=dp))
      END IF

      IF (.NOT. skip_vol_scaling) THEN
         integral_value = integral_value*pw1%pw_grid%vol
      END IF

      ! a halfspace grid stores only one of each +/-G pair: double the sum,
      ! then correct for the G=0 coefficient, which has no mirror partner
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
         IF (pw1%pw_grid%have_g0) integral_value = integral_value - &
                                                   REAL(conjg(pw1%array(1))*pw2%array(1), kind=dp)
      END IF

      IF (.NOT. sum_locally .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_c1d_c1d_gs
6745
! **************************************************************************************************
!> \brief Integral of conjg(pw1)*pw2 weighted with the grid's gsq values,
!>        only the real part is returned
!> \param pw1 first function (complex coefficients)
!> \param pw2 second function (complex coefficients)
!> \return the weighted integral over the unit cell
! **************************************************************************************************
   FUNCTION pw_integral_a2b_c1d_c1d (pw1, pw2) RESULT(integral_value)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw2
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_a2b'

      INTEGER :: timer

      CALL timeset(routinen, timer)

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      integral_value = accurate_sum(real(conjg(pw1%array)*pw2%array, kind=dp)*pw1%pw_grid%gsq)
      ! halfspace grids keep only one of each +/-G pair
      IF (pw1%pw_grid%grid_span == halfspace) THEN
         integral_value = 2.0_dp*integral_value
      END IF

      integral_value = integral_value*pw1%pw_grid%vol

      IF (pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)
      CALL timestop(timer)

   END FUNCTION pw_integral_a2b_c1d_c1d
! **************************************************************************************************
!> \brief copy a pw type variable: pw2 receives the real part of pw1
!> \param pw1 source grid (complex data)
!> \param pw2 destination grid (real data), overwritten with REAL(pw1)
!> \par History
!>      JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
!>                         in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
!>      JGH (21-Feb-2003) : Code for generalized reference grids
!> \author apsi
!> \note
!>      Currently only copying of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_copy_c3d_r3d_rs (pw1, pw2)

      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER :: handle

      CALL timeset(routinen, handle)

      IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")
      IF (pw1%pw_grid%spherical) &
         cpabort("Spherical grids only exist in reciprocal space!")

      IF (any(shape(pw2%array) /= shape(pw1%array))) &
         cpabort("3D grids must be compatible!")
      ! NOTE(review): a second, identical spherical check ("3D grids must not be
      ! spherical!") was removed here - it was unreachable, since any spherical
      ! grid already triggers the abort above.
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
      pw2%array(:, :, :) = real(pw1%array(:, :, :), kind=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)

   END SUBROUTINE pw_copy_c3d_r3d_rs
6820
! **************************************************************************************************
!> \brief Export the real part of the grid data into a plain 3D array
!> \param pw source grid (complex data)
!> \param array destination array, overwritten with REAL(pw%array)
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_c3d_r3d_rs (pw, array)
      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw
      REAL(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'

      INTEGER :: timer

      CALL timeset(routineN, timer)

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(array, pw)
      array = real(pw%array, kind=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)
   END SUBROUTINE pw_copy_to_array_c3d_r3d_rs
6842
! **************************************************************************************************
!> \brief Import a plain 3D real array into the (complex) grid data
!> \param pw destination grid; its data array is overwritten
!> \param array source values, stored as the real part (imaginary part zero)
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_c3d_r3d_rs (pw, array)
      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw
      REAL(KIND=dp), DIMENSION(:, :, :), INTENT(IN) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER :: timer

      CALL timeset(routineN, timer)

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(array, pw)
      pw%array = cmplx(array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)
   END SUBROUTINE pw_copy_from_array_c3d_r3d_rs
6864
! **************************************************************************************************
!> \brief pw2 = alpha*pw1 + beta*pw2
!>        alpha defaults to 1, beta defaults to 1
!> \param pw1 source grid (complex data, its real part is accumulated)
!> \param pw2 destination grid, updated in place
!> \param alpha optional scale for pw1 (default 1)
!> \param beta optional scale applied to pw2 before accumulation (default 1)
!> \param allow_noncompatible_grids accept grids that are merely shape-equal
!> \par History
!>      JGH (21-Feb-2003) : added reference grid functionality
!>      JGH (01-Dec-2007) : rename and remove complex alpha
!> \author apsi
!> \note
!>      Currently only summing up of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_axpy_c3d_r3d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: permit_mismatch
      REAL(KIND=dp) :: a_scale, b_scale

      CALL timeset(routineN, handle)

      a_scale = 1.0_dp
      b_scale = 1.0_dp
      permit_mismatch = .FALSE.
      IF (PRESENT(alpha)) a_scale = alpha
      IF (PRESENT(beta)) b_scale = beta
      IF (PRESENT(allow_noncompatible_grids)) permit_mismatch = allow_noncompatible_grids

      ! first bring pw2 to beta*pw2; a zero beta just wipes the grid
      IF (b_scale /= 1.0_dp) THEN
         IF (b_scale == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(b_scale, pw2)
            pw2%array = pw2%array*b_scale
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         ! same grid object: accumulate directly (alpha == 0 is a no-op)
         IF (a_scale == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE IF (a_scale /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(a_scale, pw1, pw2)
            pw2%array = pw2%array + a_scale* real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF
      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. permit_mismatch) THEN
         ! different grid objects: the data arrays must at least match in shape
         IF (any(shape(pw1%array) /= shape(pw2%array))) &
            cpabort("Noncommensurate grids not implemented for 3D grids!")
         IF (a_scale == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(a_scale, pw1, pw2)
            pw2%array = pw2%array + a_scale* real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF
      ELSE
         cpabort("Grids not compatible")
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_c3d_r3d_rs
6950
! **************************************************************************************************
!> \brief pw_out = pw_out + alpha * pw1 * pw2
!>        alpha defaults to 1
!> \param pw_out accumulator grid, updated in place
!> \param pw1 first factor (complex data)
!> \param pw2 second factor (real data, promoted to complex)
!> \param alpha optional scale factor for the product (default 1)
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_multiply_c3d_r3d_rs (pw_out, pw1, pw2, alpha)

      TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: handle
      REAL(KIND=dp) :: fac

      CALL timeset(routineN, handle)

      fac = 1.0_dp
      IF (PRESENT(alpha)) fac = alpha

      ! the elementwise product is only defined on one common grid object
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      IF (fac /= 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(fac, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + fac*pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
      ELSE
         ! skip the scalar multiply for the common alpha == 1 case
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_c3d_r3d_rs
6993
! **************************************************************************************************
!> \brief In-place elementwise product: pw1 = pw1 * pw2
!> \param pw1 complex grid data that is scaled in place
!> \param pw2 real multiplier grid (unchanged, promoted to complex)
! **************************************************************************************************
   SUBROUTINE pw_multiply_with_c3d_r3d_rs (pw1, pw2)
      TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw1
      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw2

      CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'

      INTEGER :: timer

      CALL timeset(routineN, timer)

      ! both operands must live on the very same grid object
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Incompatible grids!")

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
      pw1%array = pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)

   END SUBROUTINE pw_multiply_with_c3d_r3d_rs
7019
! **************************************************************************************************
!> \brief Calculate integral over unit cell for functions in plane wave basis,
!>        only the real part of pw1*pw2 is returned
!> \param pw1 first function (complex data, real part used)
!> \param pw2 second function (real data)
!> \param sumtype do_standard_sum or do_accurate_sum (default: accurate)
!> \param just_sum if true, skip the dvol scaling (default: false)
!> \param local_only if true, skip the parallel reduction (default: false)
!> \return the (real part of the) integral
!> \par History
!>      JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
!> \author apsi
! **************************************************************************************************
   FUNCTION pw_integral_ab_c3d_r3d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab'

      INTEGER :: handle, sum_method
      LOGICAL :: skip_vol_scaling, sum_locally

      CALL timeset(routinen, handle)

      sum_method = do_accurate_sum
      IF (PRESENT(sumtype)) sum_method = sumtype

      sum_locally = .FALSE.
      IF (PRESENT(local_only)) sum_locally = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      skip_vol_scaling = .FALSE.
      IF (PRESENT(just_sum)) skip_vol_scaling = just_sum

      IF (sum_method == do_standard_sum) THEN
         ! plain intrinsic SUM
         integral_value = sum(real(pw1%array, kind=dp)*pw2%array)
      ELSE
         ! compensated summation for reduced round-off
         integral_value = accurate_sum(real(pw1%array, kind=dp)*pw2%array)
      END IF

      ! realspace integration: weight the sum with the grid's dvol
      IF (.NOT. skip_vol_scaling) THEN
         integral_value = integral_value*pw1%pw_grid%dvol
      END IF

      IF (.NOT. sum_locally .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_c3d_r3d_rs
! **************************************************************************************************
!> \brief copy a pw type variable: pw2 receives the real part of pw1
!> \param pw1 source grid (complex data)
!> \param pw2 destination grid (real data), overwritten with REAL(pw1)
!> \par History
!>      JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
!>                         in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
!>      JGH (21-Feb-2003) : Code for generalized reference grids
!> \author apsi
!> \note
!>      Currently only copying of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_copy_c3d_r3d_gs (pw1, pw2)

      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER :: timer

      CALL timeset(routineN, timer)

      IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")

      IF (any(shape(pw2%array) /= shape(pw1%array))) &
         cpabort("3D grids must be compatible!")
      IF (pw1%pw_grid%spherical) &
         cpabort("3D grids must not be spherical!")
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
      pw2%array = real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)

   END SUBROUTINE pw_copy_c3d_r3d_gs
7124
! **************************************************************************************************
!> \brief Export the real part of the grid data into a plain 3D array
!> \param pw source grid (complex data)
!> \param array destination array, overwritten with REAL(pw%array)
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_c3d_r3d_gs (pw, array)
      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw
      REAL(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'

      INTEGER :: timer

      CALL timeset(routineN, timer)

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(array, pw)
      array = real(pw%array, kind=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)
   END SUBROUTINE pw_copy_to_array_c3d_r3d_gs
7146
! **************************************************************************************************
!> \brief Import a plain 3D real array into the (complex) grid data
!> \param pw destination grid; its data array is overwritten
!> \param array source values, stored as the real part (imaginary part zero)
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_c3d_r3d_gs (pw, array)
      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw
      REAL(KIND=dp), DIMENSION(:, :, :), INTENT(IN) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER :: timer

      CALL timeset(routineN, timer)

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(array, pw)
      pw%array = cmplx(array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)
   END SUBROUTINE pw_copy_from_array_c3d_r3d_gs
7168
! **************************************************************************************************
!> \brief pw2 = alpha*pw1 + beta*pw2
!>        alpha defaults to 1, beta defaults to 1
!> \param pw1 source grid (complex data, its real part is accumulated)
!> \param pw2 destination grid, updated in place
!> \param alpha optional scale for pw1 (default 1)
!> \param beta optional scale applied to pw2 before accumulation (default 1)
!> \param allow_noncompatible_grids accept grids that are merely shape-equal
!> \par History
!>      JGH (21-Feb-2003) : added reference grid functionality
!>      JGH (01-Dec-2007) : rename and remove complex alpha
!> \author apsi
!> \note
!>      Currently only summing up of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_axpy_c3d_r3d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: permit_mismatch
      REAL(KIND=dp) :: a_scale, b_scale

      CALL timeset(routineN, handle)

      a_scale = 1.0_dp
      b_scale = 1.0_dp
      permit_mismatch = .FALSE.
      IF (PRESENT(alpha)) a_scale = alpha
      IF (PRESENT(beta)) b_scale = beta
      IF (PRESENT(allow_noncompatible_grids)) permit_mismatch = allow_noncompatible_grids

      ! first bring pw2 to beta*pw2; a zero beta just wipes the grid
      IF (b_scale /= 1.0_dp) THEN
         IF (b_scale == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(b_scale, pw2)
            pw2%array = pw2%array*b_scale
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         ! same grid object: accumulate directly (alpha == 0 is a no-op)
         IF (a_scale == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE IF (a_scale /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(a_scale, pw1, pw2)
            pw2%array = pw2%array + a_scale* real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF
      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. permit_mismatch) THEN
         ! different grid objects: the data arrays must at least match in shape
         IF (any(shape(pw1%array) /= shape(pw2%array))) &
            cpabort("Noncommensurate grids not implemented for 3D grids!")
         IF (a_scale == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(a_scale, pw1, pw2)
            pw2%array = pw2%array + a_scale* real(pw1%array, kind=dp)
!$OMP END PARALLEL WORKSHARE
         END IF
      ELSE
         cpabort("Grids not compatible")
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_c3d_r3d_gs
7254
! **************************************************************************************************
!> \brief pw_out = pw_out + alpha * pw1 * pw2
!>        alpha defaults to 1
!> \param pw_out accumulator grid, updated in place
!> \param pw1 first factor (complex data)
!> \param pw2 second factor (real data, promoted to complex)
!> \param alpha optional scale factor for the product (default 1)
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_multiply_c3d_r3d_gs (pw_out, pw1, pw2, alpha)

      TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: handle
      REAL(KIND=dp) :: fac

      CALL timeset(routineN, handle)

      fac = 1.0_dp
      IF (PRESENT(alpha)) fac = alpha

      ! the elementwise product is only defined on one common grid object
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      IF (fac /= 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(fac, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + fac*pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
      ELSE
         ! skip the scalar multiply for the common alpha == 1 case
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_c3d_r3d_gs
7297
! **************************************************************************************************
!> \brief In-place elementwise product: pw1 = pw1 * pw2
!> \param pw1 complex grid data that is scaled in place
!> \param pw2 real multiplier grid (unchanged, promoted to complex)
! **************************************************************************************************
   SUBROUTINE pw_multiply_with_c3d_r3d_gs (pw1, pw2)
      TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw1
      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw2

      CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'

      INTEGER :: timer

      CALL timeset(routineN, timer)

      ! both operands must live on the very same grid object
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
         cpabort("Incompatible grids!")

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
      pw1%array = pw1%array* cmplx(pw2%array, 0.0_dp, kind=dp)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)

   END SUBROUTINE pw_multiply_with_c3d_r3d_gs
7323
! **************************************************************************************************
!> \brief Calculate integral over unit cell for functions in plane wave basis,
!>        only the real part of pw1*pw2 is returned
!> \param pw1 first function (complex data, real part used)
!> \param pw2 second function (real data)
!> \param sumtype do_standard_sum or do_accurate_sum (default: accurate)
!> \param just_sum if true, skip the volume scaling (default: false)
!> \param local_only if true, skip the parallel reduction (default: false)
!> \return the (real part of the) integral
!> \par History
!>      JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
!> \author apsi
! **************************************************************************************************
   FUNCTION pw_integral_ab_c3d_r3d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)

      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw2
      INTEGER, INTENT(IN), OPTIONAL :: sumtype
      LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
      REAL(kind=dp) :: integral_value

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab'

      INTEGER :: handle, sum_method
      LOGICAL :: skip_vol_scaling, sum_locally

      CALL timeset(routinen, handle)

      sum_method = do_accurate_sum
      IF (PRESENT(sumtype)) sum_method = sumtype

      sum_locally = .FALSE.
      IF (PRESENT(local_only)) sum_locally = local_only

      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         cpabort("Grids incompatible")
      END IF

      skip_vol_scaling = .FALSE.
      IF (PRESENT(just_sum)) skip_vol_scaling = just_sum

      IF (sum_method == do_standard_sum) THEN
         ! plain intrinsic SUM
         integral_value = sum(real(pw1%array, kind=dp)*pw2%array)
      ELSE
         ! compensated summation for reduced round-off
         integral_value = accurate_sum(real(pw1%array, kind=dp)*pw2%array)
      END IF

      IF (.NOT. skip_vol_scaling) THEN
         integral_value = integral_value*pw1%pw_grid%vol
      END IF

      IF (.NOT. sum_locally .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
         CALL pw1%pw_grid%para%group%sum(integral_value)

      CALL timestop(handle)

   END FUNCTION pw_integral_ab_c3d_r3d_gs
7390
! **************************************************************************************************
!> \brief copy a pw type variable: pw2 receives the complex data of pw1
!> \param pw1 source grid (complex data)
!> \param pw2 destination grid (complex data), overwritten with pw1
!> \par History
!>      JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
!>                         in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
!>      JGH (21-Feb-2003) : Code for generalized reference grids
!> \author apsi
!> \note
!>      Currently only copying of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_copy_c3d_c3d_rs (pw1, pw2)

      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'

      INTEGER :: handle

      CALL timeset(routinen, handle)

      IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
         cpabort("Both grids must be either spherical or non-spherical!")
      IF (pw1%pw_grid%spherical) &
         cpabort("Spherical grids only exist in reciprocal space!")

      IF (any(shape(pw2%array) /= shape(pw1%array))) &
         cpabort("3D grids must be compatible!")
      ! NOTE(review): a second, identical spherical check ("3D grids must not be
      ! spherical!") was removed here - it was unreachable, since any spherical
      ! grid already triggers the abort above.
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
      pw2%array(:, :, :) = pw1%array(:, :, :)
!$OMP END PARALLEL WORKSHARE

      CALL timestop(handle)

   END SUBROUTINE pw_copy_c3d_c3d_rs
7431
! **************************************************************************************************
!> \brief Export the grid data into a plain 3D complex array
!> \param pw source grid (complex data)
!> \param array destination array, overwritten with pw%array
! **************************************************************************************************
   SUBROUTINE pw_copy_to_array_c3d_c3d_rs (pw, array)
      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'

      INTEGER :: timer

      CALL timeset(routineN, timer)

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(array, pw)
      array = pw%array
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)
   END SUBROUTINE pw_copy_to_array_c3d_c3d_rs
7453
! **************************************************************************************************
!> \brief Import a plain 3D complex array into the grid data
!> \param pw destination grid; its data array is overwritten
!> \param array source values
! **************************************************************************************************
   SUBROUTINE pw_copy_from_array_c3d_c3d_rs (pw, array)
      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(IN) :: array

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'

      INTEGER :: timer

      CALL timeset(routineN, timer)

!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(array, pw)
      pw%array = array
!$OMP END PARALLEL WORKSHARE

      CALL timestop(timer)
   END SUBROUTINE pw_copy_from_array_c3d_c3d_rs
7475
! **************************************************************************************************
!> \brief pw2 = alpha*pw1 + beta*pw2
!>        alpha defaults to 1, beta defaults to 1
!> \param pw1 source grid (complex data)
!> \param pw2 destination grid, updated in place
!> \param alpha optional scale for pw1 (default 1)
!> \param beta optional scale applied to pw2 before accumulation (default 1)
!> \param allow_noncompatible_grids accept grids that are merely shape-equal
!> \par History
!>      JGH (21-Feb-2003) : added reference grid functionality
!>      JGH (01-Dec-2007) : rename and remove complex alpha
!> \author apsi
!> \note
!>      Currently only summing up of respective types allowed,
!>      in order to avoid errors
! **************************************************************************************************
   SUBROUTINE pw_axpy_c3d_c3d_rs (pw1, pw2, alpha, beta, allow_noncompatible_grids)

      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
      LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'

      INTEGER :: handle
      LOGICAL :: permit_mismatch
      REAL(KIND=dp) :: a_scale, b_scale

      CALL timeset(routineN, handle)

      a_scale = 1.0_dp
      b_scale = 1.0_dp
      permit_mismatch = .FALSE.
      IF (PRESENT(alpha)) a_scale = alpha
      IF (PRESENT(beta)) b_scale = beta
      IF (PRESENT(allow_noncompatible_grids)) permit_mismatch = allow_noncompatible_grids

      ! first bring pw2 to beta*pw2; a zero beta just wipes the grid
      IF (b_scale /= 1.0_dp) THEN
         IF (b_scale == 0.0_dp) THEN
            CALL pw_zero(pw2)
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(b_scale, pw2)
            pw2%array = pw2%array*b_scale
!$OMP END PARALLEL WORKSHARE
         END IF
      END IF

      IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         ! same grid object: accumulate directly (alpha == 0 is a no-op)
         IF (a_scale == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE IF (a_scale /= 0.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(a_scale, pw1, pw2)
            pw2%array = pw2%array + a_scale* pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF
      ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. permit_mismatch) THEN
         ! different grid objects: the data arrays must at least match in shape
         IF (any(shape(pw1%array) /= shape(pw2%array))) &
            cpabort("Noncommensurate grids not implemented for 3D grids!")
         IF (a_scale == 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
            pw2%array = pw2%array + pw1%array
!$OMP END PARALLEL WORKSHARE
         ELSE
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(a_scale, pw1, pw2)
            pw2%array = pw2%array + a_scale* pw1%array
!$OMP END PARALLEL WORKSHARE
         END IF
      ELSE
         cpabort("Grids not compatible")
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_axpy_c3d_c3d_rs
7561
! **************************************************************************************************
!> \brief pw_out = pw_out + alpha * pw1 * pw2
!>        alpha defaults to 1
!> \param pw_out accumulator grid, updated in place
!> \param pw1 first factor (complex data)
!> \param pw2 second factor (complex data)
!> \param alpha optional scale factor for the product (default 1)
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_multiply_c3d_c3d_rs (pw_out, pw1, pw2, alpha)

      TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw_out
      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'

      INTEGER :: handle
      REAL(KIND=dp) :: fac

      CALL timeset(routineN, handle)

      fac = 1.0_dp
      IF (PRESENT(alpha)) fac = alpha

      ! the elementwise product is only defined on one common grid object
      IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
         cpabort("pw_multiply not implemented for non-identical grids!")

      IF (fac /= 1.0_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(fac, pw_out, pw1, pw2)
         pw_out%array = pw_out%array + fac*pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      ELSE
         ! skip the scalar multiply for the common alpha == 1 case
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
         pw_out%array = pw_out%array + pw1%array*pw2%array
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_multiply_c3d_c3d_rs
7604
7605! **************************************************************************************************
7606!> \brief ...
7607!> \param pw1 ...
7608!> \param pw2 ...
7609! **************************************************************************************************
7610 SUBROUTINE pw_multiply_with_c3d_c3d_rs (pw1, pw2)
7611 TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw1
7612 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw2
7613
7614 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
7615
7616 INTEGER :: handle
7617
7618 CALL timeset(routinen, handle)
7619
7620 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
7621 cpabort("Incompatible grids!")
7622
7623!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
7624 pw1%array = pw1%array* pw2%array
7625!$OMP END PARALLEL WORKSHARE
7626
7627 CALL timestop(handle)
7628
7629 END SUBROUTINE pw_multiply_with_c3d_c3d_rs
7630
7631! **************************************************************************************************
7632!> \brief Calculate integral over unit cell for functions in plane wave basis
7633!> only returns the real part of it ......
7634!> \param pw1 ...
7635!> \param pw2 ...
7636!> \param sumtype ...
7637!> \param just_sum ...
7638!> \param local_only ...
7639!> \return ...
7640!> \par History
7641!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
7642!> \author apsi
7643! **************************************************************************************************
7644 FUNCTION pw_integral_ab_c3d_c3d_rs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)
7645
7646 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
7647 TYPE(pw_c3d_rs_type), INTENT(IN) :: pw2
7648 INTEGER, INTENT(IN), OPTIONAL :: sumtype
7649 LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
7650 REAL(kind=dp) :: integral_value
7651
7652 CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab'
7653
7654 INTEGER :: handle, loc_sumtype
7655 LOGICAL :: my_just_sum, my_local_only
7656
7657 CALL timeset(routinen, handle)
7658
7659 loc_sumtype = do_accurate_sum
7660 IF (PRESENT(sumtype)) loc_sumtype = sumtype
7661
7662 my_local_only = .false.
7663 IF (PRESENT(local_only)) my_local_only = local_only
7664
7665 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
7666 cpabort("Grids incompatible")
7667 END IF
7668
7669 my_just_sum = .false.
7670 IF (PRESENT(just_sum)) my_just_sum = just_sum
7671
7672 ! do standard sum
7673 IF (loc_sumtype == do_standard_sum) THEN
7674
7675 ! Do standard sum
7676
7677 integral_value = sum(real(conjg(pw1%array) &
7678 *pw2%array, kind=dp)) !? complex bit
7679
7680 ELSE
7681
7682 ! Do accurate sum
7683 integral_value = accurate_sum(real(conjg(pw1%array) &
7684 *pw2%array, kind=dp)) !? complex bit
7685
7686 END IF
7687
7688 IF (.NOT. my_just_sum) THEN
7689 integral_value = integral_value*pw1%pw_grid%dvol
7690 END IF
7691
7692
7693 IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
7694 CALL pw1%pw_grid%para%group%sum(integral_value)
7695
7696 CALL timestop(handle)
7697
7698 END FUNCTION pw_integral_ab_c3d_c3d_rs
7699! **************************************************************************************************
7700!> \brief copy a pw type variable
7701!> \param pw1 ...
7702!> \param pw2 ...
7703!> \par History
7704!> JGH (7-Mar-2001) : check for pw_grid %id_nr, allow copy if
7705!> in_use == COMPLEXDATA1D and in_space == RECIPROCALSPACE
7706!> JGH (21-Feb-2003) : Code for generalized reference grids
7707!> \author apsi
7708!> \note
7709!> Currently only copying of respective types allowed,
7710!> in order to avoid errors
7711! **************************************************************************************************
7712 SUBROUTINE pw_copy_c3d_c3d_gs (pw1, pw2)
7713
7714 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
7715 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2
7716
7717 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy'
7718
7719 INTEGER :: handle
7720
7721 CALL timeset(routinen, handle)
7722
7723 IF (pw1%pw_grid%spherical .neqv. pw2%pw_grid%spherical) &
7724 cpabort("Both grids must be either spherical or non-spherical!")
7725
7726 IF (any(shape(pw2%array) /= shape(pw1%array))) &
7727 cpabort("3D grids must be compatible!")
7728 IF (pw1%pw_grid%spherical) &
7729 cpabort("3D grids must not be spherical!")
7730!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
7731 pw2%array(:, :, :) = pw1%array(:, :, :)
7732!$OMP END PARALLEL WORKSHARE
7733
7734 CALL timestop(handle)
7735
7736 END SUBROUTINE pw_copy_c3d_c3d_gs
7737
7738! **************************************************************************************************
7739!> \brief ...
7740!> \param pw ...
7741!> \param array ...
7742! **************************************************************************************************
7743 SUBROUTINE pw_copy_to_array_c3d_c3d_gs (pw, array)
7744 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw
7745 COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(INOUT) :: array
7746
7747 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_to_array'
7748
7749 INTEGER :: handle
7750
7751 CALL timeset(routinen, handle)
7752
7753!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
7754 array(:, :, :) = pw%array(:, :, :)
7755!$OMP END PARALLEL WORKSHARE
7756
7757 CALL timestop(handle)
7758 END SUBROUTINE pw_copy_to_array_c3d_c3d_gs
7759
7760! **************************************************************************************************
7761!> \brief ...
7762!> \param pw ...
7763!> \param array ...
7764! **************************************************************************************************
7765 SUBROUTINE pw_copy_from_array_c3d_c3d_gs (pw, array)
7766 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw
7767 COMPLEX(KIND=dp), DIMENSION(:, :, :), INTENT(IN) :: array
7768
7769 CHARACTER(len=*), PARAMETER :: routineN = 'pw_copy_from_array'
7770
7771 INTEGER :: handle
7772
7773 CALL timeset(routinen, handle)
7774
7775!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw, array)
7776 pw%array = array
7777!$OMP END PARALLEL WORKSHARE
7778
7779 CALL timestop(handle)
7780 END SUBROUTINE pw_copy_from_array_c3d_c3d_gs
7781
7782! **************************************************************************************************
7783!> \brief pw2 = alpha*pw1 + beta*pw2
7784!> alpha defaults to 1, beta defaults to 1
7785!> \param pw1 ...
7786!> \param pw2 ...
7787!> \param alpha ...
7788!> \param beta ...
7789!> \param allow_noncompatible_grids ...
7790!> \par History
7791!> JGH (21-Feb-2003) : added reference grid functionality
7792!> JGH (01-Dec-2007) : rename and remove complex alpha
7793!> \author apsi
7794!> \note
7795!> Currently only summing up of respective types allowed,
7796!> in order to avoid errors
7797! **************************************************************************************************
7798 SUBROUTINE pw_axpy_c3d_c3d_gs (pw1, pw2, alpha, beta, allow_noncompatible_grids)
7799
7800 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
7801 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2
7802 REAL(KIND=dp), INTENT(in), OPTIONAL :: alpha, beta
7803 LOGICAL, INTENT(IN), OPTIONAL :: allow_noncompatible_grids
7804
7805 CHARACTER(len=*), PARAMETER :: routineN = 'pw_axpy'
7806
7807 INTEGER :: handle
7808 LOGICAL :: my_allow_noncompatible_grids
7809 REAL(KIND=dp) :: my_alpha, my_beta
7810
7811 CALL timeset(routinen, handle)
7812
7813 my_alpha = 1.0_dp
7814 IF (PRESENT(alpha)) my_alpha = alpha
7815
7816 my_beta = 1.0_dp
7817 IF (PRESENT(beta)) my_beta = beta
7818
7819 my_allow_noncompatible_grids = .false.
7820 IF (PRESENT(allow_noncompatible_grids)) my_allow_noncompatible_grids = allow_noncompatible_grids
7821
7822 IF (my_beta /= 1.0_dp) THEN
7823 IF (my_beta == 0.0_dp) THEN
7824 CALL pw_zero(pw2)
7825 ELSE
7826!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw2,my_beta)
7827 pw2%array = pw2%array*my_beta
7828!$OMP END PARALLEL WORKSHARE
7829 END IF
7830 END IF
7831
7832 IF (ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
7833 IF (my_alpha == 1.0_dp) THEN
7834!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2)
7835 pw2%array = pw2%array + pw1%array
7836!$OMP END PARALLEL WORKSHARE
7837 ELSE IF (my_alpha /= 0.0_dp) THEN
7838!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1, pw2, my_alpha)
7839 pw2%array = pw2%array + my_alpha* pw1%array
7840!$OMP END PARALLEL WORKSHARE
7841 END IF
7842
7843 ELSE IF (pw_compatible(pw1%pw_grid, pw2%pw_grid) .OR. my_allow_noncompatible_grids) THEN
7844
7845 IF (any(shape(pw1%array) /= shape(pw2%array))) &
7846 cpabort("Noncommensurate grids not implemented for 3D grids!")
7847
7848 IF (my_alpha == 1.0_dp) THEN
7849!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
7850 pw2%array = pw2%array + pw1%array
7851!$OMP END PARALLEL WORKSHARE
7852 ELSE
7853!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2,my_alpha)
7854 pw2%array = pw2%array + my_alpha* pw1%array
7855!$OMP END PARALLEL WORKSHARE
7856 END IF
7857
7858 ELSE
7859
7860 cpabort("Grids not compatible")
7861
7862 END IF
7863
7864 CALL timestop(handle)
7865
7866 END SUBROUTINE pw_axpy_c3d_c3d_gs
7867
7868! **************************************************************************************************
7869!> \brief pw_out = pw_out + alpha * pw1 * pw2
7870!> alpha defaults to 1
7871!> \param pw_out ...
7872!> \param pw1 ...
7873!> \param pw2 ...
7874!> \param alpha ...
7875!> \author JGH
7876! **************************************************************************************************
7877 SUBROUTINE pw_multiply_c3d_c3d_gs (pw_out, pw1, pw2, alpha)
7878
7879 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw_out
7880 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
7881 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw2
7882 REAL(KIND=dp), INTENT(IN), OPTIONAL :: alpha
7883
7884 CHARACTER(len=*), PARAMETER :: routineN = 'pw_multiply'
7885
7886 INTEGER :: handle
7887 REAL(KIND=dp) :: my_alpha
7888
7889 CALL timeset(routinen, handle)
7890
7891 my_alpha = 1.0_dp
7892 IF (PRESENT(alpha)) my_alpha = alpha
7893
7894 IF (.NOT. ASSOCIATED(pw_out%pw_grid, pw1%pw_grid) .OR. .NOT. ASSOCIATED(pw_out%pw_grid, pw2%pw_grid)) &
7895 cpabort("pw_multiply not implemented for non-identical grids!")
7896
7897 IF (my_alpha == 1.0_dp) THEN
7898!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw_out, pw1, pw2)
7899 pw_out%array = pw_out%array + pw1%array* pw2%array
7900!$OMP END PARALLEL WORKSHARE
7901 ELSE
7902!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(my_alpha, pw_out, pw1, pw2)
7903 pw_out%array = pw_out%array + my_alpha*pw1%array* pw2%array
7904!$OMP END PARALLEL WORKSHARE
7905 END IF
7906
7907 CALL timestop(handle)
7908
7909 END SUBROUTINE pw_multiply_c3d_c3d_gs
7910
7911! **************************************************************************************************
7912!> \brief ...
7913!> \param pw1 ...
7914!> \param pw2 ...
7915! **************************************************************************************************
7916 SUBROUTINE pw_multiply_with_c3d_c3d_gs (pw1, pw2)
7917 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw1
7918 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw2
7919
7920 CHARACTER(LEN=*), PARAMETER :: routineN = 'pw_multiply_with'
7921
7922 INTEGER :: handle
7923
7924 CALL timeset(routinen, handle)
7925
7926 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) &
7927 cpabort("Incompatible grids!")
7928
7929!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(pw1,pw2)
7930 pw1%array = pw1%array* pw2%array
7931!$OMP END PARALLEL WORKSHARE
7932
7933 CALL timestop(handle)
7934
7935 END SUBROUTINE pw_multiply_with_c3d_c3d_gs
7936
7937! **************************************************************************************************
7938!> \brief Calculate integral over unit cell for functions in plane wave basis
7939!> only returns the real part of it ......
7940!> \param pw1 ...
7941!> \param pw2 ...
7942!> \param sumtype ...
7943!> \param just_sum ...
7944!> \param local_only ...
7945!> \return ...
7946!> \par History
7947!> JGH (14-Mar-2001) : Parallel sum and some tests, HALFSPACE case
7948!> \author apsi
7949! **************************************************************************************************
7950 FUNCTION pw_integral_ab_c3d_c3d_gs (pw1, pw2, sumtype, just_sum, local_only) RESULT(integral_value)
7951
7952 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
7953 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw2
7954 INTEGER, INTENT(IN), OPTIONAL :: sumtype
7955 LOGICAL, INTENT(IN), OPTIONAL :: just_sum, local_only
7956 REAL(kind=dp) :: integral_value
7957
7958 CHARACTER(len=*), PARAMETER :: routinen = 'pw_integral_ab'
7959
7960 INTEGER :: handle, loc_sumtype
7961 LOGICAL :: my_just_sum, my_local_only
7962
7963 CALL timeset(routinen, handle)
7964
7965 loc_sumtype = do_accurate_sum
7966 IF (PRESENT(sumtype)) loc_sumtype = sumtype
7967
7968 my_local_only = .false.
7969 IF (PRESENT(local_only)) my_local_only = local_only
7970
7971 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
7972 cpabort("Grids incompatible")
7973 END IF
7974
7975 my_just_sum = .false.
7976 IF (PRESENT(just_sum)) my_just_sum = just_sum
7977
7978 ! do standard sum
7979 IF (loc_sumtype == do_standard_sum) THEN
7980
7981 ! Do standard sum
7982
7983 integral_value = sum(real(conjg(pw1%array) &
7984 *pw2%array, kind=dp)) !? complex bit
7985
7986 ELSE
7987
7988 ! Do accurate sum
7989 integral_value = accurate_sum(real(conjg(pw1%array) &
7990 *pw2%array, kind=dp)) !? complex bit
7991
7992 END IF
7993
7994 IF (.NOT. my_just_sum) THEN
7995 integral_value = integral_value*pw1%pw_grid%vol
7996 END IF
7997
7998
7999 IF (.NOT. my_local_only .AND. pw1%pw_grid%para%mode == pw_mode_distributed) &
8000 CALL pw1%pw_grid%para%group%sum(integral_value)
8001
8002 CALL timestop(handle)
8003
8004 END FUNCTION pw_integral_ab_c3d_c3d_gs
8005
8006
8007
8008
8009
8010
8011
8012
8013
8014
8015
8016
8017
8018! **************************************************************************************************
8019!> \brief Gathers the pw vector from a 3d data field
!> \param pw1 3d data field (source of the gather)
!> \param pw2 1d pw coefficient vector (destination)
8022!> \param scale ...
8023!> \par History
8024!> none
8025!> \author JGH
8026! **************************************************************************************************
8027 SUBROUTINE pw_gather_s_r1d_r3d_2(pw1, pw2, scale)
8028
8029 TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
8030 TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw2
8031 REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale
8032
8033 CALL pw_gather_s_r1d_r3d (pw2, pw1%array, scale)
8034
8035 END SUBROUTINE pw_gather_s_r1d_r3d_2
8036
8037! **************************************************************************************************
8038!> \brief Gathers the pw vector from a 3d data field
8039!> \param pw ...
8040!> \param c ...
8041!> \param scale ...
8042!> \par History
8043!> none
8044!> \author JGH
8045! **************************************************************************************************
8046 SUBROUTINE pw_gather_s_r1d_r3d (pw, c, scale)
8047
8048 TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw
8049 REAL(kind=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(IN) :: c
8050 REAL(kind=dp), INTENT(IN), OPTIONAL :: scale
8051
8052 CHARACTER(len=*), PARAMETER :: routinen = 'pw_gather_s'
8053
8054 INTEGER :: gpt, handle, l, m, n
8055
8056 CALL timeset(routinen, handle)
8057
8058 associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
8059 ngpts => SIZE(pw%pw_grid%gsq), ghat => pw%pw_grid%g_hat)
8060
8061 IF (PRESENT(scale)) THEN
8062!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
8063 DO gpt = 1, ngpts
8064 l = mapl(ghat(1, gpt)) + 1
8065 m = mapm(ghat(2, gpt)) + 1
8066 n = mapn(ghat(3, gpt)) + 1
8067 pw%array(gpt) = scale* c(l, m, n)
8068 END DO
8069!$OMP END PARALLEL DO
8070 ELSE
8071!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
8072 DO gpt = 1, ngpts
8073 l = mapl(ghat(1, gpt)) + 1
8074 m = mapm(ghat(2, gpt)) + 1
8075 n = mapn(ghat(3, gpt)) + 1
8076 pw%array(gpt) = c(l, m, n)
8077 END DO
8078!$OMP END PARALLEL DO
8079 END IF
8080
8081 END associate
8082
8083 CALL timestop(handle)
8084
8085 END SUBROUTINE pw_gather_s_r1d_r3d
8086
8087! **************************************************************************************************
8088!> \brief Scatters a pw vector to a 3d data field
!> \param pw1 1d pw coefficient vector (source of the scatter)
!> \param pw2 3d data field (destination)
8091!> \param scale ...
8092!> \par History
8093!> none
8094!> \author JGH
8095! **************************************************************************************************
8096 SUBROUTINE pw_scatter_s_r1d_r3d_2(pw1, pw2, scale)
8097
8098 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
8099 TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw2
8100 REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale
8101
8102 CALL pw_scatter_s_r1d_r3d (pw1, pw2%array, scale)
8103
8104 END SUBROUTINE pw_scatter_s_r1d_r3d_2
8105
8106! **************************************************************************************************
8107!> \brief Scatters a pw vector to a 3d data field
8108!> \param pw ...
8109!> \param c ...
8110!> \param scale ...
8111!> \par History
8112!> none
8113!> \author JGH
8114! **************************************************************************************************
   SUBROUTINE pw_scatter_s_r1d_r3d (pw, c, scale)

      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw
      REAL(kind=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(INOUT) :: c
      REAL(kind=dp), INTENT(IN), OPTIONAL :: scale

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_scatter_s'

      INTEGER :: gpt, handle, l, m, n

      CALL timeset(routinen, handle)

      ! Scatter the 1d coefficient vector pw%array onto the 3d field c:
      ! for each g-vector its integer coordinates (ghat) are translated
      ! into 1-based 3d array indices through the positive index maps.
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))

         ! should only zero the unused bits (but the zero is needed)
         ! NOTE(review): when scale is present, c is NOT zeroed here -
         ! presumably callers pre-initialize c in that case; confirm.
         IF (.NOT. PRESENT(scale)) c = 0.0_dp

         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               c(l, m, n) = scale* pw%array(gpt)
            END DO
!$OMP END PARALLEL DO
         ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               c(l, m, n) = pw%array(gpt)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      ! Halfspace grids store only half of the g-vectors: a second pass
      ! fills the locations of the corresponding -g vectors (negative
      ! index maps) with the same real coefficients.
      IF (pw%pw_grid%grid_span == halfspace) THEN

         associate(mapl => pw%pw_grid%mapl%neg, mapm => pw%pw_grid%mapm%neg, mapn => pw%pw_grid%mapn%neg, &
                   ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))

            IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  c(l, m, n) = scale*( pw%array(gpt))
               END DO
!$OMP END PARALLEL DO
            ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  c(l, m, n) = ( pw%array(gpt))
               END DO
!$OMP END PARALLEL DO
            END IF

         END associate

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_scatter_s_r1d_r3d
8187
8188
8189
8190
8191
8192
8193
8194
8195
8196
8197
8198! **************************************************************************************************
8199!> \brief Gathers the pw vector from a 3d data field
!> \param pw1 3d data field (source of the gather)
!> \param pw2 1d pw coefficient vector (destination)
8202!> \param scale ...
8203!> \par History
8204!> none
8205!> \author JGH
8206! **************************************************************************************************
8207 SUBROUTINE pw_gather_s_r1d_c3d_2(pw1, pw2, scale)
8208
8209 TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
8210 TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw2
8211 REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale
8212
8213 CALL pw_gather_s_r1d_c3d (pw2, pw1%array, scale)
8214
8215 END SUBROUTINE pw_gather_s_r1d_c3d_2
8216
8217! **************************************************************************************************
8218!> \brief Gathers the pw vector from a 3d data field
8219!> \param pw ...
8220!> \param c ...
8221!> \param scale ...
8222!> \par History
8223!> none
8224!> \author JGH
8225! **************************************************************************************************
8226 SUBROUTINE pw_gather_s_r1d_c3d (pw, c, scale)
8227
8228 TYPE(pw_r1d_gs_type), INTENT(INOUT) :: pw
8229 COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(IN) :: c
8230 REAL(kind=dp), INTENT(IN), OPTIONAL :: scale
8231
8232 CHARACTER(len=*), PARAMETER :: routinen = 'pw_gather_s'
8233
8234 INTEGER :: gpt, handle, l, m, n
8235
8236 CALL timeset(routinen, handle)
8237
8238 associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
8239 ngpts => SIZE(pw%pw_grid%gsq), ghat => pw%pw_grid%g_hat)
8240
8241 IF (PRESENT(scale)) THEN
8242!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
8243 DO gpt = 1, ngpts
8244 l = mapl(ghat(1, gpt)) + 1
8245 m = mapm(ghat(2, gpt)) + 1
8246 n = mapn(ghat(3, gpt)) + 1
8247 pw%array(gpt) = scale* real(c(l, m, n), kind=dp)
8248 END DO
8249!$OMP END PARALLEL DO
8250 ELSE
8251!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
8252 DO gpt = 1, ngpts
8253 l = mapl(ghat(1, gpt)) + 1
8254 m = mapm(ghat(2, gpt)) + 1
8255 n = mapn(ghat(3, gpt)) + 1
8256 pw%array(gpt) = real(c(l, m, n), kind=dp)
8257 END DO
8258!$OMP END PARALLEL DO
8259 END IF
8260
8261 END associate
8262
8263 CALL timestop(handle)
8264
8265 END SUBROUTINE pw_gather_s_r1d_c3d
8266
8267! **************************************************************************************************
8268!> \brief Scatters a pw vector to a 3d data field
!> \param pw1 1d pw coefficient vector (source of the scatter)
!> \param pw2 3d data field (destination)
8271!> \param scale ...
8272!> \par History
8273!> none
8274!> \author JGH
8275! **************************************************************************************************
8276 SUBROUTINE pw_scatter_s_r1d_c3d_2(pw1, pw2, scale)
8277
8278 TYPE(pw_r1d_gs_type), INTENT(IN) :: pw1
8279 TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2
8280 REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale
8281
8282 CALL pw_scatter_s_r1d_c3d (pw1, pw2%array, scale)
8283
8284 END SUBROUTINE pw_scatter_s_r1d_c3d_2
8285
8286! **************************************************************************************************
8287!> \brief Scatters a pw vector to a 3d data field
8288!> \param pw ...
8289!> \param c ...
8290!> \param scale ...
8291!> \par History
8292!> none
8293!> \author JGH
8294! **************************************************************************************************
   SUBROUTINE pw_scatter_s_r1d_c3d (pw, c, scale)

      TYPE(pw_r1d_gs_type), INTENT(IN) :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(INOUT) :: c
      REAL(kind=dp), INTENT(IN), OPTIONAL :: scale

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_scatter_s'

      INTEGER :: gpt, handle, l, m, n

      CALL timeset(routinen, handle)

      ! Scatter the real 1d coefficient vector pw%array into the complex
      ! 3d field c (imaginary part set to zero): for each g-vector its
      ! integer coordinates (ghat) are translated into 1-based 3d array
      ! indices through the positive index maps.
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))

         ! should only zero the unused bits (but the zero is needed)
         ! NOTE(review): when scale is present, c is NOT zeroed here -
         ! presumably callers pre-initialize c in that case; confirm.
         IF (.NOT. PRESENT(scale)) c = 0.0_dp

         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               c(l, m, n) = scale* cmplx(pw%array(gpt), 0.0_dp, kind=dp)
            END DO
!$OMP END PARALLEL DO
         ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               c(l, m, n) = cmplx(pw%array(gpt), 0.0_dp, kind=dp)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      ! Halfspace grids store only half of the g-vectors: a second pass
      ! fills the locations of the corresponding -g vectors (negative
      ! index maps) with the same coefficients.
      IF (pw%pw_grid%grid_span == halfspace) THEN

         associate(mapl => pw%pw_grid%mapl%neg, mapm => pw%pw_grid%mapm%neg, mapn => pw%pw_grid%mapn%neg, &
                   ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))

            IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  c(l, m, n) = scale*( cmplx(pw%array(gpt), 0.0_dp, kind=dp))
               END DO
!$OMP END PARALLEL DO
            ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  c(l, m, n) = ( cmplx(pw%array(gpt), 0.0_dp, kind=dp))
               END DO
!$OMP END PARALLEL DO
            END IF

         END associate

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_scatter_s_r1d_c3d
8367
8368
8369
8370
8371
8372
8373
8374
8375
8376
8377
8378
8379! **************************************************************************************************
8380!> \brief Generic function for 3d FFT of a coefficient_type or pw_r3d_rs_type
8381!> \param pw1 ...
8382!> \param pw2 ...
8383!> \param debug ...
8384!> \par History
8385!> JGH (30-12-2000): New setup of functions and adaptation to parallelism
8386!> JGH (04-01-2001): Moved routine from pws to this module, only covers
8387!> pw_types, no more coefficient types
8388!> \author apsi
8389!> \note
8390!> fft_wrap_pw1pw2
8391! **************************************************************************************************
8392 SUBROUTINE fft_wrap_pw1pw2_r3d_c1d_rs_gs (pw1, pw2, debug)
8393
8394 TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
8395 TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2
8396 LOGICAL, INTENT(IN), OPTIONAL :: debug
8397
8398 CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'
8399
8400 COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
8401 COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
8402 INTEGER :: handle, handle2, my_pos, nrays, &
8403 out_unit
8404 INTEGER, DIMENSION(3) :: nloc
8405#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
8406 LOGICAL :: use_pw_gpu
8407#endif
8408 INTEGER, DIMENSION(:), POINTER :: n
8409 LOGICAL :: test
8410 REAL(KIND=dp) :: norm
8411
8412 CALL timeset(routinen, handle2)
8413 out_unit = cp_logger_get_default_io_unit()
8414 CALL timeset(routinen//"_"//trim(adjustl(cp_to_string( &
8415 ceiling(pw1%pw_grid%cutoff/10)*10))), handle)
8416
8417 NULLIFY (c_in)
8418 NULLIFY (c_out)
8419
8420 IF (PRESENT(debug)) THEN
8421 test = debug
8422 ELSE
8423 test = .false.
8424 END IF
8425
8426 !..check if grids are compatible
8427 IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
8428 IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
8429 cpabort("PW grids not compatible")
8430 END IF
8431 IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
8432 cpabort("PW grids have not compatible MPI groups")
8433 END IF
8434 END IF
8435
8436 !..prepare input
8437 norm = 1.0_dp/pw1%pw_grid%ngpts
8438
8439 n => pw1%pw_grid%npts
8440
8441 IF (pw1%pw_grid%para%mode == pw_mode_local) THEN
8442
8443 !
8444 !..replicated data, use local FFT
8445 !
8446
8447 IF (test .AND. out_unit > 0) THEN
8448 WRITE (out_unit, '(A)') " FFT Protocol "
8449 WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
8450 WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
8451 WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
8452 WRITE (out_unit, '(A,T66,E15.6)') " scale factor", norm
8453 END IF
8454
8455#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
8456 CALL pw_gpu_r3dc1d_3d(pw1, pw2, scale=norm)
8457#elif defined (__PW_FPGA)
8458 ALLOCATE (c_out(n(1), n(2), n(3)))
8459 ! check if bitstream for the fft size is present
8460 ! if not, perform fft3d in CPU
8461 IF (pw_fpga_init_bitstream(n) == 1) THEN
8462 CALL pw_copy_to_array(pw1, c_out)
8463#if (__PW_FPGA_SP && __PW_FPGA)
8464 CALL pw_fpga_r3dc1d_3d_sp(n, c_out)
8465#else
8466 CALL pw_fpga_r3dc1d_3d_dp(n, c_out)
8467#endif
8468 CALL zdscal(n(1)*n(2)*n(3), norm, c_out, 1)
8469 CALL pw_gather_s_c1d_c3d(pw2, c_out)
8470 ELSE
8471 CALL pw_copy_to_array(pw1, c_out)
8472 CALL fft3d(fwfft, n, c_out, scale=norm, debug=test)
8473 CALL pw_gather_s_c1d_c3d(pw2, c_out)
8474 END IF
8475 DEALLOCATE (c_out)
8476#else
8477 ALLOCATE (c_out(n(1), n(2), n(3)))
8478 c_out = 0.0_dp
8479 CALL pw_copy_to_array(pw1, c_out)
8480 CALL fft3d(fwfft, n, c_out, scale=norm, debug=test)
8481 CALL pw_gather_s_c1d_c3d(pw2, c_out)
8482 DEALLOCATE (c_out)
8483#endif
8484
8485 IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "
8486
8487 ELSE
8488
8489 !
8490 !..parallel FFT
8491 !
8492
8493 IF (test .AND. pw1%pw_grid%para%group_head .AND. out_unit > 0) THEN
8494 WRITE (out_unit, '(A)') " FFT Protocol "
8495 WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
8496 WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
8497 WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
8498 WRITE (out_unit, '(A,T66,E15.6)') " scale factor", norm
8499 END IF
8500
8501 my_pos = pw1%pw_grid%para%my_pos
8502 nrays = pw1%pw_grid%para%nyzray(my_pos)
8503 grays => pw1%pw_grid%grays
8504
8505#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
8506 ! (no ray dist. is not efficient in CUDA)
8507 use_pw_gpu = pw1%pw_grid%para%ray_distribution
8508 IF (use_pw_gpu) THEN
8509 CALL pw_gpu_r3dc1d_3d_ps(pw1, pw2, scale=norm)
8510 ELSE
8511#endif
8512!.. prepare input
8513 nloc = pw1%pw_grid%npts_local
8514 ALLOCATE (c_in(nloc(1), nloc(2), nloc(3)))
8515 CALL pw_copy_to_array(pw1, c_in)
8516 grays = z_zero
8517 !..transform
8518 IF (pw1%pw_grid%para%ray_distribution) THEN
8519 CALL fft3d(fwfft, n, c_in, grays, pw1%pw_grid%para%group, &
8520 pw1%pw_grid%para%rs_group, &
8521 pw1%pw_grid%para%yzp, pw1%pw_grid%para%nyzray, &
8522 pw1%pw_grid%para%bo, scale=norm, debug=test)
8523 ELSE
8524 CALL fft3d(fwfft, n, c_in, grays, pw1%pw_grid%para%rs_group, &
8525 pw1%pw_grid%para%bo, scale=norm, debug=test)
8526 END IF
8527 !..prepare output
8528 IF (test .AND. pw1%pw_grid%para%group_head .AND. out_unit > 0) &
8529 WRITE (out_unit, '(A)') " PW_GATHER : 2d -> 1d "
8530 CALL pw_gather_p_c1d (pw2, grays)
8531 DEALLOCATE (c_in)
8532
8533#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
8534 END IF
8535#endif
8536 END IF
8537
8538 IF (test .AND. pw1%pw_grid%para%group_head .AND. out_unit > 0) THEN
8539 WRITE (out_unit, '(A)') " End of FFT Protocol "
8540 END IF
8541
8542 CALL timestop(handle)
8543 CALL timestop(handle2)
8544
8545 END SUBROUTINE fft_wrap_pw1pw2_r3d_c1d_rs_gs
8546
8547
8548
8549
8550
! **************************************************************************************************
!> \brief Forward 3d FFT of a real real-space grid (pw1) into a complex
!>        reciprocal-space grid (pw2); the result is scaled by 1/ngpts.
!> \param pw1 input: real 3d data in real space
!> \param pw2 output: complex 3d data in reciprocal space
!> \param debug print an FFT protocol to the default output unit if .TRUE. (default .FALSE.)
!> \par History
!>      JGH (30-12-2000): New setup of functions and adaptation to parallelism
!>      JGH (04-01-2001): Moved routine from pws to this module, only covers
!>                        pw_types, no more coefficient types
!> \author apsi
!> \note
!>      fft_wrap_pw1pw2
! **************************************************************************************************
   SUBROUTINE fft_wrap_pw1pw2_r3d_c3d_rs_gs (pw1, pw2, debug)

      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2
      LOGICAL, INTENT(IN), OPTIONAL :: debug

      CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'

      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
      INTEGER :: handle, handle2, my_pos, nrays, &
                 out_unit
      INTEGER, DIMENSION(:), POINTER :: n
      LOGICAL :: test
      REAL(KIND=dp) :: norm

      CALL timeset(routinen, handle2)
      out_unit = cp_logger_get_default_io_unit()
      ! second timer labelled with the grid cutoff, rounded up to a multiple of 10
      CALL timeset(routinen//"_"//trim(adjustl(cp_to_string( &
                                               ceiling(pw1%pw_grid%cutoff/10)*10))), handle)

      NULLIFY (c_in)
      NULLIFY (c_out)

      IF (PRESENT(debug)) THEN
         test = debug
      ELSE
         test = .false.
      END IF

      !..check if grids are compatible
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
            cpabort("PW grids not compatible")
         END IF
         IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
            cpabort("PW grids have not compatible MPI groups")
         END IF
      END IF

      !..prepare input
      ! forward transform carries the 1/ngpts normalization
      norm = 1.0_dp/pw1%pw_grid%ngpts

      n => pw1%pw_grid%npts

      IF (pw1%pw_grid%para%mode == pw_mode_local) THEN

         !
         !..replicated data, use local FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
            WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
            WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T66,E15.6)') " scale factor", norm
         END IF

         ! promote the real input to complex and transform in place in pw2
         pw2%array = cmplx(pw1%array, 0.0_dp, kind=dp)
         c_out => pw2%array
         CALL fft3d(fwfft, n, c_out, scale=norm, debug=test)

         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "

      ELSE

         !
         !..parallel FFT
         !

         IF (test .AND. pw1%pw_grid%para%group_head .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
            WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
            WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T66,E15.6)') " scale factor", norm
         END IF

         my_pos = pw1%pw_grid%para%my_pos
         nrays = pw1%pw_grid%para%nyzray(my_pos)
         grays => pw1%pw_grid%grays

         ! NOTE(review): no transform is performed in this parallel branch - pw2 is
         ! left untouched. Presumably the distributed r3d -> c3d combination is
         ! unsupported; confirm against the fypp template before relying on it.
      END IF

      IF (test .AND. pw1%pw_grid%para%group_head .AND. out_unit > 0) THEN
         WRITE (out_unit, '(A)') " End of FFT Protocol "
      END IF

      CALL timestop(handle)
      CALL timestop(handle2)

   END SUBROUTINE fft_wrap_pw1pw2_r3d_c3d_rs_gs
8657
8658
8659
8660
8661
8662
8663
8664
8665
8666
8667
! **************************************************************************************************
!> \brief Backward 3d FFT of a reciprocal-space coefficient vector (pw1) into a
!>        real real-space grid (pw2); only the real part of the transform is kept.
!> \param pw1 input: complex 1d coefficient data in reciprocal space
!> \param pw2 output: real 3d data in real space
!> \param debug print an FFT protocol to the default output unit if .TRUE. (default .FALSE.)
!> \par History
!>      JGH (30-12-2000): New setup of functions and adaptation to parallelism
!>      JGH (04-01-2001): Moved routine from pws to this module, only covers
!>                        pw_types, no more coefficient types
!> \author apsi
!> \note
!>      fft_wrap_pw1pw2
! **************************************************************************************************
   SUBROUTINE fft_wrap_pw1pw2_c1d_r3d_gs_rs (pw1, pw2, debug)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw2
      LOGICAL, INTENT(IN), OPTIONAL :: debug

      CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'

      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
      INTEGER :: handle, handle2, my_pos, nrays, &
                 out_unit
      INTEGER, DIMENSION(3) :: nloc
#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
      LOGICAL :: use_pw_gpu
#endif
      INTEGER, DIMENSION(:), POINTER :: n
      LOGICAL :: test
      REAL(KIND=dp) :: norm

      CALL timeset(routinen, handle2)
      out_unit = cp_logger_get_default_io_unit()
      ! second timer labelled with the grid cutoff, rounded up to a multiple of 10
      CALL timeset(routinen//"_"//trim(adjustl(cp_to_string( &
                                               ceiling(pw1%pw_grid%cutoff/10)*10))), handle)

      NULLIFY (c_in)
      NULLIFY (c_out)

      IF (PRESENT(debug)) THEN
         test = debug
      ELSE
         test = .false.
      END IF

      !..check if grids are compatible
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
            cpabort("PW grids not compatible")
         END IF
         IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
            cpabort("PW grids have not compatible MPI groups")
         END IF
      END IF

      !..prepare input
      ! backward transform is unscaled (the 1/ngpts factor lives in the forward FFT)
      norm = 1.0_dp

      n => pw1%pw_grid%npts

      IF (pw1%pw_grid%para%mode == pw_mode_local) THEN

         !
         !..replicated data, use local FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
            WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
            WRITE (out_unit, '(A,T66,E15.6)') " scale factor", norm
         END IF

#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
         ! transform fully on the GPU
         CALL pw_gpu_c1dr3d_3d(pw1, pw2, scale=norm)
#elif defined (__PW_FPGA)
         ALLOCATE (c_out(n(1), n(2), n(3)))
         ! check if bitstream for the fft size is present
         ! if not, perform fft3d in CPU
         IF (pw_fpga_init_bitstream(n) == 1) THEN
            CALL pw_scatter_s_c1d_c3d(pw1, c_out)
            ! transform using FPGA
#if (__PW_FPGA_SP && __PW_FPGA)
            CALL pw_fpga_c1dr3d_3d_sp(n, c_out)
#else
            CALL pw_fpga_c1dr3d_3d_dp(n, c_out)
#endif
            CALL zdscal(n(1)*n(2)*n(3), norm, c_out, 1)
            ! use real part only
            CALL pw_copy_from_array(pw2, c_out)
         ELSE
            IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " PW_SCATTER : 3d -> 1d "
            CALL pw_scatter_s_c1d_c3d(pw1, c_out)
            ! transform
            CALL fft3d(bwfft, n, c_out, scale=norm, debug=test)
            ! use real part only
            IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " REAL part "
            CALL pw_copy_from_array(pw2, c_out)
         END IF
         DEALLOCATE (c_out)
#else
         ! CPU path: scatter coefficients to a full 3d array, transform, keep real part
         ALLOCATE (c_out(n(1), n(2), n(3)))
         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " PW_SCATTER : 3d -> 1d "
         CALL pw_scatter_s_c1d_c3d(pw1, c_out)
         ! transform
         CALL fft3d(bwfft, n, c_out, scale=norm, debug=test)
         ! use real part only
         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " REAL part "
         CALL pw_copy_from_array(pw2, c_out)
         DEALLOCATE (c_out)
#endif

         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "

      ELSE

         !
         !..parallel FFT
         !

         IF (test .AND. pw1%pw_grid%para%group_head .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
            WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
            WRITE (out_unit, '(A,T66,E15.6)') " scale factor", norm
         END IF

         my_pos = pw1%pw_grid%para%my_pos
         nrays = pw1%pw_grid%para%nyzray(my_pos)
         grays => pw1%pw_grid%grays

#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
         ! (no ray dist. is not efficient in CUDA)
         use_pw_gpu = pw1%pw_grid%para%ray_distribution
         IF (use_pw_gpu) THEN
            CALL pw_gpu_c1dr3d_3d_ps(pw1, pw2, scale=norm)
         ELSE
#endif
!.. prepare input
            IF (test .AND. pw1%pw_grid%para%group_head .AND. out_unit > 0) &
               WRITE (out_unit, '(A)') " PW_SCATTER : 2d -> 1d "
            ! distribute the coefficients onto the 2d rays buffer
            grays = z_zero
            CALL pw_scatter_p_c1d (pw1, grays)
            nloc = pw2%pw_grid%npts_local
            ALLOCATE (c_in(nloc(1), nloc(2), nloc(3)))
            !..transform
            IF (pw1%pw_grid%para%ray_distribution) THEN
               CALL fft3d(bwfft, n, c_in, grays, pw1%pw_grid%para%group, &
                          pw1%pw_grid%para%rs_group, &
                          pw1%pw_grid%para%yzp, pw1%pw_grid%para%nyzray, &
                          pw1%pw_grid%para%bo, scale=norm, debug=test)
            ELSE
               CALL fft3d(bwfft, n, c_in, grays, pw1%pw_grid%para%rs_group, &
                          pw1%pw_grid%para%bo, scale=norm, debug=test)
            END IF
            !..prepare output (keep real part only)
            IF (test .AND. pw1%pw_grid%para%group_head .AND. out_unit > 0) &
               WRITE (out_unit, '(A)') " Real part "
            CALL pw_copy_from_array(pw2, c_in)
            DEALLOCATE (c_in)
#if defined(__OFFLOAD) && !defined(__NO_OFFLOAD_PW)
         END IF
#endif
      END IF

      IF (test .AND. pw1%pw_grid%para%group_head .AND. out_unit > 0) THEN
         WRITE (out_unit, '(A)') " End of FFT Protocol "
      END IF

      CALL timestop(handle)
      CALL timestop(handle2)

   END SUBROUTINE fft_wrap_pw1pw2_c1d_r3d_gs_rs
8845
8846
8847
! **************************************************************************************************
!> \brief Gathers the pw coefficient vector pw2 from the real 3d grid stored in pw1
!> \param pw1 source: real 3d reciprocal-space grid
!> \param pw2 destination: complex 1d coefficient pw
!> \param scale optional factor applied to each gathered value
!> \par History
!>      none
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_gather_s_c1d_r3d_2(pw1, pw2, scale)

      TYPE(pw_r3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale

      ! thin wrapper: hand the raw 3d array to the array-based worker,
      ! forwarding the optional scale only when it was supplied
      IF (PRESENT(scale)) THEN
         CALL pw_gather_s_c1d_r3d(pw2, pw1%array, scale=scale)
      ELSE
         CALL pw_gather_s_c1d_r3d(pw2, pw1%array)
      END IF

   END SUBROUTINE pw_gather_s_c1d_r3d_2
8866
! **************************************************************************************************
!> \brief Gathers the pw vector from a 3d data field: pw%array(gpt) is filled from
!>        the real 3d array c at the grid point addressed by the g-vector gpt
!> \param pw destination: complex 1d coefficient pw (imaginary parts set to 0)
!> \param c source: real 3d data field
!> \param scale optional factor applied to each gathered value
!> \par History
!>      none
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_gather_s_c1d_r3d (pw, c, scale)

      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw
      REAL(kind=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(IN) :: c
      REAL(kind=dp), INTENT(IN), OPTIONAL :: scale

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_gather_s'

      INTEGER :: gpt, handle, l, m, n

      CALL timeset(routinen, handle)

      ! mapl/mapm/mapn translate the integer g-vector components (g_hat) into
      ! 0-based grid indices; ngpts is the number of stored plane waves
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ngpts => SIZE(pw%pw_grid%gsq), ghat => pw%pw_grid%g_hat)

         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               pw%array(gpt) = scale* cmplx(c(l, m, n), 0.0_dp, kind=dp)
            END DO
!$OMP END PARALLEL DO
         ELSE
            ! unscaled variant kept separate to avoid a PRESENT test in the hot loop
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               pw%array(gpt) = cmplx(c(l, m, n), 0.0_dp, kind=dp)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      CALL timestop(handle)

   END SUBROUTINE pw_gather_s_c1d_r3d
8916
! **************************************************************************************************
!> \brief Scatters the pw coefficient vector pw1 into the real 3d grid stored in pw2
!> \param pw1 source: complex 1d coefficient pw
!> \param pw2 destination: real 3d reciprocal-space grid
!> \param scale optional factor applied to each scattered value
!> \par History
!>      none
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_scatter_s_c1d_r3d_2(pw1, pw2, scale)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale

      ! thin wrapper: hand the raw 3d array to the array-based worker,
      ! forwarding the optional scale only when it was supplied
      IF (PRESENT(scale)) THEN
         CALL pw_scatter_s_c1d_r3d(pw1, pw2%array, scale=scale)
      ELSE
         CALL pw_scatter_s_c1d_r3d(pw1, pw2%array)
      END IF

   END SUBROUTINE pw_scatter_s_c1d_r3d_2
8935
! **************************************************************************************************
!> \brief Scatters a pw vector to a 3d data field: each coefficient of pw is written
!>        (real part only) to the grid point addressed by its g-vector; for
!>        half-space grids the -G entries are filled as well
!> \param pw source: complex 1d coefficient pw
!> \param c destination: real 3d data field; zeroed first when scale is absent
!> \param scale optional factor applied to each scattered value
!> \par History
!>      none
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_scatter_s_c1d_r3d (pw, c, scale)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
      REAL(kind=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(INOUT) :: c
      REAL(kind=dp), INTENT(IN), OPTIONAL :: scale

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_scatter_s'

      INTEGER :: gpt, handle, l, m, n

      CALL timeset(routinen, handle)

      ! mapl/mapm/mapn translate the integer g-vector components (g_hat) into
      ! 0-based grid indices; ngpts is the number of stored plane waves
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))

         ! should only zero the unused bits (but the zero is needed)
         IF (.NOT. PRESENT(scale)) c = 0.0_dp

         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               c(l, m, n) = scale* real(pw%array(gpt), kind=dp)
            END DO
!$OMP END PARALLEL DO
         ELSE
            ! unscaled variant kept separate to avoid a PRESENT test in the hot loop
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               c(l, m, n) = real(pw%array(gpt), kind=dp)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      ! half-space storage: also populate the -G grid points, using the neg index
      ! maps; only the real part is written (imaginary part is discarded here)
      IF (pw%pw_grid%grid_span == halfspace) THEN

         associate(mapl => pw%pw_grid%mapl%neg, mapm => pw%pw_grid%mapm%neg, mapn => pw%pw_grid%mapn%neg, &
                   ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))

            IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  c(l, m, n) = scale*( real(pw%array(gpt), kind=dp))
               END DO
!$OMP END PARALLEL DO
            ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  c(l, m, n) = ( real(pw%array(gpt), kind=dp))
               END DO
!$OMP END PARALLEL DO
            END IF

         END associate

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_scatter_s_c1d_r3d
9017
9018
9019
9020
9021
9022
9023
9024
! **************************************************************************************************
!> \brief Backward 3d FFT of a reciprocal-space coefficient vector (pw1) into a
!>        complex real-space grid (pw2).
!> \param pw1 input: complex 1d coefficient data in reciprocal space
!> \param pw2 output: complex 3d data in real space
!> \param debug print an FFT protocol to the default output unit if .TRUE. (default .FALSE.)
!> \par History
!>      JGH (30-12-2000): New setup of functions and adaptation to parallelism
!>      JGH (04-01-2001): Moved routine from pws to this module, only covers
!>                        pw_types, no more coefficient types
!> \author apsi
!> \note
!>      fft_wrap_pw1pw2
! **************************************************************************************************
   SUBROUTINE fft_wrap_pw1pw2_c1d_c3d_gs_rs (pw1, pw2, debug)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw2
      LOGICAL, INTENT(IN), OPTIONAL :: debug

      CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'

      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
      INTEGER :: handle, handle2, my_pos, nrays, &
                 out_unit
      INTEGER, DIMENSION(:), POINTER :: n
      LOGICAL :: test
      REAL(KIND=dp) :: norm

      CALL timeset(routinen, handle2)
      out_unit = cp_logger_get_default_io_unit()
      ! second timer labelled with the grid cutoff, rounded up to a multiple of 10
      CALL timeset(routinen//"_"//trim(adjustl(cp_to_string( &
                                               ceiling(pw1%pw_grid%cutoff/10)*10))), handle)

      NULLIFY (c_in)
      NULLIFY (c_out)

      IF (PRESENT(debug)) THEN
         test = debug
      ELSE
         test = .false.
      END IF

      !..check if grids are compatible
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
            cpabort("PW grids not compatible")
         END IF
         IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
            cpabort("PW grids have not compatible MPI groups")
         END IF
      END IF

      !..prepare input
      ! backward transform is unscaled (the 1/ngpts factor lives in the forward FFT)
      norm = 1.0_dp

      n => pw1%pw_grid%npts

      IF (pw1%pw_grid%para%mode == pw_mode_local) THEN

         !
         !..replicated data, use local FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
            WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
            WRITE (out_unit, '(A,T66,E15.6)') " scale factor", norm
         END IF

         ! scatter the coefficients directly into pw2 and transform in place
         c_out => pw2%array
         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " PW_SCATTER : 3d -> 1d "
         CALL pw_scatter_s_c1d_c3d(pw1, c_out)
         CALL fft3d(bwfft, n, c_out, scale=norm, debug=test)

         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "

      ELSE

         !
         !..parallel FFT
         !

         IF (test .AND. pw1%pw_grid%para%group_head .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
            WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
            WRITE (out_unit, '(A,T66,E15.6)') " scale factor", norm
         END IF

         my_pos = pw1%pw_grid%para%my_pos
         nrays = pw1%pw_grid%para%nyzray(my_pos)
         grays => pw1%pw_grid%grays

         !..prepare input
         IF (test .AND. pw1%pw_grid%para%group_head .AND. out_unit > 0) &
            WRITE (out_unit, '(A)') " PW_SCATTER : 2d -> 1d "
         ! distribute the coefficients onto the 2d rays buffer
         grays = z_zero
         CALL pw_scatter_p_c1d (pw1, grays)
         c_in => pw2%array
         !..transform
         IF (pw1%pw_grid%para%ray_distribution) THEN
            CALL fft3d(bwfft, n, c_in, grays, pw1%pw_grid%para%group, &
                       pw1%pw_grid%para%rs_group, &
                       pw1%pw_grid%para%yzp, pw1%pw_grid%para%nyzray, &
                       pw1%pw_grid%para%bo, scale=norm, debug=test)
         ELSE
            CALL fft3d(bwfft, n, c_in, grays, pw1%pw_grid%para%rs_group, &
                       pw1%pw_grid%para%bo, scale=norm, debug=test)
         END IF
         !..prepare output (nothing to do)
      END IF

      IF (test .AND. pw1%pw_grid%para%group_head .AND. out_unit > 0) THEN
         WRITE (out_unit, '(A)') " End of FFT Protocol "
      END IF

      CALL timestop(handle)
      CALL timestop(handle2)

   END SUBROUTINE fft_wrap_pw1pw2_c1d_c3d_gs_rs
9149
9150
9151
! **************************************************************************************************
!> \brief Gathers the pw coefficient vector pw2 from the complex 3d grid stored in pw1
!> \param pw1 source: complex 3d reciprocal-space grid
!> \param pw2 destination: complex 1d coefficient pw
!> \param scale optional factor applied to each gathered value
!> \par History
!>      none
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_gather_s_c1d_c3d_2(pw1, pw2, scale)

      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale

      ! thin wrapper: hand the raw 3d array to the array-based worker,
      ! forwarding the optional scale only when it was supplied
      IF (PRESENT(scale)) THEN
         CALL pw_gather_s_c1d_c3d(pw2, pw1%array, scale=scale)
      ELSE
         CALL pw_gather_s_c1d_c3d(pw2, pw1%array)
      END IF

   END SUBROUTINE pw_gather_s_c1d_c3d_2
9170
! **************************************************************************************************
!> \brief Gathers the pw vector from a 3d data field: pw%array(gpt) is filled from
!>        the complex 3d array c at the grid point addressed by the g-vector gpt
!> \param pw destination: complex 1d coefficient pw
!> \param c source: complex 3d data field
!> \param scale optional factor applied to each gathered value
!> \par History
!>      none
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_gather_s_c1d_c3d (pw, c, scale)

      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(IN) :: c
      REAL(kind=dp), INTENT(IN), OPTIONAL :: scale

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_gather_s'

      INTEGER :: gpt, handle, l, m, n

      CALL timeset(routinen, handle)

      ! mapl/mapm/mapn translate the integer g-vector components (g_hat) into
      ! 0-based grid indices; ngpts is the number of stored plane waves
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ngpts => SIZE(pw%pw_grid%gsq), ghat => pw%pw_grid%g_hat)

         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               pw%array(gpt) = scale* c(l, m, n)
            END DO
!$OMP END PARALLEL DO
         ELSE
            ! unscaled variant kept separate to avoid a PRESENT test in the hot loop
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               pw%array(gpt) = c(l, m, n)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      CALL timestop(handle)

   END SUBROUTINE pw_gather_s_c1d_c3d
9220
! **************************************************************************************************
!> \brief Scatters the pw coefficient vector pw1 into the complex 3d grid stored in pw2
!> \param pw1 source: complex 1d coefficient pw
!> \param pw2 destination: complex 3d reciprocal-space grid
!> \param scale optional factor applied to each scattered value
!> \par History
!>      none
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_scatter_s_c1d_c3d_2(pw1, pw2, scale)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2
      REAL(KIND=dp), INTENT(IN), OPTIONAL :: scale

      ! thin wrapper: hand the raw 3d array to the array-based worker,
      ! forwarding the optional scale only when it was supplied
      IF (PRESENT(scale)) THEN
         CALL pw_scatter_s_c1d_c3d(pw1, pw2%array, scale=scale)
      ELSE
         CALL pw_scatter_s_c1d_c3d(pw1, pw2%array)
      END IF

   END SUBROUTINE pw_scatter_s_c1d_c3d_2
9239
! **************************************************************************************************
!> \brief Scatters a pw vector to a 3d data field: each coefficient of pw is written
!>        to the grid point addressed by its g-vector; for half-space grids the -G
!>        entries are filled with the complex conjugate
!> \param pw source: complex 1d coefficient pw
!> \param c destination: complex 3d data field; zeroed first when scale is absent
!> \param scale optional factor applied to each scattered value
!> \par History
!>      none
!> \author JGH
! **************************************************************************************************
   SUBROUTINE pw_scatter_s_c1d_c3d (pw, c, scale)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, INTENT(INOUT) :: c
      REAL(kind=dp), INTENT(IN), OPTIONAL :: scale

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_scatter_s'

      INTEGER :: gpt, handle, l, m, n

      CALL timeset(routinen, handle)

      ! mapl/mapm/mapn translate the integer g-vector components (g_hat) into
      ! 0-based grid indices; ngpts is the number of stored plane waves
      associate(mapl => pw%pw_grid%mapl%pos, mapm => pw%pw_grid%mapm%pos, mapn => pw%pw_grid%mapn%pos, &
                ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))

         ! should only zero the unused bits (but the zero is needed)
         IF (.NOT. PRESENT(scale)) c = 0.0_dp

         IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               c(l, m, n) = scale* pw%array(gpt)
            END DO
!$OMP END PARALLEL DO
         ELSE
            ! unscaled variant kept separate to avoid a PRESENT test in the hot loop
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
            DO gpt = 1, ngpts
               l = mapl(ghat(1, gpt)) + 1
               m = mapm(ghat(2, gpt)) + 1
               n = mapn(ghat(3, gpt)) + 1
               c(l, m, n) = pw%array(gpt)
            END DO
!$OMP END PARALLEL DO
         END IF

      END associate

      ! half-space storage: populate the -G grid points (neg index maps) with the
      ! complex conjugate of the stored coefficient
      IF (pw%pw_grid%grid_span == halfspace) THEN

         associate(mapl => pw%pw_grid%mapl%neg, mapm => pw%pw_grid%mapm%neg, mapn => pw%pw_grid%mapn%neg, &
                   ghat => pw%pw_grid%g_hat, ngpts => SIZE(pw%pw_grid%gsq))

            IF (PRESENT(scale)) THEN
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw, scale)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  c(l, m, n) = scale*conjg( pw%array(gpt))
               END DO
!$OMP END PARALLEL DO
            ELSE
!$OMP PARALLEL DO PRIVATE(gpt, l, m, n) DEFAULT(NONE) SHARED(c, pw)
               DO gpt = 1, ngpts
                  l = mapl(ghat(1, gpt)) + 1
                  m = mapm(ghat(2, gpt)) + 1
                  n = mapn(ghat(3, gpt)) + 1
                  c(l, m, n) = conjg( pw%array(gpt))
               END DO
!$OMP END PARALLEL DO
            END IF

         END associate

      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_scatter_s_c1d_c3d
9321
9322
9323
9324
9325
9326
9327
9328
! **************************************************************************************************
!> \brief Backward 3d FFT of a complex reciprocal-space grid (pw1) into a real
!>        real-space grid (pw2); only the real part of the transform is kept.
!> \param pw1 input: complex 3d data in reciprocal space
!> \param pw2 output: real 3d data in real space
!> \param debug print an FFT protocol to the default output unit if .TRUE. (default .FALSE.)
!> \par History
!>      JGH (30-12-2000): New setup of functions and adaptation to parallelism
!>      JGH (04-01-2001): Moved routine from pws to this module, only covers
!>                        pw_types, no more coefficient types
!> \author apsi
!> \note
!>      fft_wrap_pw1pw2
! **************************************************************************************************
   SUBROUTINE fft_wrap_pw1pw2_c3d_r3d_gs_rs (pw1, pw2, debug)

      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_r3d_rs_type), INTENT(INOUT) :: pw2
      LOGICAL, INTENT(IN), OPTIONAL :: debug

      CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'

      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
      INTEGER :: handle, handle2, my_pos, nrays, &
                 out_unit
      INTEGER, DIMENSION(:), POINTER :: n
      LOGICAL :: test
      REAL(KIND=dp) :: norm

      CALL timeset(routinen, handle2)
      out_unit = cp_logger_get_default_io_unit()
      ! second timer labelled with the grid cutoff, rounded up to a multiple of 10
      CALL timeset(routinen//"_"//trim(adjustl(cp_to_string( &
                                               ceiling(pw1%pw_grid%cutoff/10)*10))), handle)

      NULLIFY (c_in)
      NULLIFY (c_out)

      IF (PRESENT(debug)) THEN
         test = debug
      ELSE
         test = .false.
      END IF

      !..check if grids are compatible
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
            cpabort("PW grids not compatible")
         END IF
         IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
            cpabort("PW grids have not compatible MPI groups")
         END IF
      END IF

      !..prepare input
      ! backward transform is unscaled (the 1/ngpts factor lives in the forward FFT)
      norm = 1.0_dp

      n => pw1%pw_grid%npts

      IF (pw1%pw_grid%para%mode == pw_mode_local) THEN

         !
         !..replicated data, use local FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
            WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
            WRITE (out_unit, '(A,T66,E15.6)') " scale factor", norm
         END IF

         c_in => pw1%array
         ALLOCATE (c_out(n(1), n(2), n(3)))
         CALL fft3d(bwfft, n, c_in, c_out, scale=norm, debug=test)
         ! use real part only
         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " REAL part "
         pw2%array = real(c_out, kind=dp)
         DEALLOCATE (c_out)

         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "

      ELSE

         !
         !..parallel FFT
         !

         IF (test .AND. pw1%pw_grid%para%group_head .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
            WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
            WRITE (out_unit, '(A,T66,E15.6)') " scale factor", norm
         END IF

         my_pos = pw1%pw_grid%para%my_pos
         nrays = pw1%pw_grid%para%nyzray(my_pos)
         grays => pw1%pw_grid%grays

         ! NOTE(review): no transform is performed in this parallel branch - pw2 is
         ! left untouched. Presumably the distributed c3d -> r3d combination is
         ! unsupported; confirm against the fypp template before relying on it.
      END IF

      IF (test .AND. pw1%pw_grid%para%group_head .AND. out_unit > 0) THEN
         WRITE (out_unit, '(A)') " End of FFT Protocol "
      END IF

      CALL timestop(handle)
      CALL timestop(handle2)

   END SUBROUTINE fft_wrap_pw1pw2_c3d_r3d_gs_rs
9439
9440
9441
9442
! **************************************************************************************************
!> \brief Forward 3d FFT of a complex real-space grid (pw1) into a reciprocal-space
!>        coefficient vector (pw2); the result is scaled by 1/ngpts.
!> \param pw1 input: complex 3d data in real space
!> \param pw2 output: complex 1d coefficient data in reciprocal space
!> \param debug print an FFT protocol to the default output unit if .TRUE. (default .FALSE.)
!> \par History
!>      JGH (30-12-2000): New setup of functions and adaptation to parallelism
!>      JGH (04-01-2001): Moved routine from pws to this module, only covers
!>                        pw_types, no more coefficient types
!> \author apsi
!> \note
!>      fft_wrap_pw1pw2
! **************************************************************************************************
   SUBROUTINE fft_wrap_pw1pw2_c3d_c1d_rs_gs (pw1, pw2, debug)

      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw2
      LOGICAL, INTENT(IN), OPTIONAL :: debug

      CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'

      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
      INTEGER :: handle, handle2, my_pos, nrays, &
                 out_unit
      INTEGER, DIMENSION(:), POINTER :: n
      LOGICAL :: test
      REAL(KIND=dp) :: norm

      CALL timeset(routinen, handle2)
      out_unit = cp_logger_get_default_io_unit()
      ! second timer labelled with the grid cutoff, rounded up to a multiple of 10
      CALL timeset(routinen//"_"//trim(adjustl(cp_to_string( &
                                               ceiling(pw1%pw_grid%cutoff/10)*10))), handle)

      NULLIFY (c_in)
      NULLIFY (c_out)

      IF (PRESENT(debug)) THEN
         test = debug
      ELSE
         test = .false.
      END IF

      !..check if grids are compatible
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
            cpabort("PW grids not compatible")
         END IF
         IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
            cpabort("PW grids have not compatible MPI groups")
         END IF
      END IF

      !..prepare input
      ! forward transform carries the 1/ngpts normalization
      norm = 1.0_dp/pw1%pw_grid%ngpts

      n => pw1%pw_grid%npts

      IF (pw1%pw_grid%para%mode == pw_mode_local) THEN

         !
         !..replicated data, use local FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
            WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
            WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T66,E15.6)') " scale factor", norm
         END IF

         c_in => pw1%array
         ALLOCATE (c_out(n(1), n(2), n(3)))
         ! transform
         CALL fft3d(fwfft, n, c_in, c_out, scale=norm, debug=test)
         ! gather results
         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " PW_GATHER : 3d -> 1d "
         CALL pw_gather_s_c1d_c3d(pw2, c_out)
         DEALLOCATE (c_out)

         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "

      ELSE

         !
         !..parallel FFT
         !

         IF (test .AND. pw1%pw_grid%para%group_head .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
            WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
            WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T66,E15.6)') " scale factor", norm
         END IF

         my_pos = pw1%pw_grid%para%my_pos
         nrays = pw1%pw_grid%para%nyzray(my_pos)
         grays => pw1%pw_grid%grays

         !..prepare input
         c_in => pw1%array
         grays = z_zero
         !..transform
         IF (pw1%pw_grid%para%ray_distribution) THEN
            CALL fft3d(fwfft, n, c_in, grays, pw1%pw_grid%para%group, &
                       pw1%pw_grid%para%rs_group, &
                       pw1%pw_grid%para%yzp, pw1%pw_grid%para%nyzray, &
                       pw1%pw_grid%para%bo, scale=norm, debug=test)
         ELSE
            CALL fft3d(fwfft, n, c_in, grays, pw1%pw_grid%para%rs_group, &
                       pw1%pw_grid%para%bo, scale=norm, debug=test)
         END IF
         !..prepare output: collect the coefficients from the rays buffer
         IF (test .AND. pw1%pw_grid%para%group_head .AND. out_unit > 0) &
            WRITE (out_unit, '(A)') " PW_GATHER : 2d -> 1d "
         CALL pw_gather_p_c1d (pw2, grays)
      END IF

      IF (test .AND. pw1%pw_grid%para%group_head .AND. out_unit > 0) THEN
         WRITE (out_unit, '(A)') " End of FFT Protocol "
      END IF

      CALL timestop(handle)
      CALL timestop(handle2)

   END SUBROUTINE fft_wrap_pw1pw2_c3d_c1d_rs_gs
9571
9572
9573
9574
9575
! **************************************************************************************************
!> \brief Forward 3d FFT of a complex real-space function (pw1) into a complex
!>        3d reciprocal-space function (pw2); no 3d -> 1d packing is performed.
!> \param pw1 input, complex data on the full 3d real-space grid
!> \param pw2 output, complex data on the full 3d reciprocal-space grid
!> \param debug optional; when .TRUE. an FFT protocol is printed
!> \par History
!>      JGH (30-12-2000): New setup of functions and adaptation to parallelism
!>      JGH (04-01-2001): Moved routine from pws to this module, only covers
!>                        pw_types, no more coefficient types
!> \author apsi
!> \note
!>      fft_wrap_pw1pw2
! **************************************************************************************************
   SUBROUTINE fft_wrap_pw1pw2_c3d_c3d_rs_gs (pw1, pw2, debug)

      TYPE(pw_c3d_rs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_gs_type), INTENT(INOUT) :: pw2
      LOGICAL, INTENT(IN), OPTIONAL :: debug

      CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'

      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
      INTEGER :: handle, handle2, my_pos, nrays, &
                 out_unit
      INTEGER, DIMENSION(:), POINTER :: n
      LOGICAL :: test
      REAL(KIND=dp) :: norm

      ! generic timer plus a cutoff-labelled timer to separate grid levels
      CALL timeset(routinen, handle2)
      out_unit = cp_logger_get_default_io_unit()
      CALL timeset(routinen//"_"//trim(adjustl(cp_to_string( &
                                               ceiling(pw1%pw_grid%cutoff/10)*10))), handle)

      NULLIFY (c_in)
      NULLIFY (c_out)

      IF (PRESENT(debug)) THEN
         test = debug
      ELSE
         test = .false.
      END IF

      !..check if grids are compatible
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
            cpabort("PW grids not compatible")
         END IF
         IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
            cpabort("PW grids have not compatible MPI groups")
         END IF
      END IF

      !..prepare input
      ! forward transform carries the 1/N normalization
      norm = 1.0_dp/pw1%pw_grid%ngpts

      n => pw1%pw_grid%npts

      IF (pw1%pw_grid%para%mode == pw_mode_local) THEN

         !
         !..replicated data, use local FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
            WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
            WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
            WRITE (out_unit, '(A,T66,E15.6)') " scale factor", norm
         END IF

         ! in-place style 3d -> 3d transform directly into pw2's array
         c_in => pw1%array
         c_out => pw2%array
         CALL fft3d(fwfft, n, c_in, c_out, scale=norm, debug=test)

         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "

      ELSE

         !
         !..parallel FFT
         !

         IF (test .AND. pw1%pw_grid%para%group_head .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "FWFFT"
            WRITE (out_unit, '(A,T72,A)') " in space ", "REALSPACE"
            WRITE (out_unit, '(A,T72,A)') " out space ", "REALSPACE"
            WRITE (out_unit, '(A,T66,E15.6)') " scale factor", norm
         END IF

         ! NOTE(review): this distributed branch only sets up my_pos/nrays/grays
         ! and never calls fft3d or a scatter/gather -- pw2 is left untouched.
         ! Looks like the c3d->c3d combination is not supported for distributed
         ! grids (cf. the module header note); confirm callers never reach here
         ! in pw_mode_distributed.
         my_pos = pw1%pw_grid%para%my_pos
         nrays = pw1%pw_grid%para%nyzray(my_pos)
         grays => pw1%pw_grid%grays

      END IF

      IF (test .AND. pw1%pw_grid%para%group_head .AND. out_unit > 0) THEN
         WRITE (out_unit, '(A)') " End of FFT Protocol "
      END IF

      CALL timestop(handle)
      CALL timestop(handle2)

   END SUBROUTINE fft_wrap_pw1pw2_c3d_c3d_rs_gs
9682
! **************************************************************************************************
!> \brief Backward 3d FFT of a complex reciprocal-space function (pw1) into a
!>        complex 3d real-space function (pw2); no packing/scattering involved.
!> \param pw1 input, complex data on the full 3d reciprocal-space grid
!> \param pw2 output, complex data on the full 3d real-space grid
!> \param debug optional; when .TRUE. an FFT protocol is printed
!> \par History
!>      JGH (30-12-2000): New setup of functions and adaptation to parallelism
!>      JGH (04-01-2001): Moved routine from pws to this module, only covers
!>                        pw_types, no more coefficient types
!> \author apsi
!> \note
!>      fft_wrap_pw1pw2
! **************************************************************************************************
   SUBROUTINE fft_wrap_pw1pw2_c3d_c3d_gs_rs (pw1, pw2, debug)

      TYPE(pw_c3d_gs_type), INTENT(IN) :: pw1
      TYPE(pw_c3d_rs_type), INTENT(INOUT) :: pw2
      LOGICAL, INTENT(IN), OPTIONAL :: debug

      CHARACTER(len=*), PARAMETER :: routineN = 'fft_wrap_pw1pw2'

      COMPLEX(KIND=dp), DIMENSION(:, :), CONTIGUOUS, POINTER :: grays
      COMPLEX(KIND=dp), DIMENSION(:, :, :), CONTIGUOUS, POINTER :: c_in, c_out
      INTEGER :: handle, handle2, my_pos, nrays, &
                 out_unit
      INTEGER, DIMENSION(:), POINTER :: n
      LOGICAL :: test
      REAL(KIND=dp) :: norm

      ! generic timer plus a cutoff-labelled timer to separate grid levels
      CALL timeset(routinen, handle2)
      out_unit = cp_logger_get_default_io_unit()
      CALL timeset(routinen//"_"//trim(adjustl(cp_to_string( &
                                               ceiling(pw1%pw_grid%cutoff/10)*10))), handle)

      NULLIFY (c_in)
      NULLIFY (c_out)

      IF (PRESENT(debug)) THEN
         test = debug
      ELSE
         test = .false.
      END IF

      !..check if grids are compatible
      IF (.NOT. ASSOCIATED(pw1%pw_grid, pw2%pw_grid)) THEN
         IF (pw1%pw_grid%dvol /= pw2%pw_grid%dvol) THEN
            cpabort("PW grids not compatible")
         END IF
         IF (pw1%pw_grid%para%group /= pw2%pw_grid%para%group) THEN
            cpabort("PW grids have not compatible MPI groups")
         END IF
      END IF

      !..prepare input
      ! backward transforms are unscaled (forward already applied 1/N)
      norm = 1.0_dp

      n => pw1%pw_grid%npts

      IF (pw1%pw_grid%para%mode == pw_mode_local) THEN

         !
         !..replicated data, use local FFT
         !

         IF (test .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
            WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T66,E15.6)') " scale factor", norm
         END IF

         ! direct 3d -> 3d backward transform into pw2's array
         c_in => pw1%array
         c_out => pw2%array
         CALL fft3d(bwfft, n, c_in, c_out, scale=norm, debug=test)

         IF (test .AND. out_unit > 0) WRITE (out_unit, '(A)') " End of FFT Protocol "

      ELSE

         !
         !..parallel FFT
         !

         IF (test .AND. pw1%pw_grid%para%group_head .AND. out_unit > 0) THEN
            WRITE (out_unit, '(A)') " FFT Protocol "
            WRITE (out_unit, '(A,T76,A)') " Transform direction ", "BWFFT"
            WRITE (out_unit, '(A,T66,A)') " in space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T66,A)') " out space ", "RECIPROCALSPACE"
            WRITE (out_unit, '(A,T66,E15.6)') " scale factor", norm
         END IF

         ! NOTE(review): as in the forward c3d->c3d wrapper, the distributed
         ! branch performs no transform and leaves pw2 untouched -- confirm this
         ! combination is never used with pw_mode_distributed.
         my_pos = pw1%pw_grid%para%my_pos
         nrays = pw1%pw_grid%para%nyzray(my_pos)
         grays => pw1%pw_grid%grays

      END IF

      IF (test .AND. pw1%pw_grid%para%group_head .AND. out_unit > 0) THEN
         WRITE (out_unit, '(A)') " End of FFT Protocol "
      END IF

      CALL timestop(handle)
      CALL timestop(handle2)

   END SUBROUTINE fft_wrap_pw1pw2_c3d_c3d_gs_rs
9789
9790
9791
! **************************************************************************************************
!> \brief Multiply all data points with a Gaussian damping factor
!>        Needed for longrange Coulomb potential
!>        V(\vec r)=erf(omega*r)/r
!>        V(\vec g)=\frac{4*\pi}{g**2}*exp(-g**2/omega**2)
!> \param pw g-space function, damped in place
!> \param omega range-separation parameter (must be non-negative)
!> \par History
!>      Frederick Stein (12-04-2019) created
!> \author Frederick Stein (12-Apr-2019)
!> \note
!>      Performs a Gaussian damping
! **************************************************************************************************
   SUBROUTINE pw_gauss_damp(pw, omega)

      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw
      REAL(kind=dp), INTENT(IN) :: omega

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_gauss_damp'

      INTEGER :: handle, ig, ngpt
      REAL(kind=dp) :: expfac

      CALL timeset(routinen, handle)
      cpassert(omega >= 0)

      ! exponent prefactor 1/(4*omega^2)
      expfac = 0.25_dp/(omega*omega)

      ngpt = SIZE(pw%array)
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(ngpt, pw, expfac)
      DO ig = 1, ngpt
         pw%array(ig) = pw%array(ig)*exp(-pw%pw_grid%gsq(ig)*expfac)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_gauss_damp
9828
! **************************************************************************************************
!> \brief Multiply all data points with the logarithmic derivative of a Gaussian
!> \param pw g-space function, scaled in place
!> \param omega range-separation parameter (must be non-negative)
!> \note
!>      Applies the factor (1 + gsq/(4*omega^2)) to each coefficient
! **************************************************************************************************
   SUBROUTINE pw_log_deriv_gauss(pw, omega)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
      REAL(kind=dp), INTENT(IN) :: omega

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_log_deriv_gauss'

      INTEGER :: handle, ig, ngpt
      REAL(kind=dp) :: prefac

      CALL timeset(routinen, handle)
      cpassert(omega >= 0)

      ! prefactor 1/(4*omega^2)
      prefac = 0.25_dp/(omega*omega)

      ngpt = SIZE(pw%array)
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(ngpt, pw, prefac)
      DO ig = 1, ngpt
         pw%array(ig) = pw%array(ig)*(1.0_dp + prefac*pw%pw_grid%gsq(ig))
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)
   END SUBROUTINE pw_log_deriv_gauss
9858
! **************************************************************************************************
!> \brief Multiply all data points with the complementary Gaussian damping
!>        factor (1 - exp(-gsq/(4*omega^2)))
!>        Needed for the shortrange part of a range-separated Coulomb potential
!>        V(\vec r)=erfc(omega*r)/r
!> \param pw g-space function, damped in place
!> \param omega range-separation parameter
!> \par History
!>      Frederick Stein (12-04-2019) created
!> \author Frederick Stein (12-Apr-2019)
!> \note
!>      Performs a complementary Gaussian damping
! **************************************************************************************************
   SUBROUTINE pw_compl_gauss_damp(pw, omega)

      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw
      REAL(kind=dp), INTENT(IN) :: omega

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_compl_gauss_damp'

      INTEGER :: cnt, handle, i
      REAL(kind=dp) :: omega_2, tmp

      CALL timeset(routinen, handle)

      ! exponent prefactor 1/(4*omega^2)
      omega_2 = omega*omega
      omega_2 = 0.25_dp/omega_2

      cnt = SIZE(pw%array)

!$OMP PARALLEL DO PRIVATE(i, tmp) DEFAULT(NONE) SHARED(cnt, pw,omega_2)
      DO i = 1, cnt
         tmp = -omega_2*pw%pw_grid%gsq(i)
         IF (abs(tmp) > 1.0e-5_dp) THEN
            pw%array(i) = pw%array(i)*(1.0_dp - exp(tmp))
         ELSE
            ! Taylor expansion for small |tmp| to avoid cancellation:
            ! 1 - exp(tmp) = -(tmp + tmp^2/2 + tmp^3/6 + ...)
            ! BUGFIX: the leading minus sign was missing, which flipped the sign
            ! of the damped coefficients for small |G|
            pw%array(i) = -pw%array(i)*(tmp + 0.5_dp*tmp*(tmp + (1.0_dp/3.0_dp)*tmp**2))
         END IF
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_compl_gauss_damp
9903
! **************************************************************************************************
!> \brief Multiply all data points with the logarithmic derivative of the
!>        complementary Gaussian damping factor
!> \param pw g-space function, scaled in place
!> \param omega range-separation parameter
!> \note
! **************************************************************************************************
   SUBROUTINE pw_log_deriv_compl_gauss(pw, omega)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
      REAL(kind=dp), INTENT(IN) :: omega

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_log_deriv_compl_gauss'

      INTEGER :: handle, ig, ngpt
      REAL(kind=dp) :: damp, prefac, x

      CALL timeset(routinen, handle)

      ! prefactor 1/(4*omega^2)
      prefac = 0.25_dp/(omega*omega)

      ngpt = SIZE(pw%array)
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig, x, damp) &
!$OMP SHARED(ngpt, pw, prefac)
      DO ig = 1, ngpt
         x = prefac*pw%pw_grid%gsq(ig)
         IF (abs(x) < 0.003_dp) THEN
            ! Taylor polynomial of 1 - x*exp(-x)/(1-exp(-x)) for small x,
            ! preventing division by zero
            pw%array(ig) = pw%array(ig)*(0.5_dp*x - x**2/12.0_dp)
         ELSE
            damp = exp(-x)
            pw%array(ig) = pw%array(ig)*(1.0_dp - x*damp/(1.0_dp - damp))
         END IF
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_log_deriv_compl_gauss
9941
! **************************************************************************************************
!> \brief Multiply all data points with a Gaussian damping factor and mixes it with the original function
!>        Needed for mixed longrange/Coulomb potential
!>        V(\vec r)=(a+b*erf(omega*r))/r
!>        V(\vec g)=\frac{4*\pi}{g**2}*(a+b*exp(-g**2/omega**2))
!> \param pw g-space function, scaled in place
!> \param omega range-separation parameter
!> \param scale_coul weight of the full Coulomb contribution (a)
!> \param scale_long weight of the longrange contribution (b)
!> \par History
!>      Frederick Stein (16-Dec-2021) created
!> \author Frederick Stein (16-Dec-2021)
!> \note
!>      Performs a Gaussian damping
! **************************************************************************************************
   SUBROUTINE pw_gauss_damp_mix(pw, omega, scale_coul, scale_long)

      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw
      REAL(kind=dp), INTENT(IN) :: omega, scale_coul, scale_long

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_gauss_damp_mix'

      INTEGER :: handle, ig, ngpt
      REAL(kind=dp) :: prefac

      CALL timeset(routinen, handle)

      ! prefactor 1/(4*omega^2)
      prefac = 0.25_dp/(omega*omega)

      ngpt = SIZE(pw%array)
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(ngpt, pw, prefac, scale_coul, scale_long)
      DO ig = 1, ngpt
         pw%array(ig) = pw%array(ig)*(scale_coul + scale_long*exp(-pw%pw_grid%gsq(ig)*prefac))
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_gauss_damp_mix
9979
! **************************************************************************************************
!> \brief Multiply all data points with the logarithmic derivative of the mixed longrange/Coulomb potential
!>        Needed for mixed longrange/Coulomb potential
!> \param pw g-space function, scaled in place
!> \param omega range-separation parameter
!> \param scale_coul weight of the full Coulomb contribution
!> \param scale_long weight of the longrange contribution
!> \note
! **************************************************************************************************
   SUBROUTINE pw_log_deriv_mix_cl(pw, omega, scale_coul, scale_long)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
      REAL(kind=dp), INTENT(IN) :: omega, scale_coul, scale_long

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_log_deriv_mix_cl'

      INTEGER :: handle, ig, ngpt
      REAL(kind=dp) :: damp, prefac, x

      CALL timeset(routinen, handle)

      ! prefactor 1/(4*omega^2)
      prefac = 0.25_dp/(omega*omega)

      ngpt = SIZE(pw%array)
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig, x, damp) &
!$OMP SHARED(ngpt, pw, prefac, scale_long, scale_coul)
      DO ig = 1, ngpt
         x = prefac*pw%pw_grid%gsq(ig)
         ! evaluate the Gaussian damping once and reuse it
         damp = exp(-x)
         pw%array(ig) = pw%array(ig)*(1.0_dp + scale_long*x*damp/(scale_coul + scale_long*damp))
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_log_deriv_mix_cl
10015
! **************************************************************************************************
!> \brief Multiply all data points with a complementary cosine
!>        Needed for truncated Coulomb potential
!>        V(\vec r)=1/r if r<rc, 0 else
!>        V(\vec g)=\frac{4*\pi}{g**2}*(1-cos(g*rc))
!> \param pw g-space function, scaled in place
!> \param rcutoff truncation radius rc (must be non-negative)
!> \par History
!>      Frederick Stein (07-06-2021) created
!> \author Frederick Stein (07-Jun-2021)
!> \note
!>      Multiplies by complementary cosine
! **************************************************************************************************
   SUBROUTINE pw_truncated(pw, rcutoff)

      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw
      REAL(kind=dp), INTENT(IN) :: rcutoff

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_truncated'

      INTEGER :: handle, i
      REAL(kind=dp) :: tmp

      CALL timeset(routinen, handle)
      cpassert(rcutoff >= 0)

!$OMP PARALLEL DO PRIVATE(i,tmp) DEFAULT(NONE) SHARED(pw, rcutoff)
      DO i = 1, SIZE(pw%array)
         tmp = sqrt(pw%pw_grid%gsq(i))*rcutoff
         IF (tmp >= 0.005_dp) THEN
            pw%array(i) = pw%array(i)*(1.0_dp - cos(tmp))
         ELSE
            ! Taylor expansion 1-cos(t) = t^2/2*(1 - t^2/12) for small t
            ! BUGFIX: the literal 1.0 was default (single) precision, which
            ! silently degraded the accuracy of this branch; use 1.0_dp
            pw%array(i) = pw%array(i)*tmp**2/2.0_dp*(1.0_dp - tmp**2/12.0_dp)
         END IF
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_truncated
10056
! **************************************************************************************************
!> \brief Multiply all data points with the logarithmic derivative of the complementary cosine
!>        This function is needed for virials using truncated Coulomb potentials
!> \param pw g-space function, scaled in place
!> \param rcutoff truncation radius (must be non-negative)
!> \note
! **************************************************************************************************
   SUBROUTINE pw_log_deriv_trunc(pw, rcutoff)

      TYPE(pw_c1d_gs_type), INTENT(IN) :: pw
      REAL(kind=dp), INTENT(IN) :: rcutoff

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_log_deriv_trunc'

      INTEGER :: handle, ig, ngpt
      REAL(kind=dp) :: gr, half_rc

      CALL timeset(routinen, handle)
      cpassert(rcutoff >= 0)

      half_rc = 0.5_dp*rcutoff
      ngpt = SIZE(pw%array)
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig, gr) &
!$OMP SHARED(ngpt, pw, half_rc)
      DO ig = 1, ngpt
         gr = half_rc*sqrt(pw%pw_grid%gsq(ig))
         IF (abs(gr) < 0.0001_dp) THEN
            ! Taylor polynomial of 1 - x*cot(x) for small x to prevent
            ! division by zero: x^2/3*(1 + x^2/15)
            pw%array(ig) = pw%array(ig)*gr**2*(1.0_dp + gr**2/15.0_dp)/3.0_dp
         ELSE
            pw%array(ig) = pw%array(ig)*(1.0_dp - gr/tan(gr))
         END IF
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_log_deriv_trunc
10094
! **************************************************************************************************
!> \brief Calculate the derivative of a plane wave vector
!> \param pw g-space function, differentiated in place
!> \param n derivative orders per Cartesian direction (all must be >= 0)
!> \par History
!>      JGH (06-10-2002) allow only for inplace derivatives
!> \author JGH (25-Feb-2001)
!> \note
!>      Calculate the derivative dx^n(1) dy^n(2) dz^n(3) PW
!>      In g-space each d/dx_k becomes a multiplication by i*g_k
! **************************************************************************************************
   SUBROUTINE pw_derive(pw, n)

      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw
      INTEGER, DIMENSION(3), INTENT(IN) :: n

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_derive'

      COMPLEX(KIND=dp) :: im
      INTEGER :: handle, idir, m

      CALL timeset(routinen, handle)

      ! BUGFIX: the message previously read "Nonnegative exponents are not
      ! supported", the exact opposite of the guarded condition
      IF (any(n < 0)) &
         cpabort("Negative exponents are not supported!")

      ! i**m collects all factors of i from the m individual derivatives
      m = sum(n)
      im = cmplx(0.0_dp, 1.0_dp, kind=dp)**m

      ! multiply by g_k (or g_k**n(k)) for each direction with a nonzero order;
      ! the n==1 case avoids the cost of the general power
      DO idir = 1, 3
         IF (n(idir) == 1) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(idir, pw)
            pw%array(:) = pw%array(:)*pw%pw_grid%g(idir, :)
!$OMP END PARALLEL WORKSHARE
         ELSE IF (n(idir) > 1) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(idir, n, pw)
            pw%array(:) = pw%array(:)*(pw%pw_grid%g(idir, :)**n(idir))
!$OMP END PARALLEL WORKSHARE
         END IF
      END DO

      ! im can take the values 1, -1, i, -i
      ! skip this if im == 1
      IF (abs(real(im, kind=dp) - 1.0_dp) > 1.0e-10_dp) THEN
!$OMP PARALLEL WORKSHARE DEFAULT(NONE) SHARED(im, pw)
         pw%array(:) = im*pw%array(:)
!$OMP END PARALLEL WORKSHARE
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_derive
10164
! **************************************************************************************************
!> \brief Calculate the Laplacian of a plane wave vector
!> \param pw g-space function, replaced in place by its Laplacian
!> \par History
!>      Frederick Stein (01-02-2022) created
!> \author JGH (25-Feb-2001)
!> \note
!>      Calculate the derivative DELTA PW; in g-space this is a
!>      multiplication by -|G|^2
! **************************************************************************************************
   SUBROUTINE pw_laplace(pw)

      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_laplace'

      INTEGER :: handle, ig, ngpt

      CALL timeset(routinen, handle)

      ngpt = SIZE(pw%array)
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(ngpt, pw)
      DO ig = 1, ngpt
         pw%array(ig) = -pw%array(ig)*pw%pw_grid%gsq(ig)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_laplace
10191
! **************************************************************************************************
!> \brief Calculate the tensorial 2nd derivative of a plane wave vector
!> \param pw input g-space function
!> \param pwdr2 output, component (i,j) of the traceless second-derivative tensor
!> \param i first Cartesian index (1..3)
!> \param j second Cartesian index (1..3)
!> \par History
!>      none
!> \author JGH (05-May-2006)
!> \note
!>      diagonal components have one third of |G|^2 removed (traceless form)
! **************************************************************************************************
   SUBROUTINE pw_dr2(pw, pwdr2, i, j)

      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw, pwdr2
      INTEGER, INTENT(IN) :: i, j

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_dr2'

      INTEGER :: handle, ig, ngpt
      REAL(kind=dp), PARAMETER :: third = 1.0_dp/3.0_dp

      CALL timeset(routinen, handle)

      ngpt = SIZE(pw%array)

      IF (i == j) THEN
         ! diagonal element: g_i^2 - |G|^2/3
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(ngpt, i, pw, pwdr2)
         DO ig = 1, ngpt
            pwdr2%array(ig) = (pw%pw_grid%g(i, ig)**2 - third*pw%pw_grid%gsq(ig))*pw%array(ig)
         END DO
!$OMP END PARALLEL DO
      ELSE
         ! off-diagonal element: g_i*g_j
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(ngpt, i, j, pw, pwdr2)
         DO ig = 1, ngpt
            pwdr2%array(ig) = pw%pw_grid%g(i, ig)*pw%pw_grid%g(j, ig)*pw%array(ig)
         END DO
!$OMP END PARALLEL DO
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_dr2
10237
! **************************************************************************************************
!> \brief Calculate the tensorial 2nd derivative of a plane wave vector
!>        and divides by |G|^2. pwdr2_gg(G=0) is put to zero.
!> \param pw input g-space function
!> \param pwdr2_gg output, component (i,j) scaled by 1/|G|^2
!> \param i first Cartesian index (1..3)
!> \param j second Cartesian index (1..3)
!> \par History
!>      none
!> \author RD (20-Nov-2006)
!> \note
!>      Adapted from pw_dr2; loops start at first_gne0 so the G=0
!>      point never divides by zero
! **************************************************************************************************
   SUBROUTINE pw_dr2_gg(pw, pwdr2_gg, i, j)

      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw, pwdr2_gg
      INTEGER, INTENT(IN) :: i, j

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_dr2_gg'

      INTEGER :: handle, ig, ngpt
      REAL(kind=dp), PARAMETER :: third = 1.0_dp/3.0_dp

      CALL timeset(routinen, handle)

      ngpt = SIZE(pw%array)

      IF (i == j) THEN
         ! diagonal element: (g_i^2 - |G|^2/3)/|G|^2
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(ngpt, i, pw, pwdr2_gg)
         DO ig = pw%pw_grid%first_gne0, ngpt
            pwdr2_gg%array(ig) = (pw%pw_grid%g(i, ig)**2 - third*pw%pw_grid%gsq(ig)) &
                                 *pw%array(ig)/pw%pw_grid%gsq(ig)
         END DO
!$OMP END PARALLEL DO
      ELSE
         ! off-diagonal element: g_i*g_j/|G|^2
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig) SHARED(ngpt, i, j, pw, pwdr2_gg)
         DO ig = pw%pw_grid%first_gne0, ngpt
            pwdr2_gg%array(ig) = pw%array(ig)*pw%pw_grid%g(i, ig)*pw%pw_grid%g(j, ig) &
                                 /pw%pw_grid%gsq(ig)
         END DO
!$OMP END PARALLEL DO
      END IF

      ! the G=0 component is defined as zero
      IF (pw%pw_grid%have_g0) pwdr2_gg%array(1) = 0.0_dp

      CALL timestop(handle)

   END SUBROUTINE pw_dr2_gg
10287
! **************************************************************************************************
!> \brief Multiplies a G-space function with a smoothing factor of the form
!>        f(|G|) = exp((ecut - G^2)/sigma)/(1+exp((ecut - G^2)/sigma))
!> \param pw g-space function, smoothed in place
!> \param ecut smoothing onset (Fermi-like switching energy)
!> \param sigma smoothing width
!> \par History
!>      none
!> \author JGH (09-June-2006)
!> \note
!>      the logistic is evaluated in its numerically stable form so that
!>      large positive arguments (small |G|) cannot overflow exp()
! **************************************************************************************************
   SUBROUTINE pw_smoothing(pw, ecut, sigma)

      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: pw
      REAL(kind=dp), INTENT(IN) :: ecut, sigma

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_smoothing'

      INTEGER :: cnt, handle, ig
      REAL(kind=dp) :: arg, f

      CALL timeset(routinen, handle)

      cnt = SIZE(pw%array)

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig, arg, f) SHARED(cnt, ecut, pw, sigma)
      DO ig = 1, cnt
         arg = (ecut - pw%pw_grid%gsq(ig))/sigma
         ! stable logistic: exp(arg)/(1+exp(arg)) overflows for large positive
         ! arg, so use the mathematically identical 1/(1+exp(-arg)) there
         IF (arg >= 0.0_dp) THEN
            f = 1.0_dp/(1.0_dp + exp(-arg))
         ELSE
            f = exp(arg)/(1.0_dp + exp(arg))
         END IF
         pw%array(ig) = f*pw%array(ig)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_smoothing
10324
! **************************************************************************************************
!> \brief Decide whether two pw grids can be used together: they are compatible
!>        if they are the same grid or one references the other
!> \param grida first grid
!> \param gridb second grid
!> \return .TRUE. if the grids are compatible
! **************************************************************************************************
   ELEMENTAL FUNCTION pw_compatible(grida, gridb) RESULT(compat)

      TYPE(pw_grid_type), INTENT(IN) :: grida, gridb
      LOGICAL :: compat

      ! identical grids, or a reference relation in either direction
      compat = (grida%id_nr == gridb%id_nr) .OR. &
               (grida%reference == gridb%id_nr) .OR. &
               (gridb%reference == grida%id_nr)

   END FUNCTION pw_compatible
10347
! **************************************************************************************************
!> \brief Calculate the structure factor for point r
!> \param sf output, sf(G) = exp(-i*G.r) for every plane wave G
!> \param r position vector in real space
!> \par History
!>      none
!> \author JGH (05-May-2006)
!> \note
! **************************************************************************************************
   SUBROUTINE pw_structure_factor(sf, r)

      TYPE(pw_c1d_gs_type), INTENT(INOUT) :: sf
      REAL(kind=dp), DIMENSION(:), INTENT(IN) :: r

      CHARACTER(len=*), PARAMETER :: routinen = 'pw_structure_factor'

      INTEGER :: handle, ig, ngpt
      REAL(kind=dp) :: phase

      CALL timeset(routinen, handle)

      ngpt = SIZE(sf%array)

!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(ig, phase) SHARED(ngpt, r, sf)
      DO ig = 1, ngpt
         ! exp(-i*G.r) written out via cos/sin
         phase = dot_product(sf%pw_grid%g(:, ig), r)
         sf%array(ig) = cmplx(cos(phase), -sin(phase), kind=dp)
      END DO
!$OMP END PARALLEL DO

      CALL timestop(handle)

   END SUBROUTINE pw_structure_factor
10381
10382 END MODULE pw_methods
various routines to log and control the output. The idea is that decisions about where to log should ...
integer function, public cp_logger_get_default_io_unit(logger)
returns the unit nr for the ionode (-1 on all other processors) skips as well checks if the procs cal...
integer, parameter, public bwfft
Definition fft_tools.F:145
integer, parameter, public fwfft
Definition fft_tools.F:145
sums arrays of real/complex numbers with much reduced round-off as compared to a naive implementation...
Definition kahan_sum.F:29
Defines the basic variable types.
Definition kinds.F:23
integer, parameter, public dp
Definition kinds.F:34
Machine interface based on Fortran 2003 and POSIX.
Definition machine.F:17
subroutine, public m_memory(mem)
Returns the total amount of memory [bytes] in use, if known, zero otherwise.
Definition machine.F:332
Definition of mathematical constants and functions.
complex(kind=dp), parameter, public z_zero
subroutine, public pw_copy_match(pw1, pw2)
copy a pw type variable
Definition pw_copy_all.F:42
integer function, public pw_fpga_init_bitstream(n)
Invoke the pw_fpga_check_bitstream C function passing the path to the data dir.
Definition pw_fpga.F:263
subroutine, public pw_fpga_r3dc1d_3d_dp(n, c_out)
perform an in-place double precision fft3d on the FPGA
Definition pw_fpga.F:146
subroutine, public pw_fpga_r3dc1d_3d_sp(n, c_out)
perform an in-place single precision fft3d on the FPGA
Definition pw_fpga.F:194
subroutine, public pw_fpga_c1dr3d_3d_dp(n, c_out)
perform an in-place double precision inverse fft3d on the FPGA
Definition pw_fpga.F:170
subroutine, public pw_fpga_c1dr3d_3d_sp(n, c_out)
perform an in-place single precision inverse fft3d on the FPGA
Definition pw_fpga.F:228
subroutine, public pw_gpu_c1dr3d_3d(pw1, pw2, scale)
perform an scatter followed by a fft on the gpu
Definition pw_gpu.F:162
subroutine, public pw_gpu_c1dr3d_3d_ps(pw1, pw2, scale)
perform an parallel scatter followed by a fft on the gpu
Definition pw_gpu.F:392
subroutine, public pw_gpu_r3dc1d_3d(pw1, pw2, scale)
perform an fft followed by a gather on the gpu
Definition pw_gpu.F:104
subroutine, public pw_gpu_r3dc1d_3d_ps(pw1, pw2, scale)
perform an parallel fft followed by a gather on the gpu
Definition pw_gpu.F:220
integer, parameter, public halfspace
integer, parameter, public pw_mode_local
integer, parameter, public pw_mode_distributed
subroutine, public pw_smoothing(pw, ecut, sigma)
Multiplies a G-space function with a smoothing factor of the form f(|G|) = exp((ecut - G^2)/sigma)/(1...
subroutine, public pw_log_deriv_trunc(pw, rcutoff)
Multiply all data points with the logarithmic derivative of the complementary cosine This function is...
subroutine, public pw_gauss_damp(pw, omega)
Multiply all data points with a Gaussian damping factor Needed for longrange Coulomb potential V(\vec...
subroutine, public pw_log_deriv_compl_gauss(pw, omega)
Multiply all data points with the logarithmic derivative of the complementary Gaussian damping factor...
subroutine, public pw_dr2_gg(pw, pwdr2_gg, i, j)
Calculate the tensorial 2nd derivative of a plane wave vector and divides by |G|^2....
subroutine, public pw_laplace(pw)
Calculate the Laplacian of a plane wave vector.
subroutine, public pw_log_deriv_gauss(pw, omega)
Multiply all data points with the logarithmic derivative of a Gaussian.
subroutine, public pw_gauss_damp_mix(pw, omega, scale_coul, scale_long)
Multiply all data points with a Gaussian damping factor and mixes it with the original function Neede...
subroutine, public pw_truncated(pw, rcutoff)
Multiply all data points with a complementary cosine Needed for truncated Coulomb potential V(\vec r)...
integer, parameter, public do_accurate_sum
Definition pw_methods.F:79
integer, parameter, public do_standard_sum
Definition pw_methods.F:79
subroutine, public pw_derive(pw, n)
Calculate the derivative of a plane wave vector.
subroutine, public pw_structure_factor(sf, r)
Calculate the structure factor for point r.
subroutine, public pw_compl_gauss_damp(pw, omega)
Multiply all data points with a Gaussian damping factor Needed for longrange Coulomb potential V(\vec...
subroutine, public pw_log_deriv_mix_cl(pw, omega, scale_coul, scale_long)
Multiply all data points with the logarithmic derivative of the mixed longrange/Coulomb potential Nee...
subroutine, public pw_dr2(pw, pwdr2, i, j)
Calculate the tensorial 2nd derivative of a plane wave vector.