realspace_grid_cube.F
!--------------------------------------------------------------------------------------------------!
!   CP2K: A general program to perform molecular dynamics simulations                              !
!   Copyright 2000-2024 CP2K developers group <https://cp2k.org>                                   !
!                                                                                                  !
!   SPDX-License-Identifier: GPL-2.0-or-later                                                      !
!--------------------------------------------------------------------------------------------------!

! **************************************************************************************************
!> \brief Generate Gaussian cube files
! **************************************************************************************************
MODULE realspace_grid_cube
   USE cp_files, ONLY: close_file, &
                       open_file
   USE cp_log_handling, ONLY: cp_logger_get_default_io_unit
   USE kinds, ONLY: dp
   USE message_passing, ONLY: &
      file_amode_rdonly, file_offset, mp_comm_type, mp_file_descriptor_type, mp_file_type, &
      mp_file_type_free, mp_file_type_hindexed_make_chv, mp_file_type_set_view_chv, &
      mpi_character_size
   USE pw_grid_types, ONLY: pw_mode_local
   USE pw_types, ONLY: pw_r3d_rs_type
#include "../base/base_uses.f90"

   IMPLICIT NONE

   PRIVATE

   PUBLIC :: pw_to_cube, &
             cube_to_pw, &
             pw_to_simple_volumetric

   CHARACTER(len=*), PARAMETER, PRIVATE :: moduleN = 'realspace_grid_cube'
   LOGICAL, PARAMETER, PRIVATE :: debug_this_module = .false.
   LOGICAL, PRIVATE :: parses_linebreaks = .false., &
                       parse_test = .true.

CONTAINS

! **************************************************************************************************
!> \brief Writes a realspace grid to a Gaussian cube file
!> \param pw the grid to write
!> \param unit_nr unit to write to (interpreted as an MPI file handle id when mpi_io is set)
!> \param title title of the cube file
!> \param particles_r Cartesian coordinates of the particles
!> \param particles_z atomic numbers of the particles
!> \param stride every stride(i)th value of the grid is written (i=x,y,z)
!> \param zero_tails if true, grid values below 1.0E-7 are written as zero
!> \param silent suppress informational output if true
!> \param mpi_io write the cube using collective MPI I/O
! **************************************************************************************************
   SUBROUTINE pw_to_cube(pw, unit_nr, title, particles_r, particles_z, stride, zero_tails, &
                         silent, mpi_io)
      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw
      INTEGER, INTENT(IN) :: unit_nr
      CHARACTER(*), INTENT(IN), OPTIONAL :: title
      REAL(kind=dp), DIMENSION(:, :), INTENT(IN), &
         OPTIONAL :: particles_r
      INTEGER, DIMENSION(:), INTENT(IN), OPTIONAL :: particles_z
      INTEGER, DIMENSION(:), OPTIONAL, POINTER :: stride
      LOGICAL, INTENT(IN), OPTIONAL :: zero_tails, silent, mpi_io

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_to_cube'
      INTEGER, PARAMETER :: entry_len = 13, num_entries_line = 6

      INTEGER :: checksum, dest, handle, i, i1, i2, i3, iat, ip, l1, l2, l3, msglen, my_rank, &
                 my_stride(3), np, num_linebreak, num_pe, rank(2), size_of_z, source, tag, u1, u2, u3
      LOGICAL :: be_silent, my_zero_tails, parallel_write
      REAL(kind=dp), ALLOCATABLE, DIMENSION(:) :: buf
      TYPE(mp_comm_type) :: gid
      TYPE(mp_file_type) :: mp_unit

      CALL timeset(routineN, handle)

      my_zero_tails = .false.
      be_silent = .false.
      parallel_write = .false.
      IF (PRESENT(zero_tails)) my_zero_tails = zero_tails
      IF (PRESENT(silent)) be_silent = silent
      IF (PRESENT(mpi_io)) parallel_write = mpi_io
      my_stride = 1
      IF (PRESENT(stride)) THEN
         IF (SIZE(stride) /= 1 .AND. SIZE(stride) /= 3) &
            CALL cp_abort(__LOCATION__, "STRIDE keyword can accept only 1 "// &
                          "(the same for X,Y,Z) or 3 values. Correct your input file.")
         IF (SIZE(stride) == 1) THEN
            DO i = 1, 3
               my_stride(i) = stride(1)
            END DO
         ELSE
            my_stride = stride(1:3)
         END IF
         CPASSERT(my_stride(1) > 0)
         CPASSERT(my_stride(2) > 0)
         CPASSERT(my_stride(3) > 0)
      END IF
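      ! For example (illustrative numbers only): with npts = (/125, 125, 125/) and
      ! STRIDE 2, every second point is kept along each axis, and the header below
      ! reports (125 + 2 - 1)/2 = 63 points per direction.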

      IF (.NOT. parallel_write) THEN
         IF (unit_nr > 0) THEN
            ! this format seems to work for e.g. Molekel and gOpenMol
            ! recent versions of VMD can also read non-orthorhombic cells
            WRITE (unit_nr, '(a11)') "-Quickstep-"
            IF (PRESENT(title)) THEN
               WRITE (unit_nr, *) trim(title)
            ELSE
               WRITE (unit_nr, *) "No Title"
            END IF

            CPASSERT(PRESENT(particles_z) .EQV. PRESENT(particles_r))
            np = 0
            IF (PRESENT(particles_z)) THEN
               CPASSERT(SIZE(particles_z) == SIZE(particles_r, dim=2))
               ! cube files can hold at most 99999 particles due to a format limitation (I5),
               ! so we limit the number of particles written
               np = min(99999, SIZE(particles_z))
            END IF

            WRITE (unit_nr, '(I5,3f12.6)') np, 0.0_dp, 0._dp, 0._dp ! number of particles and cube origin

            WRITE (unit_nr, '(I5,3f12.6)') (pw%pw_grid%npts(1) + my_stride(1) - 1)/my_stride(1), &
               pw%pw_grid%dh(1, 1)*real(my_stride(1), dp), pw%pw_grid%dh(2, 1)*real(my_stride(1), dp), &
               pw%pw_grid%dh(3, 1)*real(my_stride(1), dp)
            WRITE (unit_nr, '(I5,3f12.6)') (pw%pw_grid%npts(2) + my_stride(2) - 1)/my_stride(2), &
               pw%pw_grid%dh(1, 2)*real(my_stride(2), dp), pw%pw_grid%dh(2, 2)*real(my_stride(2), dp), &
               pw%pw_grid%dh(3, 2)*real(my_stride(2), dp)
            WRITE (unit_nr, '(I5,3f12.6)') (pw%pw_grid%npts(3) + my_stride(3) - 1)/my_stride(3), &
               pw%pw_grid%dh(1, 3)*real(my_stride(3), dp), pw%pw_grid%dh(2, 3)*real(my_stride(3), dp), &
               pw%pw_grid%dh(3, 3)*real(my_stride(3), dp)

            IF (PRESENT(particles_z)) THEN
               DO iat = 1, np
                  WRITE (unit_nr, '(I5,4f12.6)') particles_z(iat), 0._dp, particles_r(:, iat)
               END DO
            END IF
         END IF
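         ! The header just written has this shape (illustrative values only: a
         ! 2-atom system on a 60x60x60 grid with unit stride; one line per axis
         ! with the point count and step vector, then one line per particle with
         ! atomic number, charge and position):
         !    -Quickstep-
         !    ELECTRON DENSITY
         !        2    0.000000    0.000000    0.000000
         !       60    0.166667    0.000000    0.000000
         !       60    0.000000    0.166667    0.000000
         !       60    0.000000    0.000000    0.166667
         !        8    0.000000    0.000000    0.000000    0.000000
         !        1    0.000000    1.430000    1.110000    0.000000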

         ! shortcut
         l1 = pw%pw_grid%bounds(1, 1)
         l2 = pw%pw_grid%bounds(1, 2)
         l3 = pw%pw_grid%bounds(1, 3)
         u1 = pw%pw_grid%bounds(2, 1)
         u2 = pw%pw_grid%bounds(2, 2)
         u3 = pw%pw_grid%bounds(2, 3)

         ALLOCATE (buf(l3:u3))

         my_rank = pw%pw_grid%para%my_pos
         gid = pw%pw_grid%para%group
         num_pe = pw%pw_grid%para%group_size
         tag = 1

         rank(1) = unit_nr
         rank(2) = my_rank
         checksum = 0
         IF (unit_nr > 0) checksum = 1

         CALL gid%sum(checksum)
         CPASSERT(checksum == 1)

         CALL gid%maxloc(rank)
         CPASSERT(rank(1) > 0)

         dest = rank(2)
         DO i1 = l1, u1, my_stride(1)
            DO i2 = l2, u2, my_stride(2)

               ! cycling through the CPUs, check if the current ray (I1,I2) is local to that CPU
               IF (pw%pw_grid%para%mode .NE. pw_mode_local) THEN
                  DO ip = 0, num_pe - 1
                     IF (pw%pw_grid%para%bo(1, 1, ip, 1) <= i1 - l1 + 1 .AND. pw%pw_grid%para%bo(2, 1, ip, 1) >= i1 - l1 + 1 .AND. &
                         pw%pw_grid%para%bo(1, 2, ip, 1) <= i2 - l2 + 1 .AND. pw%pw_grid%para%bo(2, 2, ip, 1) >= i2 - l2 + 1) THEN
                        source = ip
                     END IF
                  END DO
               ELSE
                  source = dest
               END IF

               IF (source == dest) THEN
                  IF (my_rank == source) THEN
                     buf(:) = pw%array(i1, i2, :)
                  END IF
               ELSE
                  IF (my_rank == source) THEN
                     buf(:) = pw%array(i1, i2, :)
                     CALL gid%send(buf, dest, tag)
                  END IF
                  IF (my_rank == dest) THEN
                     CALL gid%recv(buf, source, tag)
                  END IF
               END IF

               IF (unit_nr > 0) THEN
                  IF (my_zero_tails) THEN
                     DO i3 = l3, u3
                        IF (buf(i3) < 1.e-7_dp) buf(i3) = 0.0_dp
                     END DO
                  END IF
                  WRITE (unit_nr, '(6E13.5)') (buf(i3), i3=l3, u3, my_stride(3))
               END IF

               ! this double loop generates so many messages that it can overload
               ! the message passing system, e.g. on XT3
               ! we therefore put a barrier here to limit the number of messages
               ! in flight at any given time
               ! if this routine ever becomes a bottleneck, we should go for a
               ! more sophisticated rewrite
               CALL gid%sync()

            END DO
         END DO

         DEALLOCATE (buf)
      ELSE
         size_of_z = ceiling(real(pw%pw_grid%bounds(2, 3) - pw%pw_grid%bounds(1, 3) + 1, dp)/real(my_stride(3), dp))
         num_linebreak = size_of_z/num_entries_line
         IF (modulo(size_of_z, num_entries_line) /= 0) &
            num_linebreak = num_linebreak + 1
         msglen = (size_of_z*entry_len + num_linebreak)*mpi_character_size
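         ! e.g. with illustrative numbers: for size_of_z = 20 values per z-slice,
         ! a slice spans ceiling(20/6) = 4 text lines, so
         ! msglen = (20*13 + 4)*mpi_character_size = 264 bytes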
         CALL mp_unit%set_handle(unit_nr)
         CALL pw_to_cube_parallel(pw, mp_unit, title, particles_r, particles_z, my_stride, my_zero_tails, msglen)
      END IF

      CALL timestop(handle)

   END SUBROUTINE pw_to_cube
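
! Example call (a sketch: 'rho' is a populated pw_r3d_rs_type, 'unit_nr' a unit
! opened for formatted writing, e.g. via open_file, and 'coords'/'zatom'
! caller-provided particle arrays; all of these names are illustrative):
!
!    CALL pw_to_cube(rho, unit_nr, title="ELECTRON DENSITY", &
!                    particles_r=coords, particles_z=zatom, &
!                    zero_tails=.TRUE., mpi_io=.FALSE.)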

! **************************************************************************************************
!> \brief Computes the external density on the grid
!>        hacked from external_read_density
!> \param grid pw to read from cube file
!> \param filename name of cube file
!> \param scaling scale values before storing
!> \param parallel_read read the cube with collective MPI I/O instead of on a single rank
!> \param silent suppress informational output if true
!> \par History
!>      Created [M.Watkins] (01.2014)
!>      Use blocking, collective MPI read for parallel simulations [Nico Holmberg] (05.2017)
! **************************************************************************************************
   SUBROUTINE cube_to_pw(grid, filename, scaling, parallel_read, silent)

      TYPE(pw_r3d_rs_type), INTENT(IN) :: grid
      CHARACTER(len=*), INTENT(in) :: filename
      REAL(kind=dp), INTENT(in) :: scaling
      LOGICAL, INTENT(in) :: parallel_read
      LOGICAL, INTENT(in), OPTIONAL :: silent

      CHARACTER(len=*), PARAMETER :: routineN = 'cube_to_pw'
      INTEGER, PARAMETER :: entry_len = 13, num_entries_line = 6

      INTEGER :: extunit, handle, i, j, k, msglen, &
                 my_rank, nat, ndum, num_linebreak, &
                 num_pe, output_unit, size_of_z, tag
      INTEGER, DIMENSION(3) :: lbounds, lbounds_local, npoints, &
                               npoints_local, ubounds, ubounds_local
      LOGICAL :: be_silent
      REAL(kind=dp), ALLOCATABLE, DIMENSION(:) :: buffer
      REAL(kind=dp), DIMENSION(3) :: dr, rdum
      TYPE(mp_comm_type) :: gid

      output_unit = cp_logger_get_default_io_unit()

      CALL timeset(routineN, handle)

      be_silent = .false.
      IF (PRESENT(silent)) THEN
         be_silent = silent
      END IF
      !get rs grids and parallel environment
      gid = grid%pw_grid%para%group
      my_rank = grid%pw_grid%para%my_pos
      num_pe = grid%pw_grid%para%group_size
      tag = 1

      lbounds_local = grid%pw_grid%bounds_local(1, :)
      ubounds_local = grid%pw_grid%bounds_local(2, :)
      size_of_z = ubounds_local(3) - lbounds_local(3) + 1

      IF (.NOT. parallel_read) THEN
         npoints = grid%pw_grid%npts
         lbounds = grid%pw_grid%bounds(1, :)
         ubounds = grid%pw_grid%bounds(2, :)

         DO i = 1, 3
            dr(i) = grid%pw_grid%dh(i, i)
         END DO

         npoints_local = grid%pw_grid%npts_local
         !pw grids are distributed as (at most) pencils - every processor holds the full z range for its x,y rays
         ALLOCATE (buffer(lbounds(3):ubounds(3)))

         IF (my_rank == 0) THEN
            IF (output_unit > 0 .AND. .NOT. be_silent) THEN
               WRITE (output_unit, fmt="(/,T2,A,/,/,T2,A,/)") "Reading the cube file: ", trim(filename)
            END IF

            CALL open_file(file_name=filename, &
                           file_status="OLD", &
                           file_form="FORMATTED", &
                           file_action="READ", &
                           unit_number=extunit)

            !skip header comments
            DO i = 1, 2
               READ (extunit, *)
            END DO
            READ (extunit, *) nat, rdum
            DO i = 1, 3
               READ (extunit, *) ndum, rdum
               IF ((ndum /= npoints(i) .OR. (abs(rdum(i) - dr(i)) > 1e-4)) .AND. &
                   output_unit > 0) THEN
                  WRITE (output_unit, *) "Restart from density | ERROR! | CUBE FILE NOT COINCIDENT WITH INTERNAL GRID ", i
                  WRITE (output_unit, *) "Restart from density | ", ndum, " DIFFERS FROM ", npoints(i)
                  WRITE (output_unit, *) "Restart from density | ", rdum, " DIFFERS FROM ", dr(i)
               END IF
            END DO
            !ignore atomic position data - it is read from the coordinates or topology instead
            DO i = 1, nat
               READ (extunit, *)
            END DO
         END IF

         !rank 0 broadcasts each z-slice to everyone
         DO i = lbounds(1), ubounds(1)
            DO j = lbounds(2), ubounds(2)
               IF (my_rank .EQ. 0) THEN
                  READ (extunit, *) (buffer(k), k=lbounds(3), ubounds(3))
               END IF
               CALL gid%bcast(buffer(lbounds(3):ubounds(3)), 0)

               !only use data that is local to me - i.e. in the slice of the pencil I own
               IF ((lbounds_local(1) .LE. i) .AND. (i .LE. ubounds_local(1)) .AND. (lbounds_local(2) .LE. j) &
                   .AND. (j .LE. ubounds_local(2))) THEN
                  !allow scaling of the values by the factor 'scaling' (SCALING_FACTOR in the input file)
                  grid%array(i, j, lbounds(3):ubounds(3)) = buffer(lbounds(3):ubounds(3))*scaling
               END IF

            END DO
         END DO

         IF (my_rank == 0) CALL close_file(unit_number=extunit)

         CALL gid%sync()
      ELSE
         ! The parallel routine needs the byte size of each grid z-slice as input
         ! This is a hack to prevent compilation errors with gcc -Wall (up to version 6.3)
         ! related to the allocatable-length string declaration CHARACTER(LEN=:), ALLOCATABLE, DIMENSION(:) :: string
         ! Each data line of a Gaussian cube contains at most 6 entries, with the last line containing fewer if nz % 6 /= 0
         ! Thus, this size is simply the number of entries multiplied by the entry size, plus the number of line breaks
         num_linebreak = size_of_z/num_entries_line
         IF (modulo(size_of_z, num_entries_line) /= 0) &
            num_linebreak = num_linebreak + 1
         msglen = (size_of_z*entry_len + num_linebreak)*mpi_character_size
         CALL cube_to_pw_parallel(grid, filename, scaling, msglen, silent=silent)
      END IF

      CALL timestop(handle)

   END SUBROUTINE cube_to_pw
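
! A minimal usage sketch ('grid' is assumed to be an allocated pw_r3d_rs_type
! whose pw_grid matches the dimensions of the file; "density.cube" is a
! hypothetical filename):
!
!    CALL cube_to_pw(grid, "density.cube", scaling=1.0_dp, parallel_read=.TRUE.)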

! **************************************************************************************************
!> \brief Reads a realspace potential/density from a cube file using collective MPI I/O and
!>        stores it in grid.
!> \param grid pw to read from cube file
!> \param filename name of cube file
!> \param scaling scale values before storing
!> \param msglen the size of each grid slice along the z-axis in bytes
!> \param silent suppress informational output if true
!> \par History
!>      Created [Nico Holmberg] (05.2017)
! **************************************************************************************************
   SUBROUTINE cube_to_pw_parallel(grid, filename, scaling, msglen, silent)

      TYPE(pw_r3d_rs_type), INTENT(IN) :: grid
      CHARACTER(len=*), INTENT(in) :: filename
      REAL(kind=dp), INTENT(in) :: scaling
      INTEGER, INTENT(in) :: msglen
      LOGICAL, INTENT(in), OPTIONAL :: silent

      INTEGER, PARAMETER :: entry_len = 13, num_entries_line = 6

      INTEGER, DIMENSION(3) :: lbounds, lbounds_local, npoints, &
                               npoints_local, ubounds, ubounds_local
      INTEGER, ALLOCATABLE, DIMENSION(:), TARGET :: blocklengths
      INTEGER(kind=file_offset), ALLOCATABLE, &
         DIMENSION(:), TARGET :: displacements
      INTEGER(kind=file_offset) :: bof
      INTEGER :: extunit_handle, i, ientry, islice, j, k, my_rank, nat, ndum, nslices, num_pe, &
                 offset_global, output_unit, pos, readstat, size_of_z, tag
      CHARACTER(LEN=msglen), ALLOCATABLE, DIMENSION(:) :: readbuffer
      CHARACTER(LEN=msglen) :: tmp
      LOGICAL :: be_silent, should_read(2)
      REAL(kind=dp), ALLOCATABLE, DIMENSION(:) :: buffer
      REAL(kind=dp), DIMENSION(3) :: dr, rdum
      TYPE(mp_comm_type) :: gid
      TYPE(mp_file_descriptor_type) :: mp_file_desc
      TYPE(mp_file_type) :: extunit

      output_unit = cp_logger_get_default_io_unit()

      be_silent = .false.
      IF (PRESENT(silent)) THEN
         be_silent = silent
      END IF

      !get rs grids and parallel environment
      gid = grid%pw_grid%para%group
      my_rank = grid%pw_grid%para%my_pos
      num_pe = grid%pw_grid%para%group_size
      tag = 1

      DO i = 1, 3
         dr(i) = grid%pw_grid%dh(i, i)
      END DO

      npoints = grid%pw_grid%npts
      lbounds = grid%pw_grid%bounds(1, :)
      ubounds = grid%pw_grid%bounds(2, :)

      npoints_local = grid%pw_grid%npts_local
      lbounds_local = grid%pw_grid%bounds_local(1, :)
      ubounds_local = grid%pw_grid%bounds_local(2, :)
      size_of_z = ubounds_local(3) - lbounds_local(3) + 1
      nslices = (ubounds_local(1) - lbounds_local(1) + 1)*(ubounds_local(2) - lbounds_local(2) + 1)
      islice = 1

      ! Read header information and determine byte offset of cube data on master process
      IF (my_rank == 0) THEN
         IF (output_unit > 0 .AND. .NOT. be_silent) THEN
            WRITE (output_unit, fmt="(/,T2,A,/,/,T2,A,/)") "Reading the cube file: ", trim(filename)
         END IF

         CALL open_file(file_name=filename, &
                        file_status="OLD", &
                        file_form="FORMATTED", &
                        file_action="READ", &
                        file_access="STREAM", &
                        unit_number=extunit_handle)

         !skip header comments
         DO i = 1, 2
            READ (extunit_handle, *)
         END DO
         READ (extunit_handle, *) nat, rdum
         DO i = 1, 3
            READ (extunit_handle, *) ndum, rdum
            IF ((ndum /= npoints(i) .OR. (abs(rdum(i) - dr(i)) > 1e-4)) .AND. &
                output_unit > 0) THEN
               WRITE (output_unit, *) "Restart from density | ERROR! | CUBE FILE NOT COINCIDENT WITH INTERNAL GRID ", i
               WRITE (output_unit, *) "Restart from density | ", ndum, " DIFFERS FROM ", npoints(i)
               WRITE (output_unit, *) "Restart from density | ", rdum, " DIFFERS FROM ", dr(i)
            END IF
         END DO
         !ignore atomic position data - read from coord or topology instead
         DO i = 1, nat
            READ (extunit_handle, *)
         END DO
         ! Get byte offset
         INQUIRE (extunit_handle, pos=offset_global)
         CALL close_file(unit_number=extunit_handle)
      END IF
      ! Sync offset and start parallel read
      CALL gid%bcast(offset_global, grid%pw_grid%para%group_head_id)
      bof = offset_global
      CALL extunit%open(groupid=gid, filepath=filename, amode_status=file_amode_rdonly)
      ! Determine byte offsets for the grid z-slices that are local to this process
      ALLOCATE (displacements(nslices))
      displacements = 0
      DO i = lbounds(1), ubounds(1)
         should_read(:) = .true.
         IF (i .LT. lbounds_local(1)) THEN
            should_read(1) = .false.
         ELSE IF (i .GT. ubounds_local(1)) THEN
            EXIT
         END IF
         DO j = lbounds(2), ubounds(2)
            should_read(2) = .true.
            IF (j .LT. lbounds_local(2) .OR. j .GT. ubounds_local(2)) THEN
               should_read(2) = .false.
            END IF
            IF (all(should_read)) THEN
               IF (islice > nslices) CPABORT("Index out of bounds.")
               displacements(islice) = bof
               islice = islice + 1
            END IF
            ! Update global byte offset
            bof = bof + msglen
         END DO
      END DO
      ! Size of each z-slice is msglen
      ALLOCATE (blocklengths(nslices))
      blocklengths(:) = msglen
      ! Create indexed MPI type using calculated byte offsets as displacements and use it as a file view
      mp_file_desc = mp_file_type_hindexed_make_chv(nslices, blocklengths, displacements)
      bof = 0
      CALL mp_file_type_set_view_chv(extunit, bof, mp_file_desc)
      ! Collective read of cube
      ALLOCATE (readbuffer(nslices))
      readbuffer(:) = ''
      CALL extunit%read_all(msglen, nslices, readbuffer, mp_file_desc)
      CALL mp_file_type_free(mp_file_desc)
      CALL extunit%close()
      ! Convert cube values string -> real
      i = lbounds_local(1)
      j = lbounds_local(2)
      ALLOCATE (buffer(lbounds(3):ubounds(3)))
      buffer = 0.0_dp
      ! Test if the compiler supports parsing lines with line breaks in them
      IF (parse_test) THEN
         READ (readbuffer(1), *, iostat=readstat) (buffer(k), k=lbounds(3), ubounds(3))
         IF (readstat == 0) THEN
            parses_linebreaks = .true.
         ELSE
            parses_linebreaks = .false.
         END IF
         parse_test = .false.
         buffer = 0.0_dp
      END IF
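      ! Layout of each slice string in readbuffer (illustrative): size_of_z
      ! fixed-width entries of entry_len = 13 characters (format E13.5), with a
      ! line break after every 6th entry and after the last one, e.g.
      !   "  0.11243E-01  0.10040E-01 ...  0.73013E-02<LF>  0.64809E-02 ..."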
      DO islice = 1, nslices
         IF (parses_linebreaks) THEN
            ! Use list-directed conversion if supported:
            ! it produces faster code, though maybe the method below could be optimized
            READ (readbuffer(islice), *) (buffer(k), k=lbounds(3), ubounds(3))
         ELSE
            ! Convert values by going through the z-slice one value at a time
            tmp = readbuffer(islice)
            pos = 1
            ientry = 1
            DO k = lbounds_local(3), ubounds_local(3)
               IF (modulo(ientry, num_entries_line) == 0 .OR. k == ubounds_local(3)) THEN
                  ! Last value on the line: don't read the line break
                  READ (tmp(pos:pos + (entry_len - 2)), '(E12.5)') buffer(k)
                  pos = pos + (entry_len + 1)
               ELSE
                  READ (tmp(pos:pos + (entry_len - 1)), '(E13.5)') buffer(k)
                  pos = pos + entry_len
               END IF
               ientry = ientry + 1
            END DO
         END IF
         ! Optionally scale the cube file values
         grid%array(i, j, lbounds(3):ubounds(3)) = scaling*buffer(lbounds(3):ubounds(3))
         j = j + 1
         IF (j > ubounds_local(2)) THEN
            j = lbounds_local(2)
            i = i + 1
         END IF
      END DO
      DEALLOCATE (readbuffer)
      DEALLOCATE (blocklengths, displacements)
      IF (debug_this_module) THEN
         ! Check that the cube was read correctly, using an intrinsic read on the master, which broadcasts the data to everyone
         buffer = 0.0_dp
         IF (my_rank == 0) THEN
            IF (output_unit > 0 .AND. .NOT. be_silent) THEN
               WRITE (output_unit, fmt="(/,T2,A,/,/,T2,A)") "Reading the cube file: ", filename
            END IF

            CALL open_file(file_name=filename, &
                           file_status="OLD", &
                           file_form="FORMATTED", &
                           file_action="READ", &
                           unit_number=extunit_handle)

            !skip header comments
            DO i = 1, 2
               READ (extunit_handle, *)
            END DO
            READ (extunit_handle, *) nat, rdum
            DO i = 1, 3
               READ (extunit_handle, *) ndum, rdum
               IF ((ndum /= npoints(i) .OR. (abs(rdum(i) - dr(i)) > 1e-4)) .AND. &
                   output_unit > 0) THEN
                  WRITE (output_unit, *) "Restart from density | ERROR! | CUBE FILE NOT COINCIDENT WITH INTERNAL GRID ", i
                  WRITE (output_unit, *) "Restart from density | ", ndum, " DIFFERS FROM ", npoints(i)
                  WRITE (output_unit, *) "Restart from density | ", rdum, " DIFFERS FROM ", dr(i)
               END IF
            END DO
            !ignore atomic position data - read from coord or topology instead
            DO i = 1, nat
               READ (extunit_handle, *)
            END DO
         END IF

         !master broadcasts all data to everyone
         DO i = lbounds(1), ubounds(1)
            DO j = lbounds(2), ubounds(2)
               IF (my_rank .EQ. 0) THEN
                  READ (extunit_handle, *) (buffer(k), k=lbounds(3), ubounds(3))
               END IF
               CALL gid%bcast(buffer(lbounds(3):ubounds(3)), 0)

               !only compare data that is local to me - i.e. in the slice of the pencil I own
               IF ((lbounds_local(1) .LE. i) .AND. (i .LE. ubounds_local(1)) .AND. (lbounds_local(2) .LE. j) &
                   .AND. (j .LE. ubounds_local(2))) THEN
                  IF (any(grid%array(i, j, lbounds(3):ubounds(3)) /= buffer(lbounds(3):ubounds(3))*scaling)) &
                     CALL cp_abort(__LOCATION__, &
                                   "Error in parallel read of input cube file.")
               END IF

            END DO
         END DO

         IF (my_rank == 0) CALL close_file(unit_number=extunit_handle)

         CALL gid%sync()
      END IF
      DEALLOCATE (buffer)

   END SUBROUTINE cube_to_pw_parallel

! **************************************************************************************************
!> \brief Writes a realspace potential to a cube file using collective MPI I/O.
!> \param grid the pw to output to the cube file
!> \param unit_nr the handle associated with the cube file
!> \param title title of the cube file
!> \param particles_r Cartesian coordinates of the particles in the system
!> \param particles_z atomic numbers of the particles in the system
!> \param stride every stride(i)th value of the potential is written (i=x,y,z)
!> \param zero_tails flag that determines whether small values of the potential should be zeroed
!> \param msglen the size of each grid slice along the z-axis in bytes
!> \par History
!>      Created [Nico Holmberg] (11.2017)
! **************************************************************************************************
   SUBROUTINE pw_to_cube_parallel(grid, unit_nr, title, particles_r, particles_z, stride, zero_tails, &
                                  msglen)

      TYPE(pw_r3d_rs_type), INTENT(IN) :: grid
      TYPE(mp_file_type), INTENT(IN) :: unit_nr
      CHARACTER(*), INTENT(IN), OPTIONAL :: title
      REAL(kind=dp), DIMENSION(:, :), INTENT(IN), &
         OPTIONAL :: particles_r
      INTEGER, DIMENSION(:), INTENT(IN), OPTIONAL :: particles_z
      INTEGER, INTENT(IN) :: stride(3)
      LOGICAL, INTENT(IN) :: zero_tails
      INTEGER, INTENT(IN) :: msglen

      INTEGER, PARAMETER :: entry_len = 13, header_len = 41, &
                            header_len_z = 53, num_entries_line = 6

      CHARACTER(LEN=entry_len) :: value
      CHARACTER(LEN=header_len) :: header
      CHARACTER(LEN=header_len_z) :: header_z
      INTEGER, DIMENSION(3) :: lbounds, lbounds_local, ubounds, &
                               ubounds_local
      INTEGER, ALLOCATABLE, DIMENSION(:), TARGET :: blocklengths
      INTEGER(kind=file_offset), ALLOCATABLE, &
         DIMENSION(:), TARGET :: displacements
      INTEGER(kind=file_offset) :: bof
      INTEGER :: counter, i, islice, j, k, last_z, &
                 my_rank, np, nslices, size_of_z
      CHARACTER(LEN=msglen), ALLOCATABLE, DIMENSION(:) :: writebuffer
      CHARACTER(LEN=msglen) :: tmp
      LOGICAL :: should_write(2)
      TYPE(mp_comm_type) :: gid
      TYPE(mp_file_descriptor_type) :: mp_desc

      !get rs grids and parallel environment
      gid = grid%pw_grid%para%group
      my_rank = grid%pw_grid%para%my_pos

      ! Shortcut
      lbounds = grid%pw_grid%bounds(1, :)
      ubounds = grid%pw_grid%bounds(2, :)
      lbounds_local = grid%pw_grid%bounds_local(1, :)
      ubounds_local = grid%pw_grid%bounds_local(2, :)
      ! Determine the total number of z-slices and the number of values per slice
      size_of_z = ceiling(real(ubounds_local(3) - lbounds_local(3) + 1, dp)/real(stride(3), dp))
      islice = 1
      DO i = lbounds(1), ubounds(1), stride(1)
         should_write(:) = .true.
         IF (i .LT. lbounds_local(1)) THEN
            should_write(1) = .false.
         ELSE IF (i .GT. ubounds_local(1)) THEN
            EXIT
         END IF
         DO j = lbounds(2), ubounds(2), stride(2)
            should_write(2) = .true.
            IF (j .LT. lbounds_local(2) .OR. j .GT. ubounds_local(2)) THEN
               should_write(2) = .false.
            END IF
            IF (all(should_write)) THEN
               islice = islice + 1
            END IF
         END DO
      END DO
      nslices = islice - 1
      DO k = lbounds(3), ubounds(3), stride(3)
         IF (k + stride(3) > ubounds(3)) last_z = k
      END DO
      islice = 1
      ! Determine the initial byte offset (0, or EOF if data is appended)
      CALL unit_nr%get_position(bof)
      ! Write header information on the master process and update the byte offset accordingly
      IF (my_rank == 0) THEN
         ! this format seems to work for e.g. Molekel and gOpenMol
         ! recent versions of VMD can also read non-orthorhombic cells
         CALL unit_nr%write_at(bof, "-Quickstep-"//new_line("C"))
         bof = bof + len("-Quickstep-"//new_line("C"))*mpi_character_size
         IF (PRESENT(title)) THEN
            CALL unit_nr%write_at(bof, trim(title)//new_line("C"))
            bof = bof + len(trim(title)//new_line("C"))*mpi_character_size
         ELSE
            CALL unit_nr%write_at(bof, "No Title"//new_line("C"))
            bof = bof + len("No Title"//new_line("C"))*mpi_character_size
         END IF

         CPASSERT(PRESENT(particles_z) .EQV. PRESENT(particles_r))
         np = 0
         IF (PRESENT(particles_z)) THEN
            CPASSERT(SIZE(particles_z) == SIZE(particles_r, dim=2))
            ! cube files can hold at most 99999 particles due to a format limitation (I5),
            ! so we limit the number of particles written
            np = min(99999, SIZE(particles_z))
         END IF

         WRITE (header, '(I5,3f12.6)') np, 0.0_dp, 0._dp, 0._dp ! number of particles and cube origin
         CALL unit_nr%write_at(bof, header//new_line("C"))
         bof = bof + len(header//new_line("C"))*mpi_character_size

         WRITE (header, '(I5,3f12.6)') (grid%pw_grid%npts(1) + stride(1) - 1)/stride(1), &
            grid%pw_grid%dh(1, 1)*real(stride(1), dp), grid%pw_grid%dh(2, 1)*real(stride(1), dp), &
            grid%pw_grid%dh(3, 1)*real(stride(1), dp)
         CALL unit_nr%write_at(bof, header//new_line("C"))
         bof = bof + len(header//new_line("C"))*mpi_character_size

         WRITE (header, '(I5,3f12.6)') (grid%pw_grid%npts(2) + stride(2) - 1)/stride(2), &
            grid%pw_grid%dh(1, 2)*real(stride(2), dp), grid%pw_grid%dh(2, 2)*real(stride(2), dp), &
            grid%pw_grid%dh(3, 2)*real(stride(2), dp)
         CALL unit_nr%write_at(bof, header//new_line("C"))
         bof = bof + len(header//new_line("C"))*mpi_character_size

         WRITE (header, '(I5,3f12.6)') (grid%pw_grid%npts(3) + stride(3) - 1)/stride(3), &
            grid%pw_grid%dh(1, 3)*real(stride(3), dp), grid%pw_grid%dh(2, 3)*real(stride(3), dp), &
            grid%pw_grid%dh(3, 3)*real(stride(3), dp)
         CALL unit_nr%write_at(bof, header//new_line("C"))
         bof = bof + len(header//new_line("C"))*mpi_character_size

         IF (PRESENT(particles_z)) THEN
            DO i = 1, np
               WRITE (header_z, '(I5,4f12.6)') particles_z(i), 0._dp, particles_r(:, i)
               CALL unit_nr%write_at(bof, header_z//new_line("C"))
               bof = bof + len(header_z//new_line("C"))*mpi_character_size
            END DO
         END IF
      END IF
      ! Sync offset
      CALL gid%bcast(bof, grid%pw_grid%para%group_head_id)
      ! Determine byte offsets for the grid z-slices that are local to this process
      ! and convert the z-slices to cube-format-compatible strings
      ALLOCATE (displacements(nslices))
      displacements = 0
      ALLOCATE (writebuffer(nslices))
      writebuffer(:) = ''
      DO i = lbounds(1), ubounds(1), stride(1)
         should_write(:) = .true.
         IF (i .LT. lbounds_local(1)) THEN
            should_write(1) = .false.
         ELSE IF (i .GT. ubounds_local(1)) THEN
            EXIT
         END IF
         DO j = lbounds(2), ubounds(2), stride(2)
            should_write(2) = .true.
            IF (j .LT. lbounds_local(2) .OR. j .GT. ubounds_local(2)) THEN
               should_write(2) = .false.
            END IF
            IF (all(should_write)) THEN
               IF (islice > nslices) CPABORT("Index out of bounds.")
               displacements(islice) = bof
               tmp = ''
               counter = 0
               DO k = lbounds(3), ubounds(3), stride(3)
                  IF (zero_tails .AND. grid%array(i, j, k) < 1.e-7_dp) THEN
                     WRITE (value, '(E13.5)') 0.0_dp
                  ELSE
                     WRITE (value, '(E13.5)') grid%array(i, j, k)
                  END IF
                  tmp = trim(tmp)//trim(value)
                  counter = counter + 1
                  IF (modulo(counter, num_entries_line) == 0 .OR. k == last_z) &
                     tmp = trim(tmp)//new_line('C')
               END DO
               writebuffer(islice) = tmp
               islice = islice + 1
            END IF
            ! Update global byte offset
            bof = bof + msglen
         END DO
      END DO
      ! Create indexed MPI type using calculated byte offsets as displacements
      ! Size of each z-slice is msglen
      ALLOCATE (blocklengths(nslices))
      blocklengths(:) = msglen
      mp_desc = mp_file_type_hindexed_make_chv(nslices, blocklengths, displacements)
      ! Use the created type as a file view
      ! NB. The vector 'displacements' contains the absolute offsets of each z-slice, i.e.
      !     they are given relative to the beginning of the file. The global offset passed to
      !     set_view must therefore be set to 0
      bof = 0
      CALL mp_file_type_set_view_chv(unit_nr, bof, mp_desc)
      ! Collective write of cube
      CALL unit_nr%write_all(msglen, nslices, writebuffer, mp_desc)
      ! Clean up
      CALL mp_file_type_free(mp_desc)
      DEALLOCATE (writebuffer)
      DEALLOCATE (blocklengths, displacements)

   END SUBROUTINE pw_to_cube_parallel

! **************************************************************************************************
!> \brief Prints a simple grid file: X Y Z value
!> \param pw the grid to output
!> \param unit_nr the unit to write to
!> \param stride every stride(i)th value of the grid is written (i=x,y,z)
!> \param pw2 optional second grid whose values are printed as an additional column
!> \par History
!>      Created [Vladimir Rybkin] (08.2018)
!> \author Vladimir Rybkin
! **************************************************************************************************
   SUBROUTINE pw_to_simple_volumetric(pw, unit_nr, stride, pw2)
      TYPE(pw_r3d_rs_type), INTENT(IN) :: pw
      INTEGER, INTENT(IN) :: unit_nr
      INTEGER, DIMENSION(:), INTENT(IN), OPTIONAL :: stride
      TYPE(pw_r3d_rs_type), INTENT(IN), OPTIONAL :: pw2

      CHARACTER(len=*), PARAMETER :: routineN = 'pw_to_simple_volumetric'

      INTEGER :: checksum, dest, handle, i, i1, i2, i3, &
                 ip, l1, l2, l3, my_rank, my_stride(3), &
                 ngrids, npoints, num_pe, rank(2), &
                 source, tag, u1, u2, u3
      LOGICAL :: double
      REAL(kind=dp) :: x, y, z
      REAL(kind=dp), ALLOCATABLE, DIMENSION(:) :: buf, buf2
      TYPE(mp_comm_type) :: gid

      CALL timeset(routineN, handle)

      ! Check if we write two grids
      double = .false.
      IF (PRESENT(pw2)) double = .true.

      my_stride = 1
      IF (PRESENT(stride)) THEN
         IF (SIZE(stride) /= 1 .AND. SIZE(stride) /= 3) &
            CALL cp_abort(__LOCATION__, "STRIDE keyword can accept only 1 "// &
                          "(the same for X,Y,Z) or 3 values. Correct your input file.")
         IF (SIZE(stride) == 1) THEN
            DO i = 1, 3
               my_stride(i) = stride(1)
            END DO
         ELSE
            my_stride = stride(1:3)
         END IF
         CPASSERT(my_stride(1) > 0)
         CPASSERT(my_stride(2) > 0)
         CPASSERT(my_stride(3) > 0)
      END IF

      ! shortcut
      l1 = pw%pw_grid%bounds(1, 1)
      l2 = pw%pw_grid%bounds(1, 2)
      l3 = pw%pw_grid%bounds(1, 3)
      u1 = pw%pw_grid%bounds(2, 1)
      u2 = pw%pw_grid%bounds(2, 2)
      u3 = pw%pw_grid%bounds(2, 3)

      ! Write the header: number of points and number of grids
      ngrids = 1
      IF (double) ngrids = 2
      npoints = ((pw%pw_grid%npts(1) + my_stride(1) - 1)/my_stride(1))* &
                ((pw%pw_grid%npts(2) + my_stride(2) - 1)/my_stride(2))* &
                ((pw%pw_grid%npts(3) + my_stride(3) - 1)/my_stride(3))
      IF (unit_nr > 0) WRITE (unit_nr, '(I7,I5)') npoints, ngrids

      ALLOCATE (buf(l3:u3))
      IF (double) ALLOCATE (buf2(l3:u3))

      my_rank = pw%pw_grid%para%my_pos
      gid = pw%pw_grid%para%group
      num_pe = pw%pw_grid%para%group_size
      tag = 1

      rank(1) = unit_nr
      rank(2) = my_rank
      checksum = 0
      IF (unit_nr > 0) checksum = 1

      CALL gid%sum(checksum)
      CPASSERT(checksum == 1)

      CALL gid%maxloc(rank)
      CPASSERT(rank(1) > 0)

      dest = rank(2)
      DO i1 = l1, u1, my_stride(1)
         DO i2 = l2, u2, my_stride(2)

            ! cycling through the CPUs, check if the current ray (I1,I2) is local to that CPU
            IF (pw%pw_grid%para%mode .NE. pw_mode_local) THEN
               DO ip = 0, num_pe - 1
                  IF (pw%pw_grid%para%bo(1, 1, ip, 1) <= i1 - l1 + 1 .AND. pw%pw_grid%para%bo(2, 1, ip, 1) >= i1 - l1 + 1 .AND. &
                      pw%pw_grid%para%bo(1, 2, ip, 1) <= i2 - l2 + 1 .AND. pw%pw_grid%para%bo(2, 2, ip, 1) >= i2 - l2 + 1) THEN
                     source = ip
                  END IF
               END DO
            ELSE
               source = dest
            END IF

            IF (source == dest) THEN
               IF (my_rank == source) THEN
                  buf(:) = pw%array(i1, i2, :)
                  IF (double) buf2(:) = pw2%array(i1, i2, :)
               END IF
            ELSE
               IF (my_rank == source) THEN
                  buf(:) = pw%array(i1, i2, :)
                  CALL gid%send(buf, dest, tag)
                  IF (double) THEN
                     buf2(:) = pw2%array(i1, i2, :)
                     CALL gid%send(buf2, dest, tag)
                  END IF
               END IF
               IF (my_rank == dest) THEN
                  CALL gid%recv(buf, source, tag)
                  IF (double) CALL gid%recv(buf2, source, tag)
               END IF
            END IF

            IF (.NOT. double) THEN
               DO i3 = l3, u3, my_stride(3)
                  x = pw%pw_grid%dh(1, 1)*i1 + &
                      pw%pw_grid%dh(2, 1)*i2 + &
                      pw%pw_grid%dh(3, 1)*i3

                  y = pw%pw_grid%dh(1, 2)*i1 + &
                      pw%pw_grid%dh(2, 2)*i2 + &
                      pw%pw_grid%dh(3, 2)*i3

                  z = pw%pw_grid%dh(1, 3)*i1 + &
                      pw%pw_grid%dh(2, 3)*i2 + &
                      pw%pw_grid%dh(3, 3)*i3

                  IF (unit_nr > 0) THEN
                     WRITE (unit_nr, '(4E13.5)') x, y, z, buf(i3)
                  END IF
               END DO

            ELSE

               DO i3 = l3, u3, my_stride(3)
                  x = pw%pw_grid%dh(1, 1)*i1 + &
                      pw%pw_grid%dh(2, 1)*i2 + &
                      pw%pw_grid%dh(3, 1)*i3

                  y = pw%pw_grid%dh(1, 2)*i1 + &
                      pw%pw_grid%dh(2, 2)*i2 + &
                      pw%pw_grid%dh(3, 2)*i3

                  z = pw%pw_grid%dh(1, 3)*i1 + &
                      pw%pw_grid%dh(2, 3)*i2 + &
                      pw%pw_grid%dh(3, 3)*i3

                  IF (unit_nr > 0) THEN
                     WRITE (unit_nr, '(5E13.5)') x, y, z, buf(i3), buf2(i3)
                  END IF
               END DO

            END IF ! Double

            ! this double loop generates so many messages that it can overload
            ! the message passing system, e.g. on XT3
            ! we therefore put a barrier here to limit the number of messages
            ! in flight at any given time
            ! if this routine ever becomes a bottleneck, we should go for a
            ! more sophisticated rewrite
            CALL gid%sync()

         END DO
      END DO

      DEALLOCATE (buf)
      IF (double) DEALLOCATE (buf2)

      CALL timestop(handle)

   END SUBROUTINE pw_to_simple_volumetric
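
! A minimal usage sketch ('v' and 'v2' are assumed to be populated
! pw_r3d_rs_type grids on the same pw_grid, and 'unit_nr' a unit opened for
! formatted writing; all names are illustrative):
!
!    CALL pw_to_simple_volumetric(v, unit_nr, stride=(/2, 2, 2/), pw2=v2)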

END MODULE realspace_grid_cube