rpa_communication.F
1 !--------------------------------------------------------------------------------------------------!
2 ! CP2K: A general program to perform molecular dynamics simulations !
3 ! Copyright 2000-2024 CP2K developers group <https://cp2k.org> !
4 ! !
5 ! SPDX-License-Identifier: GPL-2.0-or-later !
6 !--------------------------------------------------------------------------------------------------!
7 
8 ! **************************************************************************************************
9 !> \brief Auxiliary routines necessary to redistribute an fm_matrix from a
10 !> given blacs_env to another
11 !> \par History
12 !> 12.2012 created [Mauro Del Ben]
13 ! **************************************************************************************************
14 MODULE rpa_communication
15  USE cp_blacs_env, ONLY: cp_blacs_env_create,&
16  cp_blacs_env_release,&
17  cp_blacs_env_type
18  USE cp_dbcsr_operations, ONLY: copy_fm_to_dbcsr,&
19  cp_dbcsr_m_by_n_from_template,&
20  dbcsr_allocate_matrix_set
21  USE cp_fm_struct, ONLY: cp_fm_struct_create,&
22  cp_fm_struct_release,&
23  cp_fm_struct_type
24  USE cp_fm_types, ONLY: cp_fm_create,&
25  cp_fm_get_info,&
26  cp_fm_indxg2l,&
27  cp_fm_indxg2p,&
28  cp_fm_release,&
29  cp_fm_set_all,&
30  cp_fm_type
31  USE dbcsr_api, ONLY: dbcsr_p_type,&
32  dbcsr_type,&
33  dbcsr_type_no_symmetry
34  USE group_dist_types, ONLY: create_group_dist,&
35  get_group_dist,&
36  group_dist_d1_type,&
37  release_group_dist
38  USE kinds, ONLY: dp
39  USE message_passing, ONLY: mp_para_env_type,&
40  mp_request_null,&
41  mp_request_type,&
42  mp_waitall
43  USE mp2_ri_grad_util, ONLY: fm2array,&
44  prepare_redistribution
45  USE mp2_types, ONLY: integ_mat_buffer_type
46  USE util, ONLY: get_limit
47 #include "./base/base_uses.f90"
48 
49  IMPLICIT NONE
50 
51  PRIVATE
52 
53  TYPE index_map
54  INTEGER, DIMENSION(:, :), ALLOCATABLE :: map
55  END TYPE
56 
57  CHARACTER(len=*), PARAMETER, PRIVATE :: moduleN = 'rpa_communication'
58 
59  PUBLIC :: gamma_fm_to_dbcsr, &
60  communicate_buffer
61 
62 CONTAINS
63 
64 ! **************************************************************************************************
65 !> \brief Redistribute RPA-AXK Gamma_3 density matrices: from fm to dbcsr
66 !> \param fm_mat_Gamma_3 ... ia*dimen_RI sized density matrix (fm type on para_env_RPA)
67 !> \param dbcsr_Gamma_3 ... redistributed Gamma_3 (dbcsr array): one i*a matrix on the subgroup per local RI function, with the L index distributed over the RPA group
68 !> \param para_env_RPA ... global RPA parallel environment
69 !> \param para_env_sub ... parallel environment of the subgroup
70 !> \param homo ... number of occupied orbitals
71 !> \param virtual ... number of virtual orbitals
72 !> \param mo_coeff_o ... occupied MO coefficients, dbcsr on a subgroup
73 !> \param ngroup ... number of subgroups
74 !> \param my_group_L_start ... first RI function (L index) handled by this subgroup
75 !> \param my_group_L_end ... last RI function (L index) handled by this subgroup
76 !> \param my_group_L_size ... number of RI functions handled by this subgroup
77 !> \author Vladimir Rybkin, 07/2016
78 ! **************************************************************************************************
79  SUBROUTINE gamma_fm_to_dbcsr(fm_mat_Gamma_3, dbcsr_Gamma_3, para_env_RPA, para_env_sub, &
80  homo, virtual, mo_coeff_o, ngroup, my_group_L_start, my_group_L_end, &
81  my_group_L_size)
82  TYPE(cp_fm_type), INTENT(INOUT) :: fm_mat_gamma_3
83  TYPE(dbcsr_p_type), DIMENSION(:), POINTER :: dbcsr_gamma_3
84  TYPE(mp_para_env_type), INTENT(IN) :: para_env_rpa
85  TYPE(mp_para_env_type), INTENT(IN), POINTER :: para_env_sub
86  INTEGER, INTENT(IN) :: homo, virtual
87  TYPE(dbcsr_type), POINTER :: mo_coeff_o
88  INTEGER, INTENT(IN) :: ngroup, my_group_l_start, &
89  my_group_l_end, my_group_l_size
90 
91  CHARACTER(LEN=*), PARAMETER :: routinen = 'gamma_fm_to_dbcsr'
92 
93  INTEGER :: dimen_ia, dummy_proc, handle, i_global, i_local, iaia, iib, iii, itmp(2), &
94  j_global, j_local, jjb, jjj, kkb, my_ia_end, my_ia_size, my_ia_start, mypcol, myprow, &
95  ncol_block, ncol_local, npcol, nprow, nrow_block, nrow_local, number_of_rec, &
96  number_of_send, proc_receive, proc_send, proc_shift, rec_counter, rec_iaia_end, &
97  rec_iaia_size, rec_iaia_start, rec_pcol, rec_prow, ref_send_pcol, ref_send_prow, &
98  send_counter, send_pcol, send_prow, size_rec_buffer, size_send_buffer
99  INTEGER, ALLOCATABLE, DIMENSION(:) :: iii_vet, map_rec_size, map_send_size
100  INTEGER, ALLOCATABLE, DIMENSION(:, :) :: grid_2_mepos, grid_ref_2_send_pos, &
101  group_grid_2_mepos, indices_map_my, &
102  mepos_2_grid, mepos_2_grid_group
103  INTEGER, DIMENSION(:), POINTER :: col_indices, row_indices
104  REAL(kind=dp) :: part_ia
105  REAL(kind=dp), ALLOCATABLE, DIMENSION(:, :) :: gamma_2d
106  TYPE(cp_blacs_env_type), POINTER :: blacs_env
107  TYPE(cp_fm_struct_type), POINTER :: fm_struct
108  TYPE(cp_fm_type) :: fm_ia
109  TYPE(group_dist_d1_type) :: gd_ia
110  TYPE(index_map), ALLOCATABLE, DIMENSION(:) :: indices_rec
111  TYPE(integ_mat_buffer_type), ALLOCATABLE, &
112  DIMENSION(:) :: buffer_rec, buffer_send
113  TYPE(mp_request_type), ALLOCATABLE, DIMENSION(:) :: req_send
114 
115  CALL timeset(routinen, handle)
116 
117  dimen_ia = virtual*homo
118 
119  ! Prepare sizes for a 2D array
120  CALL create_group_dist(gd_ia, para_env_sub%num_pe, dimen_ia)
121  CALL get_group_dist(gd_ia, para_env_sub%mepos, my_ia_start, my_ia_end, my_ia_size)
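 ! Note: the combined index ia runs over occupied x virtual pairs (dimen_ia = homo*virtual)
 ! and is split into contiguous, nearly equal ranges over the para_env_sub ranks (the same
 ! splitting as get_limit), e.g. 10 pairs on 4 ranks give local ranges of 3, 3, 2 and 2 entries.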
122 
123  ! Make a 2D array intermediate
124 
125  CALL prepare_redistribution(para_env_rpa, para_env_sub, ngroup, &
126  group_grid_2_mepos, mepos_2_grid_group)
127 
128  ! fm_mat_Gamma_3 is released here
129  CALL fm2array(gamma_2d, my_ia_size, my_ia_start, my_ia_end, &
130  my_group_l_size, my_group_l_start, my_group_l_end, &
131  group_grid_2_mepos, mepos_2_grid_group, &
132  para_env_sub%num_pe, ngroup, &
133  fm_mat_gamma_3)
134 
135  ! create sub blacs env
136  NULLIFY (blacs_env)
137  CALL cp_blacs_env_create(blacs_env=blacs_env, para_env=para_env_sub)
138 
139  ! create the fm_ia buffer matrix
140  NULLIFY (fm_struct)
141  CALL cp_fm_struct_create(fm_struct, context=blacs_env, nrow_global=homo, &
142  ncol_global=virtual, para_env=para_env_sub)
143  CALL cp_fm_create(fm_ia, fm_struct, name="fm_ia")
144 
145  ! release structure
146  CALL cp_fm_struct_release(fm_struct)
147  ! release blacs_env
148  CALL cp_blacs_env_release(blacs_env)
149 
150  ! get array information
151  CALL cp_fm_get_info(matrix=fm_ia, &
152  nrow_local=nrow_local, &
153  ncol_local=ncol_local, &
154  row_indices=row_indices, &
155  col_indices=col_indices, &
156  nrow_block=nrow_block, &
157  ncol_block=ncol_block)
158  myprow = fm_ia%matrix_struct%context%mepos(1)
159  mypcol = fm_ia%matrix_struct%context%mepos(2)
160  nprow = fm_ia%matrix_struct%context%num_pe(1)
161  npcol = fm_ia%matrix_struct%context%num_pe(2)
162 
163  ! 0) create arrays containing the process positions and supporting info
164  ALLOCATE (grid_2_mepos(0:nprow - 1, 0:npcol - 1))
165  grid_2_mepos = 0
166  ALLOCATE (mepos_2_grid(2, 0:para_env_sub%num_pe - 1))
167  ! fill the info array
168  grid_2_mepos(myprow, mypcol) = para_env_sub%mepos
169  ! sum the info over the subgroup
170  CALL para_env_sub%sum(grid_2_mepos)
171  CALL para_env_sub%allgather([myprow, mypcol], mepos_2_grid)
172 
173  ! loop over local index range and define the sending map
174  ALLOCATE (map_send_size(0:para_env_sub%num_pe - 1))
175  map_send_size = 0
176  dummy_proc = 0
177  DO iaia = my_ia_start, my_ia_end
178  i_global = (iaia - 1)/virtual + 1
179  j_global = mod(iaia - 1, virtual) + 1
180  send_prow = cp_fm_indxg2p(i_global, nrow_block, dummy_proc, &
181  fm_ia%matrix_struct%first_p_pos(1), nprow)
182  send_pcol = cp_fm_indxg2p(j_global, ncol_block, dummy_proc, &
183  fm_ia%matrix_struct%first_p_pos(2), npcol)
184  proc_send = grid_2_mepos(send_prow, send_pcol)
185  map_send_size(proc_send) = map_send_size(proc_send) + 1
186  END DO
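 ! For every locally owned pair the composite index is decomposed as
 ! i_global = (iaia-1)/virtual + 1 (occupied) and j_global = MOD(iaia-1, virtual) + 1 (virtual);
 ! e.g. with virtual = 3 the pair iaia = 5 maps to element (2,2) of fm_ia.
 ! cp_fm_indxg2p then gives the block-cyclic (prow, pcol) owner of that element, so that
 ! map_send_size(proc) counts how many local values have to be shipped to each subgroup rank.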
187 
188  ! loop over local data of fm_ia and define the receiving map
189  ALLOCATE (map_rec_size(0:para_env_sub%num_pe - 1))
190  map_rec_size = 0
191  part_ia = real(dimen_ia, kind=dp)/real(para_env_sub%num_pe, kind=dp)
192 
193  DO iib = 1, nrow_local
194  i_global = row_indices(iib)
195  DO jjb = 1, ncol_local
196  j_global = col_indices(jjb)
197  iaia = (i_global - 1)*virtual + j_global
198  proc_receive = int(real(iaia - 1, kind=dp)/part_ia)
199  proc_receive = max(0, proc_receive)
200  proc_receive = min(proc_receive, para_env_sub%num_pe - 1)
201  DO
202  itmp = get_limit(dimen_ia, para_env_sub%num_pe, proc_receive)
203  IF (iaia >= itmp(1) .AND. iaia <= itmp(2)) EXIT
204  IF (iaia < itmp(1)) proc_receive = proc_receive - 1
205  IF (iaia > itmp(2)) proc_receive = proc_receive + 1
206  END DO
207  map_rec_size(proc_receive) = map_rec_size(proc_receive) + 1
208  END DO
209  END DO
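 ! The sending rank of each local fm_ia element is first estimated as INT((iaia-1)/part_ia)
 ! and then corrected with get_limit until iaia falls inside that rank's range, so that
 ! map_rec_size(proc) counts how many elements will arrive from each subgroup rank.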
210 
211  ! allocate the buffer for sending data
212  number_of_send = 0
213  DO proc_shift = 1, para_env_sub%num_pe - 1
214  proc_send = modulo(para_env_sub%mepos + proc_shift, para_env_sub%num_pe)
215  IF (map_send_size(proc_send) > 0) THEN
216  number_of_send = number_of_send + 1
217  END IF
218  END DO
219  ! allocate the structure that will hold the messages to be sent
220  ALLOCATE (buffer_send(number_of_send))
221  ! and the map from the grid of processes to the message position
222  ALLOCATE (grid_ref_2_send_pos(0:nprow - 1, 0:npcol - 1))
223  grid_ref_2_send_pos = 0
224  ! finally allocate each message
225  send_counter = 0
226  DO proc_shift = 1, para_env_sub%num_pe - 1
227  proc_send = modulo(para_env_sub%mepos + proc_shift, para_env_sub%num_pe)
228  size_send_buffer = map_send_size(proc_send)
229  IF (map_send_size(proc_send) > 0) THEN
230  send_counter = send_counter + 1
231  ! allocate the sending buffer (msg)
232  ALLOCATE (buffer_send(send_counter)%msg(size_send_buffer))
233  buffer_send(send_counter)%proc = proc_send
234  ! look up the prow, pcol coordinates of the process that has
235  ! to receive this message
236  ref_send_prow = mepos_2_grid(1, proc_send)
237  ref_send_pcol = mepos_2_grid(2, proc_send)
238  ! save the position in buffer_send of the message for this receiving process
239  grid_ref_2_send_pos(ref_send_prow, ref_send_pcol) = send_counter
240  END IF
241  END DO
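 ! Messages are set up in round-robin order (mepos+1, mepos+2, ...), one buffer per target
 ! rank that actually receives data; grid_ref_2_send_pos translates the receiver's
 ! (prow, pcol) grid position back to the slot of its message in buffer_send.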
242 
243  ! allocate the buffer for receiving data
244  number_of_rec = 0
245  DO proc_shift = 1, para_env_sub%num_pe - 1
246  proc_receive = modulo(para_env_sub%mepos - proc_shift, para_env_sub%num_pe)
247  IF (map_rec_size(proc_receive) > 0) THEN
248  number_of_rec = number_of_rec + 1
249  END IF
250  END DO
251 
252  ! allocate the structure that will hold the messages to be received
253  ! and related indices
254  ALLOCATE (buffer_rec(number_of_rec))
255  ALLOCATE (indices_rec(number_of_rec))
256  ! finally allocate each message and fill the array of indices
257  rec_counter = 0
258  DO proc_shift = 1, para_env_sub%num_pe - 1
259  proc_receive = modulo(para_env_sub%mepos - proc_shift, para_env_sub%num_pe)
260  size_rec_buffer = map_rec_size(proc_receive)
261  IF (map_rec_size(proc_receive) > 0) THEN
262  rec_counter = rec_counter + 1
263  ! prepare the buffer for receive
264  ALLOCATE (buffer_rec(rec_counter)%msg(size_rec_buffer))
265  buffer_rec(rec_counter)%proc = proc_receive
266  ! create the indices array
267  ALLOCATE (indices_rec(rec_counter)%map(2, size_rec_buffer))
268  indices_rec(rec_counter)%map = 0
269  CALL get_group_dist(gd_ia, proc_receive, rec_iaia_start, rec_iaia_end, rec_iaia_size)
270  iii = 0
271  DO iaia = rec_iaia_start, rec_iaia_end
272  i_global = (iaia - 1)/virtual + 1
273  j_global = mod(iaia - 1, virtual) + 1
274  rec_prow = cp_fm_indxg2p(i_global, nrow_block, dummy_proc, &
275  fm_ia%matrix_struct%first_p_pos(1), nprow)
276  rec_pcol = cp_fm_indxg2p(j_global, ncol_block, dummy_proc, &
277  fm_ia%matrix_struct%first_p_pos(2), npcol)
278  IF (grid_2_mepos(rec_prow, rec_pcol) /= para_env_sub%mepos) cycle
279  iii = iii + 1
280  i_local = cp_fm_indxg2l(i_global, nrow_block, dummy_proc, &
281  fm_ia%matrix_struct%first_p_pos(1), nprow)
282  j_local = cp_fm_indxg2l(j_global, ncol_block, dummy_proc, &
283  fm_ia%matrix_struct%first_p_pos(2), npcol)
284  indices_rec(rec_counter)%map(1, iii) = i_local
285  indices_rec(rec_counter)%map(2, iii) = j_local
286  END DO
287  END IF
288  END DO
289 
290  ! and create the index map for my local data
291  IF (map_rec_size(para_env_sub%mepos) > 0) THEN
292  size_rec_buffer = map_rec_size(para_env_sub%mepos)
293  ALLOCATE (indices_map_my(2, size_rec_buffer))
294  indices_map_my = 0
295  iii = 0
296  DO iaia = my_ia_start, my_ia_end
297  i_global = (iaia - 1)/virtual + 1
298  j_global = mod(iaia - 1, virtual) + 1
299  rec_prow = cp_fm_indxg2p(i_global, nrow_block, dummy_proc, &
300  fm_ia%matrix_struct%first_p_pos(1), nprow)
301  rec_pcol = cp_fm_indxg2p(j_global, ncol_block, dummy_proc, &
302  fm_ia%matrix_struct%first_p_pos(2), npcol)
303  IF (grid_2_mepos(rec_prow, rec_pcol) /= para_env_sub%mepos) cycle
304  iii = iii + 1
305  i_local = cp_fm_indxg2l(i_global, nrow_block, dummy_proc, &
306  fm_ia%matrix_struct%first_p_pos(1), nprow)
307  j_local = cp_fm_indxg2l(j_global, ncol_block, dummy_proc, &
308  fm_ia%matrix_struct%first_p_pos(2), npcol)
309  indices_map_my(1, iii) = i_local
310  indices_map_my(2, iii) = j_local
311  END DO
312  END IF
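 ! indices_map_my stores, for every locally owned pair that also stays on this rank in the
 ! block-cyclic layout of fm_ia, the target (i_local, j_local) position, so these values can
 ! be copied directly without communication inside the loop over L below.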
313 
314  ! Allocate dbcsr_Gamma_3
315  NULLIFY (dbcsr_gamma_3)
316 
317  !CALL dbcsr_allocate_matrix_set(dbcsr_Gamma_3, ncol_local)
318  CALL dbcsr_allocate_matrix_set(dbcsr_gamma_3, my_group_l_size)
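 ! One dbcsr matrix is created per RI basis function L held by this subgroup
 ! (my_group_L_size entries); each matrix is homo x virtual and inherits the processor grid
 ! of the mo_coeff_o template (see cp_dbcsr_m_by_n_from_template below).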
319 
320  ! auxiliary vector of indices for the send buffer
321  ALLOCATE (iii_vet(number_of_send))
322  ! vector for the send requests
323  ALLOCATE (req_send(number_of_send))
324  ! loop over auxiliary basis functions and redistribute into a fm
325  ! and then copy the fm into a dbcsr matrix
326 
327  !DO kkB = 1, ncol_local
328  DO kkb = 1, my_group_l_size
329  ! zero the matrices of the buffers and post the messages to be received
330  CALL cp_fm_set_all(matrix=fm_ia, alpha=0.0_dp)
331  rec_counter = 0
332  DO proc_shift = 1, para_env_sub%num_pe - 1
333  proc_receive = modulo(para_env_sub%mepos - proc_shift, para_env_sub%num_pe)
334  IF (map_rec_size(proc_receive) > 0) THEN
335  rec_counter = rec_counter + 1
336  buffer_rec(rec_counter)%msg = 0.0_dp
337  CALL para_env_sub%irecv(buffer_rec(rec_counter)%msg, proc_receive, &
338  buffer_rec(rec_counter)%msg_req)
339  END IF
340  END DO
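 ! The non-blocking receives are posted before the corresponding sends below, so no rank can
 ! block while waiting for a matching receive; the requests are completed further down when
 ! the incoming data are copied into fm_ia.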
341  ! fill the sending buffer and send the messages
342  DO send_counter = 1, number_of_send
343  buffer_send(send_counter)%msg = 0.0_dp
344  END DO
345  iii_vet = 0
346  jjj = 0
347  DO iaia = my_ia_start, my_ia_end
348  i_global = (iaia - 1)/virtual + 1
349  j_global = mod(iaia - 1, virtual) + 1
350  send_prow = cp_fm_indxg2p(i_global, nrow_block, dummy_proc, &
351  fm_ia%matrix_struct%first_p_pos(1), nprow)
352  send_pcol = cp_fm_indxg2p(j_global, ncol_block, dummy_proc, &
353  fm_ia%matrix_struct%first_p_pos(2), npcol)
354  proc_send = grid_2_mepos(send_prow, send_pcol)
355  ! we don't need to send to ourselves
356  IF (grid_2_mepos(send_prow, send_pcol) == para_env_sub%mepos) THEN
357  ! filling fm_ia with local data
358  jjj = jjj + 1
359  i_local = indices_map_my(1, jjj)
360  j_local = indices_map_my(2, jjj)
361  fm_ia%local_data(i_local, j_local) = &
362  gamma_2d(iaia - my_ia_start + 1, kkb)
363 
364  ELSE
365  send_counter = grid_ref_2_send_pos(send_prow, send_pcol)
366  iii_vet(send_counter) = iii_vet(send_counter) + 1
367  iii = iii_vet(send_counter)
368  buffer_send(send_counter)%msg(iii) = &
369  gamma_2d(iaia - my_ia_start + 1, kkb)
370  END IF
371  END DO
372  req_send = mp_request_null
373  send_counter = 0
374  DO proc_shift = 1, para_env_sub%num_pe - 1
375  proc_send = modulo(para_env_sub%mepos + proc_shift, para_env_sub%num_pe)
376  IF (map_send_size(proc_send) > 0) THEN
377  send_counter = send_counter + 1
378  CALL para_env_sub%isend(buffer_send(send_counter)%msg, proc_send, &
379  buffer_send(send_counter)%msg_req)
380  req_send(send_counter) = buffer_send(send_counter)%msg_req
381  END IF
382  END DO
383 
384  ! receive the messages and fill the fm_ia
385  rec_counter = 0
386  DO proc_shift = 1, para_env_sub%num_pe - 1
387  proc_receive = modulo(para_env_sub%mepos - proc_shift, para_env_sub%num_pe)
388  size_rec_buffer = map_rec_size(proc_receive)
389  IF (map_rec_size(proc_receive) > 0) THEN
390  rec_counter = rec_counter + 1
391  ! wait for the message
392  CALL buffer_rec(rec_counter)%msg_req%wait()
393  DO iii = 1, size_rec_buffer
394  i_local = indices_rec(rec_counter)%map(1, iii)
395  j_local = indices_rec(rec_counter)%map(2, iii)
396  fm_ia%local_data(i_local, j_local) = buffer_rec(rec_counter)%msg(iii)
397  END DO
398  END IF
399  END DO
400 
401  ! wait for all outstanding sends to complete
402  CALL mp_waitall(req_send(:))
403 
404  ! now create the DBCSR matrix and copy fm_ia into it
405  ALLOCATE (dbcsr_gamma_3(kkb)%matrix)
406  CALL cp_dbcsr_m_by_n_from_template(dbcsr_gamma_3(kkb)%matrix, &
407  template=mo_coeff_o, &
408  m=homo, n=virtual, sym=dbcsr_type_no_symmetry)
409  CALL copy_fm_to_dbcsr(fm_ia, dbcsr_gamma_3(kkb)%matrix, keep_sparsity=.false.)
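 ! fm_ia now holds the complete homo x virtual slice for this L; with keep_sparsity=.FALSE.
 ! the dbcsr copy keeps all blocks instead of being restricted to an existing sparsity pattern.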
410 
411  END DO
412 
413  ! Deallocate memory
414 
415  DEALLOCATE (gamma_2d)
416  DEALLOCATE (iii_vet)
417  DEALLOCATE (req_send)
418  IF (map_rec_size(para_env_sub%mepos) > 0) THEN
419  DEALLOCATE (indices_map_my)
420  END IF
421  DO rec_counter = 1, number_of_rec
422  DEALLOCATE (indices_rec(rec_counter)%map)
423  DEALLOCATE (buffer_rec(rec_counter)%msg)
424  END DO
425  DEALLOCATE (indices_rec)
426  DEALLOCATE (buffer_rec)
427  DO send_counter = 1, number_of_send
428  DEALLOCATE (buffer_send(send_counter)%msg)
429  END DO
430  DEALLOCATE (buffer_send)
431  DEALLOCATE (map_send_size)
432  DEALLOCATE (map_rec_size)
433  DEALLOCATE (grid_2_mepos)
434  DEALLOCATE (mepos_2_grid)
435  CALL release_group_dist(gd_ia)
436 
437  ! release buffer matrix
438  CALL cp_fm_release(fm_ia)
439 
440  CALL timestop(handle)
441 
442  END SUBROUTINE gamma_fm_to_dbcsr
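 ! Illustrative call sequence (sketch only; the caller-side variable names are assumptions,
 ! their meaning is documented in the header above):
 !   CALL gamma_fm_to_dbcsr(fm_mat_Gamma_3, dbcsr_Gamma_3, para_env_RPA, para_env_sub, &
 !                          homo, virtual, mo_coeff_o, ngroup, my_group_L_start, &
 !                          my_group_L_end, my_group_L_size)
 ! On return dbcsr_Gamma_3(1:my_group_L_size) holds one homo x virtual dbcsr matrix per RI
 ! function of this subgroup, and fm_mat_Gamma_3 has been released inside the routine.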
443 
444 ! **************************************************************************************************
445 !> \brief Exchange of index and data buffers between all processes of para_env using non-blocking MPI
446 !> \param para_env ... parallel environment used for the exchange
447 !> \param num_entries_rec ... number of entries to be received from each process
448 !> \param num_entries_send ... number of entries to be sent to each process
449 !> \param buffer_rec ... receive buffers (one per sending process)
450 !> \param buffer_send ... send buffers (one per receiving process)
451 !> \param req_array ... array holding the send and receive requests
452 !> \param do_indx ... if .TRUE. (default), communicate the index (indx) arrays
453 !> \param do_msg ... if .TRUE. (default), communicate the data (msg) arrays
454 ! **************************************************************************************************
455  SUBROUTINE communicate_buffer(para_env, num_entries_rec, num_entries_send, buffer_rec, buffer_send, &
456  req_array, do_indx, do_msg)
457 
458  TYPE(mp_para_env_type), INTENT(IN) :: para_env
459  INTEGER, ALLOCATABLE, DIMENSION(:), INTENT(IN) :: num_entries_rec, num_entries_send
460  TYPE(integ_mat_buffer_type), ALLOCATABLE, &
461  DIMENSION(:), INTENT(INOUT) :: buffer_rec, buffer_send
462  TYPE(mp_request_type), DIMENSION(:, :), POINTER :: req_array
463  LOGICAL, INTENT(IN), OPTIONAL :: do_indx, do_msg
464 
465  CHARACTER(LEN=*), PARAMETER :: routinen = 'communicate_buffer'
466 
467  INTEGER :: handle, imepos, rec_counter, send_counter
468  LOGICAL :: my_do_indx, my_do_msg
469 
470  CALL timeset(routinen, handle)
471 
472  my_do_indx = .true.
473  IF (PRESENT(do_indx)) my_do_indx = do_indx
474  my_do_msg = .true.
475  IF (PRESENT(do_msg)) my_do_msg = do_msg
476 
477  IF (para_env%num_pe > 1) THEN
478 
479  send_counter = 0
480  rec_counter = 0
481 
482  DO imepos = 0, para_env%num_pe - 1
483  IF (num_entries_rec(imepos) > 0) THEN
484  rec_counter = rec_counter + 1
485  IF (my_do_indx) THEN
486  CALL para_env%irecv(buffer_rec(imepos)%indx, imepos, req_array(rec_counter, 3), tag=4)
487  END IF
488  IF (my_do_msg) THEN
489  CALL para_env%irecv(buffer_rec(imepos)%msg, imepos, req_array(rec_counter, 4), tag=7)
490  END IF
491  END IF
492  END DO
493 
494  DO imepos = 0, para_env%num_pe - 1
495  IF (num_entries_send(imepos) > 0) THEN
496  send_counter = send_counter + 1
497  IF (my_do_indx) THEN
498  CALL para_env%isend(buffer_send(imepos)%indx, imepos, req_array(send_counter, 1), tag=4)
499  END IF
500  IF (my_do_msg) THEN
501  CALL para_env%isend(buffer_send(imepos)%msg, imepos, req_array(send_counter, 2), tag=7)
502  END IF
503  END IF
504  END DO
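 ! Tags 4 and 7 keep the index and data messages apart; req_array columns 1 and 2 collect
 ! the send requests (indx and msg), columns 3 and 4 the corresponding receive requests,
 ! matching the mp_waitall calls below.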
505 
506  IF (my_do_indx) THEN
507  CALL mp_waitall(req_array(1:send_counter, 1))
508  CALL mp_waitall(req_array(1:rec_counter, 3))
509  END IF
510 
511  IF (my_do_msg) THEN
512  CALL mp_waitall(req_array(1:send_counter, 2))
513  CALL mp_waitall(req_array(1:rec_counter, 4))
514  END IF
515 
516  ELSE
517 
518  buffer_rec(0)%indx(:, :) = buffer_send(0)%indx
519  buffer_rec(0)%msg(:) = buffer_send(0)%msg
520 
521  END IF
522 
523  CALL timestop(handle)
524 
525  END SUBROUTINE communicate_buffer
526 
527 END MODULE rpa_communication