(git:ed6f26b)
Loading...
Searching...
No Matches
rpa_communication.F
Go to the documentation of this file.
1!--------------------------------------------------------------------------------------------------!
2! CP2K: A general program to perform molecular dynamics simulations !
3! Copyright 2000-2025 CP2K developers group <https://cp2k.org> !
4! !
5! SPDX-License-Identifier: GPL-2.0-or-later !
6!--------------------------------------------------------------------------------------------------!
7
8! **************************************************************************************************
9!> \brief Auxiliary routines necessary to redistribute an fm_matrix from a
10!> given blacs_env to another
11!> \par History
12!> 12.2012 created [Mauro Del Ben]
13! **************************************************************************************************
18 USE cp_dbcsr_api, ONLY: dbcsr_type,&
19 dbcsr_type_no_symmetry
25 USE cp_fm_types, ONLY: cp_fm_create,&
34 USE kinds, ONLY: dp
39 USE mp2_ri_grad_util, ONLY: fm2array,&
42 USE util, ONLY: get_limit
43#include "./base/base_uses.f90"
44
45 IMPLICIT NONE
46
47 PRIVATE
48
49 TYPE index_map
50 INTEGER, DIMENSION(:, :), ALLOCATABLE :: map
51 END TYPE
52
53 CHARACTER(len=*), PARAMETER, PRIVATE :: moduleN = 'rpa_communication'
54
55 PUBLIC :: gamma_fm_to_dbcsr, &
57
58CONTAINS
59
! **************************************************************************************************
!> \brief Redistribute RPA-AXK Gamma_3 density matrices: from fm to dbcsr
!> \param fm_mat_Gamma_3 ... ia*dime_RI sized density matrix (fm type on para_env_RPA)
!> \param dbcsr_Gamma_3 ... redistributed Gamma_3 (dbcsr array): dimen_RI of i*a: i*a on subgroup, L distributed in RPA_group
!> \param para_env_RPA ... parallel environment spanning the whole RPA group
!> \param para_env_sub ... parallel environment of the subgroup this process belongs to
!> \param homo ... number of occupied (hole) orbitals, i index of the compound ia index
!> \param virtual ... number of virtual orbitals, a index of the compound ia index
!> \param mo_coeff_o ... dbcsr on a subgroup
!> \param ngroup ... number of subgroups
!> \param my_group_L_start ... first RI (L) function index of the column range handled by this group
!> \param my_group_L_end ... last RI (L) function index of the column range handled by this group
!> \param my_group_L_size ... number of RI (L) functions handled by this group
!> \author Vladimir Rybkin, 07/2016
! **************************************************************************************************
   SUBROUTINE gamma_fm_to_dbcsr(fm_mat_Gamma_3, dbcsr_Gamma_3, para_env_RPA, para_env_sub, &
                                homo, virtual, mo_coeff_o, ngroup, my_group_L_start, my_group_L_end, &
                                my_group_L_size)
      TYPE(cp_fm_type), INTENT(INOUT) :: fm_mat_gamma_3
      TYPE(dbcsr_type), ALLOCATABLE, DIMENSION(:) :: dbcsr_gamma_3
      TYPE(mp_para_env_type), INTENT(IN) :: para_env_rpa
      TYPE(mp_para_env_type), INTENT(IN), POINTER :: para_env_sub
      INTEGER, INTENT(IN) :: homo, virtual
      TYPE(dbcsr_type), INTENT(INOUT) :: mo_coeff_o
      INTEGER, INTENT(IN) :: ngroup, my_group_l_start, &
                             my_group_l_end, my_group_l_size

      CHARACTER(LEN=*), PARAMETER :: routinen = 'gamma_fm_to_dbcsr'

      INTEGER :: dimen_ia, dummy_proc, handle, i_global, i_local, iaia, iib, iii, itmp(2), &
                 j_global, j_local, jjb, jjj, kkb, my_ia_end, my_ia_size, my_ia_start, mypcol, myprow, &
                 ncol_local, npcol, nprow, nrow_local, number_of_rec, number_of_send, proc_receive, &
                 proc_send, proc_shift, rec_counter, rec_iaia_end, rec_iaia_size, rec_iaia_start, &
                 rec_pcol, rec_prow, ref_send_pcol, ref_send_prow, send_counter, send_pcol, send_prow, &
                 size_rec_buffer, size_send_buffer
      INTEGER, ALLOCATABLE, DIMENSION(:) :: iii_vet, map_rec_size, map_send_size
      INTEGER, ALLOCATABLE, DIMENSION(:, :) :: grid_2_mepos, grid_ref_2_send_pos, &
                                               group_grid_2_mepos, indices_map_my, &
                                               mepos_2_grid, mepos_2_grid_group
      INTEGER, DIMENSION(:), POINTER :: col_indices, row_indices
      REAL(kind=dp) :: part_ia
      REAL(kind=dp), ALLOCATABLE, DIMENSION(:, :) :: gamma_2d
      TYPE(cp_blacs_env_type), POINTER :: blacs_env
      TYPE(cp_fm_struct_type), POINTER :: fm_struct
      TYPE(cp_fm_type) :: fm_ia
      TYPE(group_dist_d1_type) :: gd_ia
      TYPE(index_map), ALLOCATABLE, DIMENSION(:) :: indices_rec
      TYPE(integ_mat_buffer_type), ALLOCATABLE, &
         DIMENSION(:) :: buffer_rec, buffer_send
      TYPE(mp_request_type), ALLOCATABLE, DIMENSION(:) :: req_send

      CALL timeset(routinen, handle)

      ! size of the compound occupied*virtual (ia) index
      dimen_ia = virtual*homo

      ! Prepare sizes for a 2D array: distribute the ia index over the subgroup ranks
      CALL create_group_dist(gd_ia, para_env_sub%num_pe, dimen_ia)
      CALL get_group_dist(gd_ia, para_env_sub%mepos, my_ia_start, my_ia_end, my_ia_size)

      ! Make a 2D array intermediate

      CALL prepare_redistribution(para_env_rpa, para_env_sub, ngroup, &
                                  group_grid_2_mepos, mepos_2_grid_group)

      ! fm_mat_Gamma_3 is released here; gamma_2d holds my (ia, L) slab afterwards
      CALL fm2array(gamma_2d, my_ia_size, my_ia_start, my_ia_end, &
                    my_group_l_size, my_group_l_start, my_group_l_end, &
                    group_grid_2_mepos, mepos_2_grid_group, &
                    para_env_sub%num_pe, ngroup, &
                    fm_mat_gamma_3)

      ! create sub blacs env
      NULLIFY (blacs_env)
      CALL cp_blacs_env_create(blacs_env=blacs_env, para_env=para_env_sub)

      ! create the fm_ia buffer matrix (homo x virtual, one per RI function, reused in the L loop)
      NULLIFY (fm_struct)
      CALL cp_fm_struct_create(fm_struct, context=blacs_env, nrow_global=homo, &
                               ncol_global=virtual, para_env=para_env_sub)
      CALL cp_fm_create(fm_ia, fm_struct, name="fm_ia")

      ! release structure
      CALL cp_fm_struct_release(fm_struct)
      ! release blacs_env
      CALL cp_blacs_env_release(blacs_env)

      ! get array information
      CALL cp_fm_get_info(matrix=fm_ia, &
                          nrow_local=nrow_local, &
                          ncol_local=ncol_local, &
                          row_indices=row_indices, &
                          col_indices=col_indices)
      myprow = fm_ia%matrix_struct%context%mepos(1)
      mypcol = fm_ia%matrix_struct%context%mepos(2)
      nprow = fm_ia%matrix_struct%context%num_pe(1)
      npcol = fm_ia%matrix_struct%context%num_pe(2)

      ! 0) create array containing the processes position and supporting infos
      ALLOCATE (grid_2_mepos(0:nprow - 1, 0:npcol - 1))
      grid_2_mepos = 0
      ALLOCATE (mepos_2_grid(2, 0:para_env_sub%num_pe - 1))
      ! fill the info array: each rank writes its own (prow, pcol) slot
      grid_2_mepos(myprow, mypcol) = para_env_sub%mepos
      ! sum infos (each slot written by exactly one rank, so the sum gathers the full map)
      CALL para_env_sub%sum(grid_2_mepos)
      CALL para_env_sub%allgather([myprow, mypcol], mepos_2_grid)

      ! loop over local index range and define the sending map:
      ! count how many of my ia elements land on each BLACS owner of fm_ia
      ALLOCATE (map_send_size(0:para_env_sub%num_pe - 1))
      map_send_size = 0
      dummy_proc = 0
      DO iaia = my_ia_start, my_ia_end
         ! decompose the compound index: i runs over homo, a (j) over virtual
         i_global = (iaia - 1)/virtual + 1
         j_global = mod(iaia - 1, virtual) + 1
         send_prow = fm_ia%matrix_struct%g2p_row(i_global)
         send_pcol = fm_ia%matrix_struct%g2p_col(j_global)
         proc_send = grid_2_mepos(send_prow, send_pcol)
         map_send_size(proc_send) = map_send_size(proc_send) + 1
      END DO

      ! loop over local data of fm_ia and define the receiving map:
      ! count how many of my local fm_ia elements come from each ia-distribution owner
      ALLOCATE (map_rec_size(0:para_env_sub%num_pe - 1))
      map_rec_size = 0
      part_ia = real(dimen_ia, kind=dp)/real(para_env_sub%num_pe, kind=dp)

      DO iib = 1, nrow_local
         i_global = row_indices(iib)
         DO jjb = 1, ncol_local
            j_global = col_indices(jjb)
            iaia = (i_global - 1)*virtual + j_global
            ! initial guess of the owner, then walk up/down until the
            ! get_limit range actually containing iaia is found
            proc_receive = int(real(iaia - 1, kind=dp)/part_ia)
            proc_receive = max(0, proc_receive)
            proc_receive = min(proc_receive, para_env_sub%num_pe - 1)
            DO
               itmp = get_limit(dimen_ia, para_env_sub%num_pe, proc_receive)
               IF (iaia >= itmp(1) .AND. iaia <= itmp(2)) EXIT
               IF (iaia < itmp(1)) proc_receive = proc_receive - 1
               IF (iaia > itmp(2)) proc_receive = proc_receive + 1
            END DO
            map_rec_size(proc_receive) = map_rec_size(proc_receive) + 1
         END DO
      END DO

      ! allocate the buffer for sending data
      number_of_send = 0
      DO proc_shift = 1, para_env_sub%num_pe - 1
         proc_send = modulo(para_env_sub%mepos + proc_shift, para_env_sub%num_pe)
         IF (map_send_size(proc_send) > 0) THEN
            number_of_send = number_of_send + 1
         END IF
      END DO
      ! allocate the structure that will hold the messages to be sent
      ALLOCATE (buffer_send(number_of_send))
      ! and the map from the grid of processes to the message position
      ALLOCATE (grid_ref_2_send_pos(0:nprow - 1, 0:npcol - 1))
      grid_ref_2_send_pos = 0
      ! finally allocate each message
      send_counter = 0
      DO proc_shift = 1, para_env_sub%num_pe - 1
         proc_send = modulo(para_env_sub%mepos + proc_shift, para_env_sub%num_pe)
         size_send_buffer = map_send_size(proc_send)
         IF (map_send_size(proc_send) > 0) THEN
            send_counter = send_counter + 1
            ! allocate the sending buffer (msg)
            ALLOCATE (buffer_send(send_counter)%msg(size_send_buffer))
            buffer_send(send_counter)%proc = proc_send
            ! get the pointer to prow, pcol of the process that has
            ! to receive this message
            ref_send_prow = mepos_2_grid(1, proc_send)
            ref_send_pcol = mepos_2_grid(2, proc_send)
            ! save the rank of the process that has to receive this message
            grid_ref_2_send_pos(ref_send_prow, ref_send_pcol) = send_counter
         END IF
      END DO

      ! allocate the buffer for receiving data
      number_of_rec = 0
      DO proc_shift = 1, para_env_sub%num_pe - 1
         proc_receive = modulo(para_env_sub%mepos - proc_shift, para_env_sub%num_pe)
         IF (map_rec_size(proc_receive) > 0) THEN
            number_of_rec = number_of_rec + 1
         END IF
      END DO

      ! allocate the structure that will hold the messages to be received
      ! and relative indices
      ALLOCATE (buffer_rec(number_of_rec))
      ALLOCATE (indices_rec(number_of_rec))
      ! finally allocate each message and fill the array of indices:
      ! the (i_local, j_local) targets in fm_ia for each incoming element,
      ! in the same iaia order the sender will pack them
      rec_counter = 0
      DO proc_shift = 1, para_env_sub%num_pe - 1
         proc_receive = modulo(para_env_sub%mepos - proc_shift, para_env_sub%num_pe)
         size_rec_buffer = map_rec_size(proc_receive)
         IF (map_rec_size(proc_receive) > 0) THEN
            rec_counter = rec_counter + 1
            ! prepare the buffer for receive
            ALLOCATE (buffer_rec(rec_counter)%msg(size_rec_buffer))
            buffer_rec(rec_counter)%proc = proc_receive
            ! create the indices array
            ALLOCATE (indices_rec(rec_counter)%map(2, size_rec_buffer))
            indices_rec(rec_counter)%map = 0
            CALL get_group_dist(gd_ia, proc_receive, rec_iaia_start, rec_iaia_end, rec_iaia_size)
            iii = 0
            DO iaia = rec_iaia_start, rec_iaia_end
               i_global = (iaia - 1)/virtual + 1
               j_global = mod(iaia - 1, virtual) + 1
               rec_prow = fm_ia%matrix_struct%g2p_row(i_global)
               rec_pcol = fm_ia%matrix_struct%g2p_col(j_global)
               ! skip elements of that sender's range not owned by me
               IF (grid_2_mepos(rec_prow, rec_pcol) /= para_env_sub%mepos) cycle
               iii = iii + 1
               i_local = fm_ia%matrix_struct%g2l_row(i_global)
               j_local = fm_ia%matrix_struct%g2l_col(j_global)
               indices_rec(rec_counter)%map(1, iii) = i_local
               indices_rec(rec_counter)%map(2, iii) = j_local
            END DO
         END IF
      END DO

      ! and create the index map for my local data (no communication needed)
      IF (map_rec_size(para_env_sub%mepos) > 0) THEN
         size_rec_buffer = map_rec_size(para_env_sub%mepos)
         ALLOCATE (indices_map_my(2, size_rec_buffer))
         indices_map_my = 0
         iii = 0
         DO iaia = my_ia_start, my_ia_end
            i_global = (iaia - 1)/virtual + 1
            j_global = mod(iaia - 1, virtual) + 1
            rec_prow = fm_ia%matrix_struct%g2p_row(i_global)
            rec_pcol = fm_ia%matrix_struct%g2p_col(j_global)
            IF (grid_2_mepos(rec_prow, rec_pcol) /= para_env_sub%mepos) cycle
            iii = iii + 1
            i_local = fm_ia%matrix_struct%g2l_row(i_global)
            j_local = fm_ia%matrix_struct%g2l_col(j_global)
            indices_map_my(1, iii) = i_local
            indices_map_my(2, iii) = j_local
         END DO
      END IF

      ! Allocate dbcsr_Gamma_3, one matrix per RI function of this group
      ALLOCATE (dbcsr_gamma_3(my_group_l_size))

      ! auxiliary vector of indices for the send buffer
      ALLOCATE (iii_vet(number_of_send))
      ! vector for the send requests
      ALLOCATE (req_send(number_of_send))
      ! loop over auxiliary basis function and redistribute into a fm
      ! and then copy the fm into a dbcsr matrix

      !DO kkB = 1, ncol_local
      DO kkb = 1, my_group_l_size
         ! zero the matrices of the buffers and post the messages to be received
         CALL cp_fm_set_all(matrix=fm_ia, alpha=0.0_dp)
         rec_counter = 0
         DO proc_shift = 1, para_env_sub%num_pe - 1
            proc_receive = modulo(para_env_sub%mepos - proc_shift, para_env_sub%num_pe)
            IF (map_rec_size(proc_receive) > 0) THEN
               rec_counter = rec_counter + 1
               buffer_rec(rec_counter)%msg = 0.0_dp
               CALL para_env_sub%irecv(buffer_rec(rec_counter)%msg, proc_receive, &
                                       buffer_rec(rec_counter)%msg_req)
            END IF
         END DO
         ! fill the sending buffer and send the messages
         DO send_counter = 1, number_of_send
            buffer_send(send_counter)%msg = 0.0_dp
         END DO
         iii_vet = 0
         jjj = 0
         DO iaia = my_ia_start, my_ia_end
            i_global = (iaia - 1)/virtual + 1
            j_global = mod(iaia - 1, virtual) + 1
            send_prow = fm_ia%matrix_struct%g2p_row(i_global)
            send_pcol = fm_ia%matrix_struct%g2p_col(j_global)
            proc_send = grid_2_mepos(send_prow, send_pcol)
            ! we don't need to send to ourselves
            IF (grid_2_mepos(send_prow, send_pcol) == para_env_sub%mepos) THEN
               ! filling fm_ia with local data
               jjj = jjj + 1
               i_local = indices_map_my(1, jjj)
               j_local = indices_map_my(2, jjj)
               fm_ia%local_data(i_local, j_local) = &
                  gamma_2d(iaia - my_ia_start + 1, kkb)

            ELSE
               ! pack into the message addressed to the owning (prow, pcol)
               send_counter = grid_ref_2_send_pos(send_prow, send_pcol)
               iii_vet(send_counter) = iii_vet(send_counter) + 1
               iii = iii_vet(send_counter)
               buffer_send(send_counter)%msg(iii) = &
                  gamma_2d(iaia - my_ia_start + 1, kkb)
            END IF
         END DO
         req_send = mp_request_null
         send_counter = 0
         DO proc_shift = 1, para_env_sub%num_pe - 1
            proc_send = modulo(para_env_sub%mepos + proc_shift, para_env_sub%num_pe)
            IF (map_send_size(proc_send) > 0) THEN
               send_counter = send_counter + 1
               CALL para_env_sub%isend(buffer_send(send_counter)%msg, proc_send, &
                                       buffer_send(send_counter)%msg_req)
               req_send(send_counter) = buffer_send(send_counter)%msg_req
            END IF
         END DO

         ! receive the messages and fill the fm_ia
         rec_counter = 0
         DO proc_shift = 1, para_env_sub%num_pe - 1
            proc_receive = modulo(para_env_sub%mepos - proc_shift, para_env_sub%num_pe)
            size_rec_buffer = map_rec_size(proc_receive)
            IF (map_rec_size(proc_receive) > 0) THEN
               rec_counter = rec_counter + 1
               ! wait for the message
               CALL buffer_rec(rec_counter)%msg_req%wait()
               ! scatter the received data using the precomputed index map
               DO iii = 1, size_rec_buffer
                  i_local = indices_rec(rec_counter)%map(1, iii)
                  j_local = indices_rec(rec_counter)%map(2, iii)
                  fm_ia%local_data(i_local, j_local) = buffer_rec(rec_counter)%msg(iii)
               END DO
            END IF
         END DO

         ! wait all outstanding sends before reusing the send buffers
         CALL mp_waitall(req_send(:))

         ! now create the DBCSR matrix and copy fm_ia into it
         CALL cp_dbcsr_m_by_n_from_template(dbcsr_gamma_3(kkb), template=mo_coeff_o, &
                                            m=homo, n=virtual, sym=dbcsr_type_no_symmetry)
         CALL copy_fm_to_dbcsr(fm_ia, dbcsr_gamma_3(kkb), keep_sparsity=.false.)

      END DO

      ! Deallocate memory

      DEALLOCATE (gamma_2d)
      DEALLOCATE (iii_vet)
      DEALLOCATE (req_send)
      IF (map_rec_size(para_env_sub%mepos) > 0) THEN
         DEALLOCATE (indices_map_my)
      END IF
      DO rec_counter = 1, number_of_rec
         DEALLOCATE (indices_rec(rec_counter)%map)
         DEALLOCATE (buffer_rec(rec_counter)%msg)
      END DO
      DEALLOCATE (indices_rec)
      DEALLOCATE (buffer_rec)
      DO send_counter = 1, number_of_send
         DEALLOCATE (buffer_send(send_counter)%msg)
      END DO
      DEALLOCATE (buffer_send)
      DEALLOCATE (map_send_size)
      DEALLOCATE (map_rec_size)
      DEALLOCATE (grid_2_mepos)
      DEALLOCATE (mepos_2_grid)
      CALL release_group_dist(gd_ia)

      ! release buffer matrix
      CALL cp_fm_release(fm_ia)

      CALL timestop(handle)

   END SUBROUTINE gamma_fm_to_dbcsr
420
! **************************************************************************************************
!> \brief Perform a pairwise non-blocking exchange of integral-matrix buffers and wait for completion
!> \param para_env ... message-passing environment over which the exchange runs
!> \param num_entries_rec ... per-rank count of entries expected from each process (0 = no message)
!> \param num_entries_send ... per-rank count of entries to deliver to each process (0 = no message)
!> \param buffer_rec ... receive buffers, indexed by source rank
!> \param buffer_send ... send buffers, indexed by destination rank
!> \param req_array ... request storage; columns 1/2 hold send requests (indx/msg),
!>                      columns 3/4 hold receive requests (indx/msg)
!> \param do_indx ... exchange the integer index arrays (default .TRUE.)
!> \param do_msg ... exchange the real message arrays (default .TRUE.)
! **************************************************************************************************
   SUBROUTINE communicate_buffer(para_env, num_entries_rec, num_entries_send, buffer_rec, buffer_send, &
                                 req_array, do_indx, do_msg)

      TYPE(mp_para_env_type), INTENT(IN) :: para_env
      INTEGER, ALLOCATABLE, DIMENSION(:), INTENT(IN) :: num_entries_rec, num_entries_send
      TYPE(integ_mat_buffer_type), ALLOCATABLE, &
         DIMENSION(:), INTENT(INOUT) :: buffer_rec, buffer_send
      TYPE(mp_request_type), DIMENSION(:, :), POINTER :: req_array
      LOGICAL, INTENT(IN), OPTIONAL :: do_indx, do_msg

      CHARACTER(LEN=*), PARAMETER :: routinen = 'communicate_buffer'

      INTEGER :: handle, ipe, nrecv, nsend
      LOGICAL :: want_indx, want_msg

      CALL timeset(routinen, handle)

      ! both kinds of payload are exchanged unless explicitly disabled
      want_indx = .TRUE.
      IF (PRESENT(do_indx)) want_indx = do_indx
      want_msg = .TRUE.
      IF (PRESENT(do_msg)) want_msg = do_msg

      IF (para_env%num_pe > 1) THEN

         nsend = 0
         nrecv = 0

         ! post every receive first, so that the matching sends find them already pending
         DO ipe = 0, para_env%num_pe - 1
            IF (num_entries_rec(ipe) > 0) THEN
               nrecv = nrecv + 1
               IF (want_indx) CALL para_env%irecv(buffer_rec(ipe)%indx, ipe, req_array(nrecv, 3), tag=4)
               IF (want_msg) CALL para_env%irecv(buffer_rec(ipe)%msg, ipe, req_array(nrecv, 4), tag=7)
            END IF
         END DO

         ! then fire off the corresponding non-blocking sends
         DO ipe = 0, para_env%num_pe - 1
            IF (num_entries_send(ipe) > 0) THEN
               nsend = nsend + 1
               IF (want_indx) CALL para_env%isend(buffer_send(ipe)%indx, ipe, req_array(nsend, 1), tag=4)
               IF (want_msg) CALL para_env%isend(buffer_send(ipe)%msg, ipe, req_array(nsend, 2), tag=7)
            END IF
         END DO

         ! block until every outstanding request has completed
         IF (want_indx) THEN
            CALL mp_waitall(req_array(1:nsend, 1))
            CALL mp_waitall(req_array(1:nrecv, 3))
         END IF

         IF (want_msg) THEN
            CALL mp_waitall(req_array(1:nsend, 2))
            CALL mp_waitall(req_array(1:nrecv, 4))
         END IF

      ELSE

         ! single rank: no communication, just copy the local buffers across
         buffer_rec(0)%indx(:, :) = buffer_send(0)%indx
         buffer_rec(0)%msg(:) = buffer_send(0)%msg

      END IF

      CALL timestop(handle)

   END SUBROUTINE communicate_buffer
503
504END MODULE rpa_communication
static GRID_HOST_DEVICE int modulo(int a, int m)
Equivalent of Fortran's MODULO, which always returns a non-negative number. https://gcc....
methods related to the blacs parallel environment
subroutine, public cp_blacs_env_release(blacs_env)
releases the given blacs_env
subroutine, public cp_blacs_env_create(blacs_env, para_env, blacs_grid_layout, blacs_repeatable, row_major, grid_2d)
allocates and initializes a type that represent a blacs context
DBCSR operations in CP2K.
subroutine, public cp_dbcsr_m_by_n_from_template(matrix, template, m, n, sym)
Utility function to create an arbitrary shaped dbcsr matrix with the same processor grid as the templ...
subroutine, public copy_fm_to_dbcsr(fm, matrix, keep_sparsity)
Copy a BLACS matrix to a dbcsr matrix.
represent the structure of a full matrix
subroutine, public cp_fm_struct_create(fmstruct, para_env, context, nrow_global, ncol_global, nrow_block, ncol_block, descriptor, first_p_pos, local_leading_dimension, template_fmstruct, square_blocks, force_block)
allocates and initializes a full matrix structure
subroutine, public cp_fm_struct_release(fmstruct)
releases a full matrix structure
represent a full matrix distributed on many processors
Definition cp_fm_types.F:15
subroutine, public cp_fm_get_info(matrix, name, nrow_global, ncol_global, nrow_block, ncol_block, nrow_local, ncol_local, row_indices, col_indices, local_data, context, nrow_locals, ncol_locals, matrix_struct, para_env)
returns all kind of information about the full matrix
subroutine, public cp_fm_set_all(matrix, alpha, beta)
set all elements of a matrix to the same value, and optionally the diagonal to a different one
subroutine, public cp_fm_create(matrix, matrix_struct, name, use_sp)
creates a new full matrix with the given structure
Types to describe group distributions.
Defines the basic variable types.
Definition kinds.F:23
integer, parameter, public dp
Definition kinds.F:34
Interface to the message passing library MPI.
type(mp_request_type), parameter, public mp_request_null
Routines for calculating RI-MP2 gradients.
subroutine, public prepare_redistribution(para_env, para_env_sub, ngroup, group_grid_2_mepos, mepos_2_grid_group, pos_info)
prepare array for redistribution
subroutine, public fm2array(mat2d, my_rows, my_start_row, my_end_row, my_cols, my_start_col, my_end_col, group_grid_2_mepos, mepos_2_grid_group, ngroup_row, ngroup_col, fm_mat)
redistribute fm to local part of array
Types needed for MP2 calculations.
Definition mp2_types.F:14
Auxiliary routines necessary to redistribute an fm_matrix from a given blacs_env to another.
subroutine, public communicate_buffer(para_env, num_entries_rec, num_entries_send, buffer_rec, buffer_send, req_array, do_indx, do_msg)
...
subroutine, public gamma_fm_to_dbcsr(fm_mat_gamma_3, dbcsr_gamma_3, para_env_rpa, para_env_sub, homo, virtual, mo_coeff_o, ngroup, my_group_l_start, my_group_l_end, my_group_l_size)
Redistribute RPA-AXK Gamma_3 density matrices: from fm to dbcsr.
All kind of helpful little routines.
Definition util.F:14
pure integer function, dimension(2), public get_limit(m, n, me)
divide m entries into n parts, return size of part me
Definition util.F:333
represent a blacs multidimensional parallel environment (for the mpi corrispective see cp_paratypes/m...
keeps the information about the structure of a full matrix
represent a full matrix
stores all the informations relevant to an mpi environment