dbt_test.F (git:1f285aa)
1 !--------------------------------------------------------------------------------------------------!
2 ! CP2K: A general program to perform molecular dynamics simulations !
3 ! Copyright 2000-2024 CP2K developers group <https://cp2k.org> !
4 ! !
5 ! SPDX-License-Identifier: GPL-2.0-or-later !
6 !--------------------------------------------------------------------------------------------------!
7 
8 ! **************************************************************************************************
9 !> \brief General methods for testing DBT tensors.
10 !> \author Patrick Seewald
11 ! **************************************************************************************************
12 MODULE dbt_test
13 
14 
15  USE dbt_tas_base, ONLY: dbt_tas_info
16  USE dbt_tas_test, ONLY: dbt_tas_checksum
17  USE dbt_methods, ONLY: &
18  dbt_copy, dbt_get_block, dbt_iterator_type, dbt_iterator_blocks_left, &
19  dbt_iterator_next_block, dbt_iterator_start, dbt_iterator_stop, &
20  dbt_reserve_blocks, dbt_get_stored_coordinates, dbt_put_block, &
21  dbt_contract, dbt_inverse_order
22  USE dbt_block, ONLY: block_nd
23  USE dbt_types, ONLY: &
24  dbt_create, dbt_destroy, dbt_type, dbt_distribution_type, &
25  dbt_distribution_new, dbt_distribution_destroy, dbt_pgrid_type, dbt_pgrid_create, &
26  dbt_pgrid_destroy, mp_environ_pgrid, dims_tensor, ndims_tensor, &
27  dbt_get_info, dbt_default_distvec
28  USE dbt_io, ONLY: &
29  dbt_write_blocks, dbt_write_block_indices
30  USE kinds, ONLY: dp, default_string_length, int_8
31  USE dbt_allocate_wrap, ONLY: allocate_any
32  USE dbt_index, ONLY: &
33  combine_tensor_index, get_2d_indices_tensor, dbt_get_mapping_info
34  USE dbm_tests, ONLY: generate_larnv_seed
35  USE message_passing, ONLY: mp_comm_type
36 
37 #include "../base/base_uses.f90"
38 
39  IMPLICIT NONE
40  PRIVATE
41  CHARACTER(len=*), PARAMETER, PRIVATE :: moduleN = 'dbt_test'
42 
43  PUBLIC :: &
44  dbt_setup_test_tensor, &
45  dbt_contract_test, &
46  dbt_test_formats, &
47  dbt_checksum, &
48  dbt_reset_randmat_seed
49 
50  INTERFACE dist_sparse_tensor_to_repl_dense_array
51  MODULE PROCEDURE dist_sparse_tensor_to_repl_dense_2d_array
52  MODULE PROCEDURE dist_sparse_tensor_to_repl_dense_3d_array
53  MODULE PROCEDURE dist_sparse_tensor_to_repl_dense_4d_array
54  END INTERFACE
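!> The generic name dispatches on the rank of the dense target array, for example
!> (illustrative sketch only; t3 and dense_3d are placeholder names, not part of this module):
!>   REAL(dp), ALLOCATABLE :: dense_3d(:, :, :)
!>   CALL dist_sparse_tensor_to_repl_dense_array(t3, dense_3d)  ! resolves to the 3d variant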
55 
56  INTEGER, SAVE :: randmat_counter = 0
57  INTEGER, PARAMETER, PRIVATE :: rand_seed_init = 12341313
58 
59 CONTAINS
60 
61 ! **************************************************************************************************
62 !> \brief check if two (arbitrarily mapped and distributed) tensors are equal.
63 !> \author Patrick Seewald
64 ! **************************************************************************************************
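!> A minimal usage sketch (t1 and t2 are placeholder tensor names, not part of this file):
!>   IF (.NOT. dbt_equal(t1, t2)) CPABORT("tensors differ")
!> Internally, tensor2 is first copied into the exact layout of tensor1 so that the two
!> tensors can be compared block by block.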
65  FUNCTION dbt_equal(tensor1, tensor2)
66  TYPE(dbt_type), INTENT(INOUT) :: tensor1, tensor2
67  LOGICAL :: dbt_equal
68 
69  TYPE(dbt_type) :: tensor2_tmp
70  TYPE(dbt_iterator_type) :: iter
71  TYPE(block_nd) :: blk_data1, blk_data2
72  INTEGER, DIMENSION(ndims_tensor(tensor1)) :: blk_size, ind_nd
73  LOGICAL :: found
74 
75  ! create a copy of tensor2 that has exact same data format as tensor1
76  CALL dbt_create(tensor1, tensor2_tmp)
77 
78  CALL dbt_reserve_blocks(tensor1, tensor2_tmp)
79  CALL dbt_copy(tensor2, tensor2_tmp)
80 
81  dbt_equal = .true.
82 
83 !$OMP PARALLEL DEFAULT(NONE) SHARED(tensor1,tensor2_tmp,dbt_equal) &
84 !$OMP PRIVATE(iter,ind_nd, blk_size,blk_data1,blk_data2,found)
85  CALL dbt_iterator_start(iter, tensor1)
86 
87  DO WHILE (dbt_iterator_blocks_left(iter))
88  CALL dbt_iterator_next_block(iter, ind_nd, blk_size=blk_size)
89  CALL dbt_get_block(tensor1, ind_nd, blk_data1, found)
90  IF (.NOT. found) cpabort("Tensor block 1 not found")
91  CALL dbt_get_block(tensor2_tmp, ind_nd, blk_data2, found)
92  IF (.NOT. found) cpabort("Tensor block 2 not found")
93 
94  IF (.NOT. blocks_equal(blk_data1, blk_data2)) THEN
95 !$OMP CRITICAL
96  dbt_equal = .false.
97 !$OMP END CRITICAL
98  END IF
99  END DO
100 
101  CALL dbt_iterator_stop(iter)
102 !$OMP END PARALLEL
103 
104  CALL dbt_destroy(tensor2_tmp)
105  END FUNCTION
106 
107 ! **************************************************************************************************
108 !> \brief check if two blocks are equal
109 !> \author Patrick Seewald
110 ! **************************************************************************************************
111  PURE FUNCTION blocks_equal(block1, block2)
112  TYPE(block_nd), INTENT(IN) :: block1, block2
113  LOGICAL :: blocks_equal
114 
115  blocks_equal = maxval(abs(block1%blk - block2%blk)) .LT. 1.0e-12_dp
116 
117  END FUNCTION
118 
119 ! **************************************************************************************************
120 !> \brief Compute factorial
121 !> \author Patrick Seewald
122 ! **************************************************************************************************
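!> For example, factorial(4) = 1*2*3*4 = 24 and factorial(0) = 1 (empty product).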
123  PURE FUNCTION factorial(n)
124  INTEGER, INTENT(IN) :: n
125  INTEGER :: k
126  INTEGER :: factorial
127  factorial = product((/(k, k=1, n)/))
128  END FUNCTION
129 
130 ! **************************************************************************************************
131 !> \brief Compute all permutations p of (1, 2, ..., n)
132 !> \author Patrick Seewald
133 ! **************************************************************************************************
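!> For example, n = 3 yields the factorial(3) = 6 permutations
!>   (1 2 3), (1 3 2), (2 1 3), (2 3 1), (3 1 2), (3 2 1)
!> as columns of p (the column order is determined by the recursive swaps below).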
134  SUBROUTINE permute(n, p)
135  INTEGER, INTENT(IN) :: n
136  INTEGER :: i, c
137  INTEGER, DIMENSION(n) :: pp
138  INTEGER, DIMENSION(n, factorial(n)), INTENT(OUT) :: p
139 
140  pp = [(i, i=1, n)]
141  c = 1
142  CALL perm(1)
143  CONTAINS
144  RECURSIVE SUBROUTINE perm(i)
145  INTEGER, INTENT(IN) :: i
146  INTEGER :: j, t
147  IF (i == n) THEN
148  p(:, c) = pp(:)
149  c = c + 1
150  ELSE
151  DO j = i, n
152  t = pp(i)
153  pp(i) = pp(j)
154  pp(j) = t
155  call perm(i + 1)
156  t = pp(i)
157  pp(i) = pp(j)
158  pp(j) = t
159  END DO
160  END IF
161  END SUBROUTINE
162  END SUBROUTINE
163 
164 ! **************************************************************************************************
165 !> \brief Test equivalence of all tensor formats, using a random distribution.
166 !> \param blk_size_i block sizes along respective dimension
167 !> \param blk_ind_i index along respective dimension of non-zero blocks
168 !> \param ndims tensor rank
169 !> \param unit_nr output unit, needs to be a valid unit number on all mpi ranks
170 !> \param verbose if .TRUE., print all tensor blocks
171 !> \author Patrick Seewald
172 ! **************************************************************************************************
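!> A hedged usage sketch for a rank-3 tensor (the block sizes and indices below are made-up
!> example data, not taken from any CP2K test input):
!>   CALL dbt_test_formats(3, mp_comm, unit_nr, verbose=.FALSE., &
!>                         blk_size_1=[2, 3], blk_size_2=[1, 2], blk_size_3=[3, 1], &
!>                         blk_ind_1=[1, 2], blk_ind_2=[2, 1], blk_ind_3=[1, 2])
!> Every matrix layout (all permutations of the dimensions and all splittings into row/column
!> index sets) is created and compared against the same reference tensor via dbt_equal.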
173  SUBROUTINE dbt_test_formats(ndims, mp_comm, unit_nr, verbose, &
174  blk_size_1, blk_size_2, blk_size_3, blk_size_4, &
175  blk_ind_1, blk_ind_2, blk_ind_3, blk_ind_4)
176  INTEGER, DIMENSION(:), INTENT(IN), OPTIONAL :: blk_size_1, blk_size_2, blk_size_3, blk_size_4
177  INTEGER, DIMENSION(:), INTENT(IN), OPTIONAL :: blk_ind_1, blk_ind_2, blk_ind_3, blk_ind_4
178  INTEGER, INTENT(IN) :: ndims
179  INTEGER, INTENT(IN) :: unit_nr
180  LOGICAL, INTENT(IN) :: verbose
181  TYPE(mp_comm_type), INTENT(IN) :: mp_comm
182  TYPE(dbt_distribution_type) :: dist1, dist2
183  TYPE(dbt_type) :: tensor1, tensor2
184  INTEGER :: isep, iblk
185  INTEGER, DIMENSION(:), ALLOCATABLE :: dist1_1, dist1_2, dist1_3, dist1_4, &
186  dist2_1, dist2_2, dist2_3, dist2_4
187  INTEGER :: nblks, imap
188  INTEGER, DIMENSION(ndims) :: pdims, myploc
189  LOGICAL :: eql
190  INTEGER :: iperm, idist, icount
191  INTEGER, DIMENSION(:), ALLOCATABLE :: map1, map2, map1_ref, map2_ref
192  INTEGER, DIMENSION(ndims, factorial(ndims)) :: perm
193  INTEGER :: io_unit
194  INTEGER :: mynode
195  TYPE(dbt_pgrid_type) :: comm_nd
196  CHARACTER(LEN=default_string_length) :: tensor_name
197 
198  ! Process grid
199  pdims(:) = 0
200  CALL dbt_pgrid_create(mp_comm, pdims, comm_nd)
201  mynode = mp_comm%mepos
202 
203  io_unit = 0
204  IF (mynode .EQ. 0) io_unit = unit_nr
205 
206  CALL permute(ndims, perm)
207  ALLOCATE (map1_ref, source=perm(1:ndims/2, 1))
208  ALLOCATE (map2_ref, source=perm(ndims/2 + 1:ndims, 1))
209 
210  IF (io_unit > 0) THEN
211  WRITE (io_unit, *)
212  WRITE (io_unit, '(A)') repeat("-", 80)
213  WRITE (io_unit, '(A,1X,I1)') "Testing matrix representations of tensor rank", ndims
214  WRITE (io_unit, '(A)') repeat("-", 80)
215  WRITE (io_unit, '(A)') "Block sizes:"
216 
217  IF (ndims >= 1) THEN
218  WRITE (io_unit, '(T4,A,1X,I1,A,1X)', advance='no') 'Dim', 1, ':'
219  DO iblk = 1, SIZE(blk_size_1)
220  WRITE (io_unit, '(I2,1X)', advance='no') blk_size_1(iblk)
221  END DO
222  WRITE (io_unit, *)
223  END IF
224  IF (ndims >= 2) THEN
225  WRITE (io_unit, '(T4,A,1X,I1,A,1X)', advance='no') 'Dim', 2, ':'
226  DO iblk = 1, SIZE(blk_size_2)
227  WRITE (io_unit, '(I2,1X)', advance='no') blk_size_2(iblk)
228  END DO
229  WRITE (io_unit, *)
230  END IF
231  IF (ndims >= 3) THEN
232  WRITE (io_unit, '(T4,A,1X,I1,A,1X)', advance='no') 'Dim', 3, ':'
233  DO iblk = 1, SIZE(blk_size_3)
234  WRITE (io_unit, '(I2,1X)', advance='no') blk_size_3(iblk)
235  END DO
236  WRITE (io_unit, *)
237  END IF
238  IF (ndims >= 4) THEN
239  WRITE (io_unit, '(T4,A,1X,I1,A,1X)', advance='no') 'Dim', 4, ':'
240  DO iblk = 1, SIZE(blk_size_4)
241  WRITE (io_unit, '(I2,1X)', advance='no') blk_size_4(iblk)
242  END DO
243  WRITE (io_unit, *)
244  END IF
245 
246  WRITE (io_unit, '(A)') "Non-zero blocks:"
247  DO iblk = 1, SIZE(blk_ind_1)
248  IF (ndims == 2) THEN
249  WRITE (io_unit, '(T4,A, I3, A, 2I3, 1X, A)') &
250  'Block', iblk, ': (', blk_ind_1(iblk), blk_ind_2(iblk), ')'
251  END IF
252  IF (ndims == 3) THEN
253  WRITE (io_unit, '(T4,A, I3, A, 3I3, 1X, A)') &
254  'Block', iblk, ': (', blk_ind_1(iblk), blk_ind_2(iblk), blk_ind_3(iblk), ')'
255  END IF
256  IF (ndims == 4) THEN
257  WRITE (io_unit, '(T4,A, I3, A, 4I3, 1X, A)') &
258  'Block', iblk, ': (', blk_ind_1(iblk), blk_ind_2(iblk), blk_ind_3(iblk), blk_ind_4(iblk), ')'
259  END IF
260  END DO
261 
262  WRITE (io_unit, *)
263  WRITE (io_unit, '(A,1X)', advance='no') "Reference map:"
264  WRITE (io_unit, '(A1,1X)', advance='no') "("
265  DO imap = 1, SIZE(map1_ref)
266  WRITE (io_unit, '(I1,1X)', advance='no') map1_ref(imap)
267  END DO
268  WRITE (io_unit, '(A1,1X)', advance='no') "|"
269  DO imap = 1, SIZE(map2_ref)
270  WRITE (io_unit, '(I1,1X)', advance='no') map2_ref(imap)
271  END DO
272  WRITE (io_unit, '(A1)') ")"
273 
274  END IF
275 
276  icount = 0
277  DO iperm = 1, factorial(ndims)
278  DO isep = 1, ndims - 1
279  icount = icount + 1
280 
281  ALLOCATE (map1, source=perm(1:isep, iperm))
282  ALLOCATE (map2, source=perm(isep + 1:ndims, iperm))
283 
284  mynode = mp_comm%mepos
285  CALL mp_environ_pgrid(comm_nd, pdims, myploc)
286 
287  IF (1 <= ndims) THEN
288  nblks = SIZE(blk_size_1)
289  ALLOCATE (dist1_1(nblks))
290  ALLOCATE (dist2_1(nblks))
291  CALL dbt_default_distvec(nblks, pdims(1), blk_size_1, dist1_1)
292  CALL dbt_default_distvec(nblks, pdims(1), blk_size_1, dist2_1)
293  END IF
294  IF (2 <= ndims) THEN
295  nblks = SIZE(blk_size_2)
296  ALLOCATE (dist1_2(nblks))
297  ALLOCATE (dist2_2(nblks))
298  CALL dbt_default_distvec(nblks, pdims(2), blk_size_2, dist1_2)
299  CALL dbt_default_distvec(nblks, pdims(2), blk_size_2, dist2_2)
300  END IF
301  IF (3 <= ndims) THEN
302  nblks = SIZE(blk_size_3)
303  ALLOCATE (dist1_3(nblks))
304  ALLOCATE (dist2_3(nblks))
305  CALL dbt_default_distvec(nblks, pdims(3), blk_size_3, dist1_3)
306  CALL dbt_default_distvec(nblks, pdims(3), blk_size_3, dist2_3)
307  END IF
308  IF (4 <= ndims) THEN
309  nblks = SIZE(blk_size_4)
310  ALLOCATE (dist1_4(nblks))
311  ALLOCATE (dist2_4(nblks))
312  CALL dbt_default_distvec(nblks, pdims(4), blk_size_4, dist1_4)
313  CALL dbt_default_distvec(nblks, pdims(4), blk_size_4, dist2_4)
314  END IF
315 
316  WRITE (tensor_name, '(A,1X,I3,1X)') "Test", icount
317 
318  IF (io_unit > 0) THEN
319  WRITE (io_unit, *)
320  WRITE (io_unit, '(A,A,1X)', advance='no') trim(tensor_name), ':'
321  WRITE (io_unit, '(A1,1X)', advance='no') "("
322  DO imap = 1, SIZE(map1)
323  WRITE (io_unit, '(I1,1X)', advance='no') map1(imap)
324  END DO
325  WRITE (io_unit, '(A1,1X)', advance='no') "|"
326  DO imap = 1, SIZE(map2)
327  WRITE (io_unit, '(I1,1X)', advance='no') map2(imap)
328  END DO
329  WRITE (io_unit, '(A1)') ")"
330 
331  WRITE (io_unit, '(T4,A)') "Reference distribution:"
332  IF (1 <= ndims) THEN
333  WRITE (io_unit, '(T7,A,1X)', advance='no') "Dist vec 1:"
334  DO idist = 1, SIZE(dist2_1)
335  WRITE (io_unit, '(I2,1X)', advance='no') dist2_1(idist)
336  END DO
337  WRITE (io_unit, *)
338  END IF
339  IF (2 <= ndims) THEN
340  WRITE (io_unit, '(T7,A,1X)', advance='no') "Dist vec 2:"
341  DO idist = 1, SIZE(dist2_2)
342  WRITE (io_unit, '(I2,1X)', advance='no') dist2_2(idist)
343  END DO
344  WRITE (io_unit, *)
345  END IF
346  IF (3 <= ndims) THEN
347  WRITE (io_unit, '(T7,A,1X)', advance='no') "Dist vec 3:"
348  DO idist = 1, SIZE(dist2_3)
349  WRITE (io_unit, '(I2,1X)', advance='no') dist2_3(idist)
350  END DO
351  WRITE (io_unit, *)
352  END IF
353  IF (4 <= ndims) THEN
354  WRITE (io_unit, '(T7,A,1X)', advance='no') "Dist vec 4:"
355  DO idist = 1, SIZE(dist2_4)
356  WRITE (io_unit, '(I2,1X)', advance='no') dist2_4(idist)
357  END DO
358  WRITE (io_unit, *)
359  END IF
360 
361  WRITE (io_unit, '(T4,A)') "Test distribution:"
362  IF (1 <= ndims) THEN
363  WRITE (io_unit, '(T7,A,1X)', advance='no') "Dist vec 1:"
364  DO idist = 1, SIZE(dist2_1)
365  WRITE (io_unit, '(I2,1X)', advance='no') dist1_1(idist)
366  END DO
367  WRITE (io_unit, *)
368  END IF
369  IF (2 <= ndims) THEN
370  WRITE (io_unit, '(T7,A,1X)', advance='no') "Dist vec 2:"
371  DO idist = 1, SIZE(dist2_2)
372  WRITE (io_unit, '(I2,1X)', advance='no') dist1_2(idist)
373  END DO
374  WRITE (io_unit, *)
375  END IF
376  IF (3 <= ndims) THEN
377  WRITE (io_unit, '(T7,A,1X)', advance='no') "Dist vec 3:"
378  DO idist = 1, SIZE(dist2_3)
379  WRITE (io_unit, '(I2,1X)', advance='no') dist1_3(idist)
380  END DO
381  WRITE (io_unit, *)
382  END IF
383  IF (4 <= ndims) THEN
384  WRITE (io_unit, '(T7,A,1X)', advance='no') "Dist vec 4:"
385  DO idist = 1, SIZE(dist2_4)
386  WRITE (io_unit, '(I2,1X)', advance='no') dist1_4(idist)
387  END DO
388  WRITE (io_unit, *)
389  END IF
390  END IF
391 
392  IF (ndims == 2) THEN
393  CALL dbt_distribution_new(dist2, comm_nd, dist2_1, dist2_2)
394  CALL dbt_create(tensor2, "Ref", dist2, map1_ref, map2_ref, &
395  blk_size_1, blk_size_2)
396  CALL dbt_setup_test_tensor(tensor2, comm_nd%mp_comm_2d, .true., blk_ind_1, blk_ind_2)
397  END IF
398  IF (ndims == 3) THEN
399  CALL dbt_distribution_new(dist2, comm_nd, dist2_1, dist2_2, dist2_3)
400  CALL dbt_create(tensor2, "Ref", dist2, map1_ref, map2_ref, &
401  blk_size_1, blk_size_2, blk_size_3)
402  CALL dbt_setup_test_tensor(tensor2, comm_nd%mp_comm_2d, .true., blk_ind_1, blk_ind_2, blk_ind_3)
403  END IF
404  IF (ndims == 4) THEN
405  CALL dbt_distribution_new(dist2, comm_nd, dist2_1, dist2_2, dist2_3, dist2_4)
406  CALL dbt_create(tensor2, "Ref", dist2, map1_ref, map2_ref, &
407  blk_size_1, blk_size_2, blk_size_3, blk_size_4)
408  CALL dbt_setup_test_tensor(tensor2, comm_nd%mp_comm_2d, .true., blk_ind_1, blk_ind_2, blk_ind_3, blk_ind_4)
409  END IF
410 
411  IF (verbose) CALL dbt_write_blocks(tensor2, io_unit, unit_nr)
412 
413  IF (ndims == 2) THEN
414  CALL dbt_distribution_new(dist1, comm_nd, dist1_1, dist1_2)
415  CALL dbt_create(tensor1, tensor_name, dist1, map1, map2, &
416  blk_size_1, blk_size_2)
417  CALL dbt_setup_test_tensor(tensor1, comm_nd%mp_comm_2d, .true., blk_ind_1, blk_ind_2)
418  END IF
419  IF (ndims == 3) THEN
420  CALL dbt_distribution_new(dist1, comm_nd, dist1_1, dist1_2, dist1_3)
421  CALL dbt_create(tensor1, tensor_name, dist1, map1, map2, &
422  blk_size_1, blk_size_2, blk_size_3)
423  CALL dbt_setup_test_tensor(tensor1, comm_nd%mp_comm_2d, .true., blk_ind_1, blk_ind_2, blk_ind_3)
424  END IF
425  IF (ndims == 4) THEN
426  CALL dbt_distribution_new(dist1, comm_nd, dist1_1, dist1_2, dist1_3, dist1_4)
427  CALL dbt_create(tensor1, tensor_name, dist1, map1, map2, &
428  blk_size_1, blk_size_2, blk_size_3, blk_size_4)
429  CALL dbt_setup_test_tensor(tensor1, comm_nd%mp_comm_2d, .true., blk_ind_1, blk_ind_2, blk_ind_3, blk_ind_4)
430  END IF
431 
432  IF (verbose) CALL dbt_write_blocks(tensor1, io_unit, unit_nr)
433 
434  eql = dbt_equal(tensor1, tensor2)
435 
436  IF (.NOT. eql) THEN
437  IF (io_unit > 0) WRITE (io_unit, '(A,1X,A)') trim(tensor_name), 'Test failed!'
438  cpabort('')
439  ELSE
440  IF (io_unit > 0) WRITE (io_unit, '(A,1X,A)') trim(tensor_name), 'Test passed!'
441  END IF
442  DEALLOCATE (map1, map2)
443 
444  CALL dbt_destroy(tensor1)
445  CALL dbt_distribution_destroy(dist1)
446 
447  CALL dbt_destroy(tensor2)
448  CALL dbt_distribution_destroy(dist2)
449 
450  IF (1 <= ndims) THEN
451  DEALLOCATE (dist1_1, dist2_1)
452  END IF
453  IF (2 <= ndims) THEN
454  DEALLOCATE (dist1_2, dist2_2)
455  END IF
456  IF (3 <= ndims) THEN
457  DEALLOCATE (dist1_3, dist2_3)
458  END IF
459  IF (4 <= ndims) THEN
460  DEALLOCATE (dist1_4, dist2_4)
461  END IF
462 
463  END DO
464  END DO
465  CALL dbt_pgrid_destroy(comm_nd)
466  END SUBROUTINE
467 
468 ! **************************************************************************************************
469 !> \brief Allocate and fill test tensor - entries are enumerated by their index s.t. they only depend
470 !> on global properties of the tensor but not on distribution, matrix representation, etc.
471 !> \param mp_comm communicator
472 !> \param blk_ind_i index along respective dimension of non-zero blocks
473 !> \author Patrick Seewald
474 ! **************************************************************************************************
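!> A minimal sketch (t2, comm_2d and the index arrays are placeholders): with enumerate=.TRUE.
!> the block values are a deterministic function of the global element index, with
!> enumerate=.FALSE. they are filled with per-block seeded random numbers (dlarnv):
!>   CALL dbt_setup_test_tensor(t2, comm_2d, .TRUE., blk_ind_1, blk_ind_2, blk_ind_3)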
475  SUBROUTINE dbt_setup_test_tensor(tensor, mp_comm, enumerate, blk_ind_1, blk_ind_2, blk_ind_3, blk_ind_4)
476  TYPE(dbt_type), INTENT(INOUT) :: tensor
477  CLASS(mp_comm_type), INTENT(IN) :: mp_comm
478  LOGICAL, INTENT(IN) :: enumerate
479  INTEGER, DIMENSION(:), INTENT(IN), OPTIONAL :: blk_ind_1, blk_ind_2, blk_ind_3, blk_ind_4
480  INTEGER :: mynode
481 
482  INTEGER :: i, ib, my_nblks_alloc, nblks_alloc, proc, nze
483  INTEGER, ALLOCATABLE, DIMENSION(:) :: my_blk_ind_1, my_blk_ind_2, my_blk_ind_3, my_blk_ind_4
484  INTEGER, DIMENSION(ndims_tensor(tensor)) :: blk_index, blk_offset, blk_size, &
485  tensor_dims
486  INTEGER, DIMENSION(:, :), ALLOCATABLE :: ind_nd
487  REAL(kind=dp), ALLOCATABLE, &
488  DIMENSION(:,:) :: blk_values_2
489  REAL(kind=dp), ALLOCATABLE, &
490  DIMENSION(:,:,:) :: blk_values_3
491  REAL(kind=dp), ALLOCATABLE, &
492  DIMENSION(:,:,:,:) :: blk_values_4
493  TYPE(dbt_iterator_type) :: iterator
494  INTEGER, DIMENSION(4) :: iseed
495  INTEGER, DIMENSION(2) :: blk_index_2d, nblks_2d
496 
497  nblks_alloc = SIZE(blk_ind_1)
498  mynode = mp_comm%mepos
499 
500  IF (.NOT. enumerate) THEN
501  cpassert(randmat_counter .NE. 0)
502 
503  randmat_counter = randmat_counter + 1
504  END IF
505 
506  ALLOCATE (ind_nd(nblks_alloc, ndims_tensor(tensor)))
507 
508 !$OMP PARALLEL DEFAULT(NONE) SHARED(ind_nd,nblks_alloc,tensor,mynode,enumerate,randmat_counter) &
509 !$OMP SHARED(blk_ind_1, blk_ind_2, blk_ind_3, blk_ind_4) &
510 !$OMP PRIVATE(my_nblks_alloc,ib,proc,i,iterator,blk_offset,blk_size,blk_index) &
511 !$OMP PRIVATE(blk_index_2d,nblks_2d,iseed,nze,tensor_dims) &
512 !$OMP PRIVATE(blk_values_2, blk_values_3, blk_values_4) &
513 !$OMP PRIVATE(my_blk_ind_1, my_blk_ind_2, my_blk_ind_3, my_blk_ind_4)
514 
515  my_nblks_alloc = 0
516 !$OMP DO
517  DO ib = 1, nblks_alloc
518  IF (ndims_tensor(tensor) == 2) THEN
519  ind_nd(ib, :) = [blk_ind_1(ib), blk_ind_2(ib)]
520  END IF
521  IF (ndims_tensor(tensor) == 3) THEN
522  ind_nd(ib, :) = [blk_ind_1(ib), blk_ind_2(ib), blk_ind_3(ib)]
523  END IF
524  IF (ndims_tensor(tensor) == 4) THEN
525  ind_nd(ib, :) = [blk_ind_1(ib), blk_ind_2(ib), blk_ind_3(ib), blk_ind_4(ib)]
526  END IF
527  CALL dbt_get_stored_coordinates(tensor, ind_nd(ib, :), proc)
528  IF (proc == mynode) THEN
529  my_nblks_alloc = my_nblks_alloc + 1
530  END IF
531  END DO
532 !$OMP END DO
533 
534  IF (ndims_tensor(tensor) >= 1) THEN
535  ALLOCATE (my_blk_ind_1(my_nblks_alloc))
536  END IF
537  IF (ndims_tensor(tensor) >= 2) THEN
538  ALLOCATE (my_blk_ind_2(my_nblks_alloc))
539  END IF
540  IF (ndims_tensor(tensor) >= 3) THEN
541  ALLOCATE (my_blk_ind_3(my_nblks_alloc))
542  END IF
543  IF (ndims_tensor(tensor) >= 4) THEN
544  ALLOCATE (my_blk_ind_4(my_nblks_alloc))
545  END IF
546 
547  i = 0
548 !$OMP DO
549  DO ib = 1, nblks_alloc
550  CALL dbt_get_stored_coordinates(tensor, ind_nd(ib, :), proc)
551  IF (proc == mynode) THEN
552  i = i + 1
553  IF (ndims_tensor(tensor) >= 1) THEN
554  my_blk_ind_1(i) = blk_ind_1(ib)
555  END IF
556  IF (ndims_tensor(tensor) >= 2) THEN
557  my_blk_ind_2(i) = blk_ind_2(ib)
558  END IF
559  IF (ndims_tensor(tensor) >= 3) THEN
560  my_blk_ind_3(i) = blk_ind_3(ib)
561  END IF
562  IF (ndims_tensor(tensor) >= 4) THEN
563  my_blk_ind_4(i) = blk_ind_4(ib)
564  END IF
565  END IF
566  END DO
567 !$OMP END DO
568 
569  IF (ndims_tensor(tensor) == 2) THEN
570  CALL dbt_reserve_blocks(tensor, my_blk_ind_1, my_blk_ind_2)
571  END IF
572  IF (ndims_tensor(tensor) == 3) THEN
573  CALL dbt_reserve_blocks(tensor, my_blk_ind_1, my_blk_ind_2, my_blk_ind_3)
574  END IF
575  IF (ndims_tensor(tensor) == 4) THEN
576  CALL dbt_reserve_blocks(tensor, my_blk_ind_1, my_blk_ind_2, my_blk_ind_3, my_blk_ind_4)
577  END IF
578 
579  CALL dbt_iterator_start(iterator, tensor)
580  DO WHILE (dbt_iterator_blocks_left(iterator))
581  CALL dbt_iterator_next_block(iterator, blk_index, blk_size=blk_size, blk_offset=blk_offset)
582 
583  IF (.NOT. enumerate) THEN
584  blk_index_2d = int(get_2d_indices_tensor(tensor%nd_index_blk, blk_index))
585  CALL dbt_get_mapping_info(tensor%nd_index_blk, dims_2d=nblks_2d)
586  iseed = generate_larnv_seed(blk_index_2d(1), nblks_2d(1), blk_index_2d(2), nblks_2d(2), randmat_counter)
587  nze = product(blk_size)
588  END IF
589 
590  IF (ndims_tensor(tensor) == 2) THEN
591  CALL allocate_any(blk_values_2, shape_spec=blk_size)
592  CALL dims_tensor(tensor, tensor_dims)
593  IF (enumerate) THEN
594  CALL enumerate_block_elements(blk_size, blk_offset, tensor_dims, blk_2=blk_values_2)
595  ELSE
596  CALL dlarnv(1, iseed, nze, blk_values_2)
597  END IF
598  CALL dbt_put_block(tensor, blk_index, blk_size, blk_values_2)
599  DEALLOCATE (blk_values_2)
600  END IF
601  IF (ndims_tensor(tensor) == 3) THEN
602  CALL allocate_any(blk_values_3, shape_spec=blk_size)
603  CALL dims_tensor(tensor, tensor_dims)
604  IF (enumerate) THEN
605  CALL enumerate_block_elements(blk_size, blk_offset, tensor_dims, blk_3=blk_values_3)
606  ELSE
607  CALL dlarnv(1, iseed, nze, blk_values_3)
608  END IF
609  CALL dbt_put_block(tensor, blk_index, blk_size, blk_values_3)
610  DEALLOCATE (blk_values_3)
611  END IF
612  IF (ndims_tensor(tensor) == 4) THEN
613  CALL allocate_any(blk_values_4, shape_spec=blk_size)
614  CALL dims_tensor(tensor, tensor_dims)
615  IF (enumerate) THEN
616  CALL enumerate_block_elements(blk_size, blk_offset, tensor_dims, blk_4=blk_values_4)
617  ELSE
618  CALL dlarnv(1, iseed, nze, blk_values_4)
619  END IF
620  CALL dbt_put_block(tensor, blk_index, blk_size, blk_values_4)
621  DEALLOCATE (blk_values_4)
622  END IF
623  END DO
624  CALL dbt_iterator_stop(iterator)
625 !$OMP END PARALLEL
626 
627  END SUBROUTINE
628 
629 ! **************************************************************************************************
630 !> \brief Enumerate tensor entries in block
631 !> \param blk_2 block values for 2 dimensions
632 !> \param blk_3 block values for 3 dimensions
!> \param blk_4 block values for 4 dimensions
633 !> \param blk_size size of block
634 !> \param blk_offset block offset (indices of first element)
635 !> \param tensor_size global tensor sizes
636 !> \author Patrick Seewald
637 ! **************************************************************************************************
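!> Worked example (assuming the column-major flattening of combine_tensor_index): for a 2d
!> tensor with tensor_size = [4, 4] and a block with blk_offset = [3, 1], the block element
!> at local position (1, 2) corresponds to global index (3, 2) and is therefore set to
!> 3 + (2 - 1)*4 = 7.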
638  SUBROUTINE enumerate_block_elements(blk_size, blk_offset, tensor_size, blk_2, blk_3, blk_4)
639  INTEGER, DIMENSION(:), INTENT(IN) :: blk_size, blk_offset, tensor_size
640  REAL(kind=dp), DIMENSION(:,:), &
641  OPTIONAL, INTENT(OUT) :: blk_2
642  REAL(kind=dp), DIMENSION(:,:,:), &
643  OPTIONAL, INTENT(OUT) :: blk_3
644  REAL(kind=dp), DIMENSION(:,:,:,:), &
645  OPTIONAL, INTENT(OUT) :: blk_4
646  INTEGER :: ndim
647  INTEGER, DIMENSION(SIZE(blk_size)) :: arr_ind, tens_ind
648  INTEGER :: i_1, i_2, i_3, i_4
649 
650  ndim = SIZE(tensor_size)
651 
652  IF (ndim == 2) THEN
653  DO i_2 = 1, blk_size(2)
654  DO i_1 = 1, blk_size(1)
655  arr_ind(:) = [i_1, i_2]
656  tens_ind(:) = arr_ind(:) + blk_offset(:) - 1
657  blk_2(arr_ind(1), arr_ind(2)) = combine_tensor_index(tens_ind, tensor_size)
658  END DO
659  END DO
660  END IF
661  IF (ndim == 3) THEN
662  DO i_3 = 1, blk_size(3)
663  DO i_2 = 1, blk_size(2)
664  DO i_1 = 1, blk_size(1)
665  arr_ind(:) = [i_1, i_2, i_3]
666  tens_ind(:) = arr_ind(:) + blk_offset(:) - 1
667  blk_3(arr_ind(1), arr_ind(2), arr_ind(3)) = combine_tensor_index(tens_ind, tensor_size)
668  END DO
669  END DO
670  END DO
671  END IF
672  IF (ndim == 4) THEN
673  DO i_4 = 1, blk_size(4)
674  DO i_3 = 1, blk_size(3)
675  DO i_2 = 1, blk_size(2)
676  DO i_1 = 1, blk_size(1)
677  arr_ind(:) = [i_1, i_2, i_3, i_4]
678  tens_ind(:) = arr_ind(:) + blk_offset(:) - 1
679  blk_4(arr_ind(1), arr_ind(2), arr_ind(3), arr_ind(4)) = combine_tensor_index(tens_ind, tensor_size)
680  END DO
681  END DO
682  END DO
683  END DO
684  END IF
685 
686  END SUBROUTINE
687 
688 ! **************************************************************************************************
689 !> \brief Transform a distributed sparse tensor to a replicated dense array. This is only useful for
690 !> testing tensor contraction by matrix multiplication of dense arrays.
691 !> \author Patrick Seewald
692 ! **************************************************************************************************
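!> A minimal sketch (dense and dense_ref are placeholder arrays): the replicated dense array
!> can be compared element-wise against a reference result, e.g.
!>   REAL(dp), ALLOCATABLE :: dense(:, :)
!>   CALL dist_sparse_tensor_to_repl_dense_array(tensor, dense)
!>   max_diff = MAXVAL(ABS(dense - dense_ref))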
693  SUBROUTINE dist_sparse_tensor_to_repl_dense_2d_array(tensor, array)
694 
695  TYPE(dbt_type), INTENT(INOUT) :: tensor
696  REAL(dp), ALLOCATABLE, DIMENSION(:,:), &
697  INTENT(OUT) :: array
698  REAL(dp), ALLOCATABLE, DIMENSION(:,:) :: block
699  INTEGER, DIMENSION(ndims_tensor(tensor)) :: dims_nd, ind_nd, blk_size, blk_offset
700  TYPE(dbt_iterator_type) :: iterator
701  INTEGER :: idim
702  INTEGER, DIMENSION(ndims_tensor(tensor)) :: blk_start, blk_end
703  LOGICAL :: found
704 
705  cpassert(ndims_tensor(tensor) .EQ. 2)
706  CALL dbt_get_info(tensor, nfull_total=dims_nd)
707  CALL allocate_any(array, shape_spec=dims_nd)
708  array(:,:) = 0.0_dp
709 
710 !$OMP PARALLEL DEFAULT(NONE) SHARED(tensor,array) &
711 !$OMP PRIVATE(iterator,ind_nd,blk_size,blk_offset,block,found,idim,blk_start,blk_end)
712  CALL dbt_iterator_start(iterator, tensor)
713  DO WHILE (dbt_iterator_blocks_left(iterator))
714  CALL dbt_iterator_next_block(iterator, ind_nd, blk_size=blk_size, blk_offset=blk_offset)
715  CALL dbt_get_block(tensor, ind_nd, block, found)
716  cpassert(found)
717 
718  DO idim = 1, ndims_tensor(tensor)
719  blk_start(idim) = blk_offset(idim)
720  blk_end(idim) = blk_offset(idim) + blk_size(idim) - 1
721  END DO
722  array(blk_start(1):blk_end(1), blk_start(2):blk_end(2)) = &
723  block(:,:)
724 
725  DEALLOCATE (block)
726  END DO
727  CALL dbt_iterator_stop(iterator)
728 !$OMP END PARALLEL
729  CALL tensor%pgrid%mp_comm_2d%sum(array)
730 
731  END SUBROUTINE
732 ! **************************************************************************************************
733 !> \brief Transform a distributed sparse tensor to a replicated dense array. This is only useful for
734 !> testing tensor contraction by matrix multiplication of dense arrays.
735 !> \author Patrick Seewald
736 ! **************************************************************************************************
737  SUBROUTINE dist_sparse_tensor_to_repl_dense_3d_array(tensor, array)
738 
739  TYPE(dbt_type), INTENT(INOUT) :: tensor
740  REAL(dp), ALLOCATABLE, DIMENSION(:,:,:), &
741  INTENT(OUT) :: array
742  REAL(dp), ALLOCATABLE, DIMENSION(:,:,:) :: block
743  INTEGER, DIMENSION(ndims_tensor(tensor)) :: dims_nd, ind_nd, blk_size, blk_offset
744  TYPE(dbt_iterator_type) :: iterator
745  INTEGER :: idim
746  INTEGER, DIMENSION(ndims_tensor(tensor)) :: blk_start, blk_end
747  LOGICAL :: found
748 
749  cpassert(ndims_tensor(tensor) .EQ. 3)
750  CALL dbt_get_info(tensor, nfull_total=dims_nd)
751  CALL allocate_any(array, shape_spec=dims_nd)
752  array(:,:,:) = 0.0_dp
753 
754 !$OMP PARALLEL DEFAULT(NONE) SHARED(tensor,array) &
755 !$OMP PRIVATE(iterator,ind_nd,blk_size,blk_offset,block,found,idim,blk_start,blk_end)
756  CALL dbt_iterator_start(iterator, tensor)
757  DO WHILE (dbt_iterator_blocks_left(iterator))
758  CALL dbt_iterator_next_block(iterator, ind_nd, blk_size=blk_size, blk_offset=blk_offset)
759  CALL dbt_get_block(tensor, ind_nd, block, found)
760  cpassert(found)
761 
762  DO idim = 1, ndims_tensor(tensor)
763  blk_start(idim) = blk_offset(idim)
764  blk_end(idim) = blk_offset(idim) + blk_size(idim) - 1
765  END DO
766  array(blk_start(1):blk_end(1), blk_start(2):blk_end(2), blk_start(3):blk_end(3)) = &
767  block(:,:,:)
768 
769  DEALLOCATE (block)
770  END DO
771  CALL dbt_iterator_stop(iterator)
772 !$OMP END PARALLEL
773  CALL tensor%pgrid%mp_comm_2d%sum(array)
774 
775  END SUBROUTINE
776 ! **************************************************************************************************
777 !> \brief Transform a distributed sparse tensor to a replicated dense array. This is only useful for
778 !> testing tensor contraction by matrix multiplication of dense arrays.
779 !> \author Patrick Seewald
780 ! **************************************************************************************************
781  SUBROUTINE dist_sparse_tensor_to_repl_dense_4d_array(tensor, array)
782 
783  TYPE(dbt_type), INTENT(INOUT) :: tensor
784  REAL(dp), ALLOCATABLE, DIMENSION(:,:,:,:), &
785  INTENT(OUT) :: array
786  REAL(dp), ALLOCATABLE, DIMENSION(:,:,:,:) :: block
787  INTEGER, DIMENSION(ndims_tensor(tensor)) :: dims_nd, ind_nd, blk_size, blk_offset
788  TYPE(dbt_iterator_type) :: iterator
789  INTEGER :: idim
790  INTEGER, DIMENSION(ndims_tensor(tensor)) :: blk_start, blk_end
791  LOGICAL :: found
792 
793  cpassert(ndims_tensor(tensor) .EQ. 4)
794  CALL dbt_get_info(tensor, nfull_total=dims_nd)
795  CALL allocate_any(array, shape_spec=dims_nd)
796  array(:,:,:,:) = 0.0_dp
797 
798 !$OMP PARALLEL DEFAULT(NONE) SHARED(tensor,array) &
799 !$OMP PRIVATE(iterator,ind_nd,blk_size,blk_offset,block,found,idim,blk_start,blk_end)
800  CALL dbt_iterator_start(iterator, tensor)
801  DO WHILE (dbt_iterator_blocks_left(iterator))
802  CALL dbt_iterator_next_block(iterator, ind_nd, blk_size=blk_size, blk_offset=blk_offset)
803  CALL dbt_get_block(tensor, ind_nd, block, found)
804  cpassert(found)
805 
806  DO idim = 1, ndims_tensor(tensor)
807  blk_start(idim) = blk_offset(idim)
808  blk_end(idim) = blk_offset(idim) + blk_size(idim) - 1
809  END DO
810  array(blk_start(1):blk_end(1), blk_start(2):blk_end(2), blk_start(3):blk_end(3), blk_start(4):blk_end(4)) = &
811  block(:,:,:,:)
812 
813  DEALLOCATE (block)
814  END DO
815  CALL dbt_iterator_stop(iterator)
816 !$OMP END PARALLEL
817  CALL tensor%pgrid%mp_comm_2d%sum(array)
818 
819  END SUBROUTINE
820 
821 ! **************************************************************************************************
822 !> \brief test tensor contraction
823 !> \note for testing/debugging, simply replace a call to dbt_contract with a call to this routine
824 !> \author Patrick Seewald
825 ! **************************************************************************************************
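!> A hedged sketch of the drop-in replacement mentioned above (t1, t2, t3 and the index lists
!> stand for the arguments of an existing dbt_contract call; unit_nr must be supplied in
!> addition):
!>   ! CALL dbt_contract(alpha, t1, t2, beta, t3, contract_1, notcontract_1, &
!>   !                   contract_2, notcontract_2, map_1, map_2)
!>   CALL dbt_contract_test(alpha, t1, t2, beta, t3, contract_1, notcontract_1, &
!>                          contract_2, notcontract_2, map_1, map_2, unit_nr)
!> The routine performs the contraction, converts all three tensors to replicated dense
!> arrays and verifies the result against a plain MATMUL of the matricized operands.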
826  SUBROUTINE dbt_contract_test(alpha, tensor_1, tensor_2, beta, tensor_3, &
827  contract_1, notcontract_1, &
828  contract_2, notcontract_2, &
829  map_1, map_2, &
830  unit_nr, &
831  bounds_1, bounds_2, bounds_3, &
832  log_verbose, write_int)
833 
834  REAL(dp), INTENT(IN) :: alpha
835  TYPE(dbt_type), INTENT(INOUT) :: tensor_1, tensor_2, tensor_3
836  REAL(dp), INTENT(IN) :: beta
837  INTEGER, DIMENSION(:), INTENT(IN) :: contract_1, contract_2, &
838  notcontract_1, notcontract_2, &
839  map_1, map_2
840  INTEGER, INTENT(IN) :: unit_nr
841  INTEGER, DIMENSION(2, SIZE(contract_1)), &
842  OPTIONAL :: bounds_1
843  INTEGER, DIMENSION(2, SIZE(notcontract_1)), &
844  OPTIONAL :: bounds_2
845  INTEGER, DIMENSION(2, SIZE(notcontract_2)), &
846  OPTIONAL :: bounds_3
847  LOGICAL, INTENT(IN), OPTIONAL :: log_verbose
848  LOGICAL, INTENT(IN), OPTIONAL :: write_int
849  INTEGER :: io_unit, mynode
850  TYPE(mp_comm_type) :: mp_comm
851  INTEGER, DIMENSION(:), ALLOCATABLE :: size_1, size_2, size_3, &
852  order_t1, order_t2, order_t3
853  INTEGER, DIMENSION(2, ndims_tensor(tensor_1)) :: bounds_t1
854  INTEGER, DIMENSION(2, ndims_tensor(tensor_2)) :: bounds_t2
855 
856  REAL(kind=dp), ALLOCATABLE, &
857  DIMENSION(:,:) :: array_1_2d, &
858  array_2_2d, &
859  array_3_2d, &
860  array_1_2d_full, &
861  array_2_2d_full, &
862  array_3_0_2d, &
863  array_1_rs2d, &
864  array_2_rs2d, &
865  array_3_rs2d, &
866  array_3_0_rs2d
867  REAL(kind=dp), ALLOCATABLE, &
868  DIMENSION(:,:,:) :: array_1_3d, &
869  array_2_3d, &
870  array_3_3d, &
871  array_1_3d_full, &
872  array_2_3d_full, &
873  array_3_0_3d, &
874  array_1_rs3d, &
875  array_2_rs3d, &
876  array_3_rs3d, &
877  array_3_0_rs3d
878  REAL(kind=dp), ALLOCATABLE, &
879  DIMENSION(:,:,:,:) :: array_1_4d, &
880  array_2_4d, &
881  array_3_4d, &
882  array_1_4d_full, &
883  array_2_4d_full, &
884  array_3_0_4d, &
885  array_1_rs4d, &
886  array_2_rs4d, &
887  array_3_rs4d, &
888  array_3_0_rs4d
889  REAL(kind=dp), ALLOCATABLE, &
890  DIMENSION(:, :) :: array_1_mm, &
891  array_2_mm, &
892  array_3_mm, &
893  array_3_test_mm
894  LOGICAL :: eql, notzero
895  LOGICAL, PARAMETER :: debug = .false.
896  REAL(kind=dp) :: cs_1, cs_2, cs_3, eql_diff
897  LOGICAL :: do_crop_1, do_crop_2
898 
899  mp_comm = tensor_1%pgrid%mp_comm_2d
900  mynode = mp_comm%mepos
901  io_unit = -1
902  IF (mynode .EQ. 0) io_unit = unit_nr
903 
904  cs_1 = dbt_checksum(tensor_1)
905  cs_2 = dbt_checksum(tensor_2)
906  cs_3 = dbt_checksum(tensor_3)
907 
908  IF (io_unit > 0) THEN
909  WRITE (io_unit, *)
910  WRITE (io_unit, '(A)') repeat("-", 80)
911  WRITE (io_unit, '(A,1X,A,1X,A,1X,A,1X,A,1X,A)') "Testing tensor contraction", &
912  trim(tensor_1%name), "x", trim(tensor_2%name), "=", trim(tensor_3%name)
913  WRITE (io_unit, '(A)') repeat("-", 80)
914  END IF
915 
916  IF (debug) THEN
917  IF (io_unit > 0) THEN
918  WRITE (io_unit, "(A, E9.2)") "checksum "//trim(tensor_1%name), cs_1
919  WRITE (io_unit, "(A, E9.2)") "checksum "//trim(tensor_2%name), cs_2
920  WRITE (io_unit, "(A, E9.2)") "checksum "//trim(tensor_3%name), cs_3
921  END IF
922  END IF
923 
924  IF (debug) THEN
925  CALL dbt_write_block_indices(tensor_1, io_unit, unit_nr)
926  CALL dbt_write_blocks(tensor_1, io_unit, unit_nr, write_int)
927  END IF
928 
929  SELECT CASE (ndims_tensor(tensor_3))
930  CASE (2)
931  CALL dist_sparse_tensor_to_repl_dense_array(tensor_3, array_3_0_2d)
932  CASE (3)
933  CALL dist_sparse_tensor_to_repl_dense_array(tensor_3, array_3_0_3d)
934  CASE (4)
935  CALL dist_sparse_tensor_to_repl_dense_array(tensor_3, array_3_0_4d)
936  END SELECT
937 
938  CALL dbt_contract(alpha, tensor_1, tensor_2, beta, tensor_3, &
939  contract_1, notcontract_1, &
940  contract_2, notcontract_2, &
941  map_1, map_2, &
942  bounds_1=bounds_1, bounds_2=bounds_2, bounds_3=bounds_3, &
943  filter_eps=1.0e-12_dp, &
944  unit_nr=io_unit, log_verbose=log_verbose)
945 
946  cs_3 = dbt_checksum(tensor_3)
947 
948  IF (debug) THEN
949  IF (io_unit > 0) THEN
950  WRITE (io_unit, "(A, E9.2)") "checksum "//trim(tensor_3%name), cs_3
951  END IF
952  END IF
953 
954  do_crop_1 = .false.; do_crop_2 = .false.!; do_crop_3 = .FALSE.
955 
956  ! crop tensor as first step
957  bounds_t1(1, :) = 1
958  CALL dbt_get_info(tensor_1, nfull_total=bounds_t1(2, :))
959 
960  bounds_t2(1, :) = 1
961  CALL dbt_get_info(tensor_2, nfull_total=bounds_t2(2, :))
962 
963  IF (PRESENT(bounds_1)) THEN
964  bounds_t1(:, contract_1) = bounds_1
965  do_crop_1 = .true.
966  bounds_t2(:, contract_2) = bounds_1
967  do_crop_2 = .true.
968  END IF
969 
970  IF (PRESENT(bounds_2)) THEN
971  bounds_t1(:, notcontract_1) = bounds_2
972  do_crop_1 = .true.
973  END IF
974 
975  IF (PRESENT(bounds_3)) THEN
976  bounds_t2(:, notcontract_2) = bounds_3
977  do_crop_2 = .true.
978  END IF
979 
980  ! Convert tensors to simple multidimensional arrays
981  SELECT CASE (ndims_tensor(tensor_1))
982  CASE (2)
983  CALL dist_sparse_tensor_to_repl_dense_array(tensor_1, array_1_2d_full)
984  CALL allocate_any(array_1_2d, shape_spec=shape(array_1_2d_full))
985  array_1_2d = 0.0_dp
986  array_1_2d(bounds_t1(1, 1):bounds_t1(2, 1), bounds_t1(1, 2):bounds_t1(2, 2)) = &
987  array_1_2d_full(bounds_t1(1, 1):bounds_t1(2, 1), bounds_t1(1, 2):bounds_t1(2, 2))
988 
989  CASE (3)
990  CALL dist_sparse_tensor_to_repl_dense_array(tensor_1, array_1_3d_full)
991  CALL allocate_any(array_1_3d, shape_spec=shape(array_1_3d_full))
992  array_1_3d = 0.0_dp
993  array_1_3d(bounds_t1(1, 1):bounds_t1(2, 1), bounds_t1(1, 2):bounds_t1(2, 2), bounds_t1(1, 3):bounds_t1(2, 3)) = &
994  array_1_3d_full(bounds_t1(1, 1):bounds_t1(2, 1), bounds_t1(1, 2):bounds_t1(2, 2), bounds_t1(1, 3):bounds_t1(2, 3))
995 
996  CASE (4)
997  CALL dist_sparse_tensor_to_repl_dense_array(tensor_1, array_1_4d_full)
998  CALL allocate_any(array_1_4d, shape_spec=shape(array_1_4d_full))
999  array_1_4d = 0.0_dp
1000  array_1_4d(bounds_t1(1, 1):bounds_t1(2, 1), bounds_t1(1, 2):bounds_t1(2, 2), bounds_t1(1, 3):bounds_t1(2, 3),&
1001  & bounds_t1(1, 4):bounds_t1(2, 4)) = &
1002  array_1_4d_full(bounds_t1(1, 1):bounds_t1(2, 1), bounds_t1(1, 2):bounds_t1(2, 2), bounds_t1(1, 3):bounds_t1(2, 3),&
1003  & bounds_t1(1, 4):bounds_t1(2, 4))
1004 
1005  END SELECT
1006  SELECT CASE (ndims_tensor(tensor_2))
1007  CASE (2)
1008  CALL dist_sparse_tensor_to_repl_dense_array(tensor_2, array_2_2d_full)
1009  CALL allocate_any(array_2_2d, shape_spec=shape(array_2_2d_full))
1010  array_2_2d = 0.0_dp
1011  array_2_2d(bounds_t2(1, 1):bounds_t2(2, 1), bounds_t2(1, 2):bounds_t2(2, 2)) = &
1012  array_2_2d_full(bounds_t2(1, 1):bounds_t2(2, 1), bounds_t2(1, 2):bounds_t2(2, 2))
1013 
1014  CASE (3)
1015  CALL dist_sparse_tensor_to_repl_dense_array(tensor_2, array_2_3d_full)
1016  CALL allocate_any(array_2_3d, shape_spec=shape(array_2_3d_full))
1017  array_2_3d = 0.0_dp
1018  array_2_3d(bounds_t2(1, 1):bounds_t2(2, 1), bounds_t2(1, 2):bounds_t2(2, 2), bounds_t2(1, 3):bounds_t2(2, 3)) = &
1019  array_2_3d_full(bounds_t2(1, 1):bounds_t2(2, 1), bounds_t2(1, 2):bounds_t2(2, 2), bounds_t2(1, 3):bounds_t2(2, 3))
1020 
1021  CASE (4)
1022  CALL dist_sparse_tensor_to_repl_dense_array(tensor_2, array_2_4d_full)
1023  CALL allocate_any(array_2_4d, shape_spec=shape(array_2_4d_full))
1024  array_2_4d = 0.0_dp
1025  array_2_4d(bounds_t2(1, 1):bounds_t2(2, 1), bounds_t2(1, 2):bounds_t2(2, 2), bounds_t2(1, 3):bounds_t2(2, 3),&
1026  & bounds_t2(1, 4):bounds_t2(2, 4)) = &
1027  array_2_4d_full(bounds_t2(1, 1):bounds_t2(2, 1), bounds_t2(1, 2):bounds_t2(2, 2), bounds_t2(1, 3):bounds_t2(2, 3),&
1028  & bounds_t2(1, 4):bounds_t2(2, 4))
1029 
1030  END SELECT
1031  SELECT CASE (ndims_tensor(tensor_3))
1032  CASE (2)
1033  CALL dist_sparse_tensor_to_repl_dense_array(tensor_3, array_3_2d)
1034 
1035  CASE (3)
1036  CALL dist_sparse_tensor_to_repl_dense_array(tensor_3, array_3_3d)
1037 
1038  CASE (4)
1039  CALL dist_sparse_tensor_to_repl_dense_array(tensor_3, array_3_4d)
1040 
1041  END SELECT
1042 
1043  ! Get array sizes
1044 
1045  SELECT CASE (ndims_tensor(tensor_1))
1046  CASE (2)
1047  ALLOCATE (size_1, source=shape(array_1_2d))
1048 
1049  CASE (3)
1050  ALLOCATE (size_1, source=shape(array_1_3d))
1051 
1052  CASE (4)
1053  ALLOCATE (size_1, source=shape(array_1_4d))
1054 
1055  END SELECT
1056  SELECT CASE (ndims_tensor(tensor_2))
1057  CASE (2)
1058  ALLOCATE (size_2, source=shape(array_2_2d))
1059 
1060  CASE (3)
1061  ALLOCATE (size_2, source=shape(array_2_3d))
1062 
1063  CASE (4)
1064  ALLOCATE (size_2, source=shape(array_2_4d))
1065 
1066  END SELECT
1067  SELECT CASE (ndims_tensor(tensor_3))
1068  CASE (2)
1069  ALLOCATE (size_3, source=shape(array_3_2d))
1070 
1071  CASE (3)
1072  ALLOCATE (size_3, source=shape(array_3_3d))
1073 
1074  CASE (4)
1075  ALLOCATE (size_3, source=shape(array_3_4d))
1076 
1077  END SELECT
1078 
1079  ALLOCATE (order_t1(ndims_tensor(tensor_1)))
1080  ALLOCATE (order_t2(ndims_tensor(tensor_2)))
1081  ALLOCATE (order_t3(ndims_tensor(tensor_3)))
1082 
1083  associate(map_t1_1 => notcontract_1, map_t1_2 => contract_1, &
1084  map_t2_1 => notcontract_2, map_t2_2 => contract_2, &
1085  map_t3_1 => map_1, map_t3_2 => map_2)
1086 
1087  order_t1(:) = dbt_inverse_order([map_t1_1, map_t1_2])
1088 
1089  SELECT CASE (ndims_tensor(tensor_1))
1090  CASE (2)
1091  CALL allocate_any(array_1_rs2d, source=array_1_2d, order=order_t1)
1092  CALL allocate_any(array_1_mm, sizes_2d(size_1, map_t1_1, map_t1_2))
1093  array_1_mm(:, :) = reshape(array_1_rs2d, shape(array_1_mm))
1094  CASE (3)
1095  CALL allocate_any(array_1_rs3d, source=array_1_3d, order=order_t1)
1096  CALL allocate_any(array_1_mm, sizes_2d(size_1, map_t1_1, map_t1_2))
1097  array_1_mm(:, :) = reshape(array_1_rs3d, shape(array_1_mm))
1098  CASE (4)
1099  CALL allocate_any(array_1_rs4d, source=array_1_4d, order=order_t1)
1100  CALL allocate_any(array_1_mm, sizes_2d(size_1, map_t1_1, map_t1_2))
1101  array_1_mm(:, :) = reshape(array_1_rs4d, shape(array_1_mm))
1102  END SELECT
1103  order_t2(:) = dbt_inverse_order([map_t2_1, map_t2_2])
1104 
1105  SELECT CASE (ndims_tensor(tensor_2))
1106  CASE (2)
1107  CALL allocate_any(array_2_rs2d, source=array_2_2d, order=order_t2)
1108  CALL allocate_any(array_2_mm, sizes_2d(size_2, map_t2_1, map_t2_2))
1109  array_2_mm(:, :) = reshape(array_2_rs2d, shape(array_2_mm))
1110  CASE (3)
1111  CALL allocate_any(array_2_rs3d, source=array_2_3d, order=order_t2)
1112  CALL allocate_any(array_2_mm, sizes_2d(size_2, map_t2_1, map_t2_2))
1113  array_2_mm(:, :) = reshape(array_2_rs3d, shape(array_2_mm))
1114  CASE (4)
1115  CALL allocate_any(array_2_rs4d, source=array_2_4d, order=order_t2)
1116  CALL allocate_any(array_2_mm, sizes_2d(size_2, map_t2_1, map_t2_2))
1117  array_2_mm(:, :) = reshape(array_2_rs4d, shape(array_2_mm))
1118  END SELECT
1119  order_t3(:) = dbt_inverse_order([map_t3_1, map_t3_2])
1120 
1121  SELECT CASE (ndims_tensor(tensor_3))
1122  CASE (2)
1123  CALL allocate_any(array_3_rs2d, source=array_3_2d, order=order_t3)
1124  CALL allocate_any(array_3_mm, sizes_2d(size_3, map_t3_1, map_t3_2))
1125  array_3_mm(:, :) = reshape(array_3_rs2d, shape(array_3_mm))
1126  CASE (3)
1127  CALL allocate_any(array_3_rs3d, source=array_3_3d, order=order_t3)
1128  CALL allocate_any(array_3_mm, sizes_2d(size_3, map_t3_1, map_t3_2))
1129  array_3_mm(:, :) = reshape(array_3_rs3d, shape(array_3_mm))
1130  CASE (4)
1131  CALL allocate_any(array_3_rs4d, source=array_3_4d, order=order_t3)
1132  CALL allocate_any(array_3_mm, sizes_2d(size_3, map_t3_1, map_t3_2))
1133  array_3_mm(:, :) = reshape(array_3_rs4d, shape(array_3_mm))
1134  END SELECT
1135 
1136  SELECT CASE (ndims_tensor(tensor_3))
1137  CASE (2)
1138  CALL allocate_any(array_3_0_rs2d, source=array_3_0_2d, order=order_t3)
1139  CALL allocate_any(array_3_test_mm, sizes_2d(size_3, map_t3_1, map_t3_2))
1140  array_3_test_mm(:, :) = reshape(array_3_0_rs2d, shape(array_3_mm))
1141  CASE (3)
1142  CALL allocate_any(array_3_0_rs3d, source=array_3_0_3d, order=order_t3)
1143  CALL allocate_any(array_3_test_mm, sizes_2d(size_3, map_t3_1, map_t3_2))
1144  array_3_test_mm(:, :) = reshape(array_3_0_rs3d, shape(array_3_mm))
1145  CASE (4)
1146  CALL allocate_any(array_3_0_rs4d, source=array_3_0_4d, order=order_t3)
1147  CALL allocate_any(array_3_test_mm, sizes_2d(size_3, map_t3_1, map_t3_2))
1148  array_3_test_mm(:, :) = reshape(array_3_0_rs4d, shape(array_3_mm))
1149  END SELECT
1150 
1151  array_3_test_mm(:, :) = beta*array_3_test_mm(:, :) + alpha*matmul(array_1_mm, transpose(array_2_mm))
1152 
1153  END associate
1154 
1155  eql_diff = maxval(abs(array_3_test_mm(:, :) - array_3_mm(:, :)))
1156  notzero = maxval(abs(array_3_test_mm(:, :))) .GT. 1.0e-12_dp
1157 
1158  eql = eql_diff .LT. 1.0e-11_dp
1159 
1160  IF (.NOT. eql .OR. .NOT. notzero) THEN
1161  IF (io_unit > 0) WRITE (io_unit, *) 'Test failed!', eql_diff
1162  cpabort('')
1163  ELSE
1164  IF (io_unit > 0) WRITE (io_unit, *) 'Test passed!', eql_diff
1165  END IF
1166 
1167  END SUBROUTINE
1168 
1169 ! **************************************************************************************************
1170 !> \brief mapped sizes in 2d
1171 !> \author Patrick Seewald
1172 ! **************************************************************************************************
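!> For example, nd_sizes = [3, 4, 5] with map1 = [1, 3] and map2 = [2] gives
!> sizes_2d = [3*5, 4] = [15, 4].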
1173  FUNCTION sizes_2d(nd_sizes, map1, map2)
1174  INTEGER, DIMENSION(:), INTENT(IN) :: nd_sizes, map1, map2
1175  INTEGER, DIMENSION(2) :: sizes_2d
1176  sizes_2d(1) = product(nd_sizes(map1))
1177  sizes_2d(2) = product(nd_sizes(map2))
1178  END FUNCTION
1179 
1180 ! **************************************************************************************************
1181 !> \brief checksum of a tensor consistent with block_checksum
1182 !> \author Patrick Seewald
1183 ! **************************************************************************************************
1184  FUNCTION dbt_checksum(tensor)
1185  TYPE(dbt_type), INTENT(IN) :: tensor
1186  REAL(kind=dp) :: dbt_checksum
1187  dbt_checksum = dbt_tas_checksum(tensor%matrix_rep)
1188  END FUNCTION
1189 
1190 ! **************************************************************************************************
1191 !> \brief Reset the seed used for generating random matrices to default value
1192 !> \author Patrick Seewald
1193 ! **************************************************************************************************
1194  SUBROUTINE dbt_reset_randmat_seed()
1195  randmat_counter = rand_seed_init
1196  END SUBROUTINE
1197 
1198  END MODULE