18 dbt_copy, dbt_get_block, dbt_iterator_type, dbt_iterator_blocks_left, &
19 dbt_iterator_next_block, dbt_iterator_start, dbt_iterator_stop, &
20 dbt_reserve_blocks, dbt_get_stored_coordinates, dbt_put_block, &
24 dbt_create,
dbt_destroy, dbt_type, dbt_distribution_type, &
37 #include "../base/base_uses.f90"
41 CHARACTER(len=*),
PARAMETER,
PRIVATE :: moduleN =
'dbt_test'
50 INTERFACE dist_sparse_tensor_to_repl_dense_array
51 MODULE PROCEDURE dist_sparse_tensor_to_repl_dense_2d_array
52 MODULE PROCEDURE dist_sparse_tensor_to_repl_dense_3d_array
53 MODULE PROCEDURE dist_sparse_tensor_to_repl_dense_4d_array
56 INTEGER,
SAVE :: randmat_counter = 0
57 INTEGER,
PARAMETER,
PRIVATE :: rand_seed_init = 12341313
! ---------------------------------------------------------------------------
! NOTE(review): this block is a garbled extraction of FUNCTION dbt_equal.
! Several original source lines are missing (gaps in the embedded line
! numbers: 67-68, 73-75, 77, 79-84, 86, 93, 95-100 and everything after 101,
! including the closing END FUNCTION) and single statements are split across
! multiple lines.  The code text is kept byte-identical below; only comments
! were added.
! Visible intent: compare tensor1 and tensor2 block-by-block.  A temporary
! tensor with tensor1's block structure is created, tensor1's blocks are
! reserved in it, and an iterator over tensor1 fetches each block from both
! tensors and compares the data with blocks_equal.  How tensor2's data gets
! into tensor2_tmp (presumably dbt_copy on a missing line) cannot be seen
! here -- TODO confirm against the full source.
! ---------------------------------------------------------------------------
65 FUNCTION dbt_equal(tensor1, tensor2)
66 TYPE(dbt_type),
INTENT(INOUT) :: tensor1, tensor2
! Local working objects: temporary tensor, block iterator and two block
! data buffers sized by the rank of tensor1.
69 TYPE(dbt_type) :: tensor2_tmp
70 TYPE(dbt_iterator_type) :: iter
71 TYPE(block_nd) :: blk_data1, blk_data2
72 INTEGER,
DIMENSION(ndims_tensor(tensor1)) :: blk_size, ind_nd
! Create tensor2_tmp with tensor1's structure and reserve the same blocks.
76 CALL dbt_create(tensor1, tensor2_tmp)
78 CALL dbt_reserve_blocks(tensor1, tensor2_tmp)
! Iterate over all (locally stored) blocks of tensor1.
85 CALL dbt_iterator_start(iter, tensor1)
87 DO WHILE (dbt_iterator_blocks_left(iter))
88 CALL dbt_iterator_next_block(iter, ind_nd, blk_size=blk_size)
89 CALL dbt_get_block(tensor1, ind_nd, blk_data1, found)
90 IF (.NOT. found) cpabort(
"Tensor block 1 not found")
91 CALL dbt_get_block(tensor2_tmp, ind_nd, blk_data2, found)
92 IF (.NOT. found) cpabort(
"Tensor block 2 not found")
! A mismatching block makes the comparison fail; the IF body is on the
! missing lines 95-100.
94 IF (.NOT. blocks_equal(blk_data1, blk_data2))
THEN
101 CALL dbt_iterator_stop(iter)
! Elementwise comparison of two tensor blocks.
! Returns .TRUE. when the largest absolute difference between the two block
! data arrays is below 1.0e-12.
! NOTE(review): reconstructed from a garbled extraction; the closing
! END FUNCTION was not visible in the source fragment.
PURE FUNCTION blocks_equal(block1, block2)
   TYPE(block_nd), INTENT(IN) :: block1, block2
   LOGICAL :: blocks_equal
   REAL(dp) :: max_diff

   ! Compare via the maximum absolute deviation rather than elementwise
   ! equality, since the data are floating point.
   max_diff = MAXVAL(ABS(block1%blk - block2%blk))
   blocks_equal = max_diff < 1.0e-12_dp
END FUNCTION blocks_equal
! Factorial of n, computed as the product of the integer sequence 1..n.
! For n <= 0 the implied-do sequence is empty and PRODUCT returns 1,
! so factorial(0) = 1 as expected.
! NOTE(review): reconstructed from a garbled extraction; the result and
! loop-index declarations were on lines missing from the fragment.
PURE FUNCTION factorial(n)
   INTEGER, INTENT(IN) :: n
   INTEGER :: factorial
   INTEGER :: k

   factorial = PRODUCT([(k, k=1, n)])
END FUNCTION factorial
! ---------------------------------------------------------------------------
! NOTE(review): garbled extraction of SUBROUTINE permute and its internal
! RECURSIVE SUBROUTINE perm.  Original lines 136, 139-143 and everything
! after 145 (the recursion body and both END statements) are missing.
! Code text kept byte-identical; only comments added.
! Visible intent: fill p(1:n, 1:factorial(n)) with permutations of 1..n,
! generated recursively by perm() using the work array pp.
! ---------------------------------------------------------------------------
134 SUBROUTINE permute(n, p)
135 INTEGER,
INTENT(IN) :: n
! pp: scratch array holding the permutation under construction.
137 INTEGER,
DIMENSION(n) :: pp
! p: one column per permutation; factorial(n) columns in total.
138 INTEGER,
DIMENSION(n, factorial(n)),
INTENT(OUT) :: p
! Internal recursive generator; i is presumably the position currently
! being fixed -- the body is on missing lines, TODO confirm.
144 RECURSIVE SUBROUTINE perm(i)
145 INTEGER,
INTENT(IN) :: i
174 blk_size_1, blk_size_2, blk_size_3, blk_size_4, &
175 blk_ind_1, blk_ind_2, blk_ind_3, blk_ind_4)
176 INTEGER,
DIMENSION(:),
INTENT(IN),
OPTIONAL :: blk_size_1, blk_size_2, blk_size_3, blk_size_4
177 INTEGER,
DIMENSION(:),
INTENT(IN),
OPTIONAL :: blk_ind_1, blk_ind_2, blk_ind_3, blk_ind_4
178 INTEGER,
INTENT(IN) :: ndims
179 INTEGER,
INTENT(IN) :: unit_nr
180 LOGICAL,
INTENT(IN) :: verbose
181 TYPE(mp_comm_type),
INTENT(IN) :: mp_comm
182 TYPE(dbt_distribution_type) :: dist1, dist2
183 TYPE(dbt_type) :: tensor1, tensor2
184 INTEGER :: isep, iblk
185 INTEGER,
DIMENSION(:),
ALLOCATABLE :: dist1_1, dist1_2, dist1_3, dist1_4, &
186 dist2_1, dist2_2, dist2_3, dist2_4
187 INTEGER :: nblks, imap
188 INTEGER,
DIMENSION(ndims) :: pdims, myploc
190 INTEGER :: iperm, idist, icount
191 INTEGER,
DIMENSION(:),
ALLOCATABLE :: map1, map2, map1_ref, map2_ref
192 INTEGER,
DIMENSION(ndims, factorial(ndims)) :: perm
195 TYPE(dbt_pgrid_type) :: comm_nd
196 CHARACTER(LEN=default_string_length) :: tensor_name
201 mynode = mp_comm%mepos
204 IF (mynode .EQ. 0) io_unit = unit_nr
206 CALL permute(ndims, perm)
207 ALLOCATE (map1_ref, source=perm(1:ndims/2, 1))
208 ALLOCATE (map2_ref, source=perm(ndims/2 + 1:ndims, 1))
210 IF (io_unit > 0)
THEN
212 WRITE (io_unit,
'(A)') repeat(
"-", 80)
213 WRITE (io_unit,
'(A,1X,I1)')
"Testing matrix representations of tensor rank", ndims
214 WRITE (io_unit,
'(A)') repeat(
"-", 80)
215 WRITE (io_unit,
'(A)')
"Block sizes:"
218 WRITE (io_unit,
'(T4,A,1X,I1,A,1X)', advance=
'no')
'Dim', 1,
':'
219 DO iblk = 1,
SIZE(blk_size_1)
220 WRITE (io_unit,
'(I2,1X)', advance=
'no') blk_size_1(iblk)
225 WRITE (io_unit,
'(T4,A,1X,I1,A,1X)', advance=
'no')
'Dim', 2,
':'
226 DO iblk = 1,
SIZE(blk_size_2)
227 WRITE (io_unit,
'(I2,1X)', advance=
'no') blk_size_2(iblk)
232 WRITE (io_unit,
'(T4,A,1X,I1,A,1X)', advance=
'no')
'Dim', 3,
':'
233 DO iblk = 1,
SIZE(blk_size_3)
234 WRITE (io_unit,
'(I2,1X)', advance=
'no') blk_size_3(iblk)
239 WRITE (io_unit,
'(T4,A,1X,I1,A,1X)', advance=
'no')
'Dim', 4,
':'
240 DO iblk = 1,
SIZE(blk_size_4)
241 WRITE (io_unit,
'(I2,1X)', advance=
'no') blk_size_4(iblk)
246 WRITE (io_unit,
'(A)')
"Non-zero blocks:"
247 DO iblk = 1,
SIZE(blk_ind_1)
249 WRITE (io_unit,
'(T4,A, I3, A, 2I3, 1X, A)') &
250 'Block', iblk,
': (', blk_ind_1(iblk), blk_ind_2(iblk),
')'
253 WRITE (io_unit,
'(T4,A, I3, A, 3I3, 1X, A)') &
254 'Block', iblk,
': (', blk_ind_1(iblk), blk_ind_2(iblk), blk_ind_3(iblk),
')'
257 WRITE (io_unit,
'(T4,A, I3, A, 4I3, 1X, A)') &
258 'Block', iblk,
': (', blk_ind_1(iblk), blk_ind_2(iblk), blk_ind_3(iblk), blk_ind_4(iblk),
')'
263 WRITE (io_unit,
'(A,1X)', advance=
'no')
"Reference map:"
264 WRITE (io_unit,
'(A1,1X)', advance=
'no')
"("
265 DO imap = 1,
SIZE(map1_ref)
266 WRITE (io_unit,
'(I1,1X)', advance=
'no') map1_ref(imap)
268 WRITE (io_unit,
'(A1,1X)', advance=
'no')
"|"
269 DO imap = 1,
SIZE(map2_ref)
270 WRITE (io_unit,
'(I1,1X)', advance=
'no') map2_ref(imap)
272 WRITE (io_unit,
'(A1)')
")"
277 DO iperm = 1, factorial(ndims)
278 DO isep = 1, ndims - 1
281 ALLOCATE (map1, source=perm(1:isep, iperm))
282 ALLOCATE (map2, source=perm(isep + 1:ndims, iperm))
284 mynode = mp_comm%mepos
288 nblks =
SIZE(blk_size_1)
289 ALLOCATE (dist1_1(nblks))
290 ALLOCATE (dist2_1(nblks))
295 nblks =
SIZE(blk_size_2)
296 ALLOCATE (dist1_2(nblks))
297 ALLOCATE (dist2_2(nblks))
302 nblks =
SIZE(blk_size_3)
303 ALLOCATE (dist1_3(nblks))
304 ALLOCATE (dist2_3(nblks))
309 nblks =
SIZE(blk_size_4)
310 ALLOCATE (dist1_4(nblks))
311 ALLOCATE (dist2_4(nblks))
316 WRITE (tensor_name,
'(A,1X,I3,1X)')
"Test", icount
318 IF (io_unit > 0)
THEN
320 WRITE (io_unit,
'(A,A,1X)', advance=
'no') trim(tensor_name),
':'
321 WRITE (io_unit,
'(A1,1X)', advance=
'no')
"("
322 DO imap = 1,
SIZE(map1)
323 WRITE (io_unit,
'(I1,1X)', advance=
'no') map1(imap)
325 WRITE (io_unit,
'(A1,1X)', advance=
'no')
"|"
326 DO imap = 1,
SIZE(map2)
327 WRITE (io_unit,
'(I1,1X)', advance=
'no') map2(imap)
329 WRITE (io_unit,
'(A1)')
")"
331 WRITE (io_unit,
'(T4,A)')
"Reference distribution:"
333 WRITE (io_unit,
'(T7,A,1X)', advance=
'no')
"Dist vec 1:"
334 DO idist = 1,
SIZE(dist2_1)
335 WRITE (io_unit,
'(I2,1X)', advance=
'no') dist2_1(idist)
340 WRITE (io_unit,
'(T7,A,1X)', advance=
'no')
"Dist vec 2:"
341 DO idist = 1,
SIZE(dist2_2)
342 WRITE (io_unit,
'(I2,1X)', advance=
'no') dist2_2(idist)
347 WRITE (io_unit,
'(T7,A,1X)', advance=
'no')
"Dist vec 3:"
348 DO idist = 1,
SIZE(dist2_3)
349 WRITE (io_unit,
'(I2,1X)', advance=
'no') dist2_3(idist)
354 WRITE (io_unit,
'(T7,A,1X)', advance=
'no')
"Dist vec 4:"
355 DO idist = 1,
SIZE(dist2_4)
356 WRITE (io_unit,
'(I2,1X)', advance=
'no') dist2_4(idist)
361 WRITE (io_unit,
'(T4,A)')
"Test distribution:"
363 WRITE (io_unit,
'(T7,A,1X)', advance=
'no')
"Dist vec 1:"
364 DO idist = 1,
SIZE(dist2_1)
365 WRITE (io_unit,
'(I2,1X)', advance=
'no') dist1_1(idist)
370 WRITE (io_unit,
'(T7,A,1X)', advance=
'no')
"Dist vec 2:"
371 DO idist = 1,
SIZE(dist2_2)
372 WRITE (io_unit,
'(I2,1X)', advance=
'no') dist1_2(idist)
377 WRITE (io_unit,
'(T7,A,1X)', advance=
'no')
"Dist vec 3:"
378 DO idist = 1,
SIZE(dist2_3)
379 WRITE (io_unit,
'(I2,1X)', advance=
'no') dist1_3(idist)
384 WRITE (io_unit,
'(T7,A,1X)', advance=
'no')
"Dist vec 4:"
385 DO idist = 1,
SIZE(dist2_4)
386 WRITE (io_unit,
'(I2,1X)', advance=
'no') dist1_4(idist)
394 CALL dbt_create(tensor2,
"Ref", dist2, map1_ref, map2_ref, &
395 blk_size_1, blk_size_2)
400 CALL dbt_create(tensor2,
"Ref", dist2, map1_ref, map2_ref, &
401 blk_size_1, blk_size_2, blk_size_3)
406 CALL dbt_create(tensor2,
"Ref", dist2, map1_ref, map2_ref, &
407 blk_size_1, blk_size_2, blk_size_3, blk_size_4)
408 CALL dbt_setup_test_tensor(tensor2, comm_nd%mp_comm_2d, .true., blk_ind_1, blk_ind_2, blk_ind_3, blk_ind_4)
415 CALL dbt_create(tensor1, tensor_name, dist1, map1, map2, &
416 blk_size_1, blk_size_2)
421 CALL dbt_create(tensor1, tensor_name, dist1, map1, map2, &
422 blk_size_1, blk_size_2, blk_size_3)
427 CALL dbt_create(tensor1, tensor_name, dist1, map1, map2, &
428 blk_size_1, blk_size_2, blk_size_3, blk_size_4)
429 CALL dbt_setup_test_tensor(tensor1, comm_nd%mp_comm_2d, .true., blk_ind_1, blk_ind_2, blk_ind_3, blk_ind_4)
434 eql = dbt_equal(tensor1, tensor2)
437 IF (io_unit > 0)
WRITE (io_unit,
'(A,1X,A)') trim(tensor_name),
'Test failed!'
440 IF (io_unit > 0)
WRITE (io_unit,
'(A,1X,A)') trim(tensor_name),
'Test passed!'
442 DEALLOCATE (map1, map2)
451 DEALLOCATE (dist1_1, dist2_1)
454 DEALLOCATE (dist1_2, dist2_2)
457 DEALLOCATE (dist1_3, dist2_3)
460 DEALLOCATE (dist1_4, dist2_4)
476 TYPE(dbt_type),
INTENT(INOUT) ::
tensor
477 CLASS(mp_comm_type),
INTENT(IN) :: mp_comm
478 LOGICAL,
INTENT(IN) :: enumerate
479 INTEGER,
DIMENSION(:),
INTENT(IN),
OPTIONAL :: blk_ind_1, blk_ind_2, blk_ind_3, blk_ind_4
482 INTEGER :: i, ib, my_nblks_alloc, nblks_alloc, proc, nze
483 INTEGER,
ALLOCATABLE,
DIMENSION(:) :: my_blk_ind_1, my_blk_ind_2, my_blk_ind_3, my_blk_ind_4
484 INTEGER,
DIMENSION(ndims_tensor(tensor)) :: blk_index, blk_offset, blk_size, &
486 INTEGER,
DIMENSION(:, :),
ALLOCATABLE :: ind_nd
487 REAL(kind=
dp),
ALLOCATABLE, &
488 DIMENSION(:,:) :: blk_values_2
489 REAL(kind=
dp),
ALLOCATABLE, &
490 DIMENSION(:,:,:) :: blk_values_3
491 REAL(kind=
dp),
ALLOCATABLE, &
492 DIMENSION(:,:,:,:) :: blk_values_4
493 TYPE(dbt_iterator_type) :: iterator
494 INTEGER,
DIMENSION(4) :: iseed
495 INTEGER,
DIMENSION(2) :: blk_index_2d, nblks_2d
497 nblks_alloc =
SIZE(blk_ind_1)
498 mynode = mp_comm%mepos
500 IF (.NOT. enumerate)
THEN
501 cpassert(randmat_counter .NE. 0)
503 randmat_counter = randmat_counter + 1
517 DO ib = 1, nblks_alloc
519 ind_nd(ib, :) = [blk_ind_1(ib), blk_ind_2(ib)]
522 ind_nd(ib, :) = [blk_ind_1(ib), blk_ind_2(ib), blk_ind_3(ib)]
525 ind_nd(ib, :) = [blk_ind_1(ib), blk_ind_2(ib), blk_ind_3(ib), blk_ind_4(ib)]
527 CALL dbt_get_stored_coordinates(
tensor, ind_nd(ib, :), proc)
528 IF (proc == mynode)
THEN
529 my_nblks_alloc = my_nblks_alloc + 1
535 ALLOCATE (my_blk_ind_1(my_nblks_alloc))
538 ALLOCATE (my_blk_ind_2(my_nblks_alloc))
541 ALLOCATE (my_blk_ind_3(my_nblks_alloc))
544 ALLOCATE (my_blk_ind_4(my_nblks_alloc))
549 DO ib = 1, nblks_alloc
550 CALL dbt_get_stored_coordinates(
tensor, ind_nd(ib, :), proc)
551 IF (proc == mynode)
THEN
554 my_blk_ind_1(i) = blk_ind_1(ib)
557 my_blk_ind_2(i) = blk_ind_2(ib)
560 my_blk_ind_3(i) = blk_ind_3(ib)
563 my_blk_ind_4(i) = blk_ind_4(ib)
570 CALL dbt_reserve_blocks(
tensor, my_blk_ind_1, my_blk_ind_2)
573 CALL dbt_reserve_blocks(
tensor, my_blk_ind_1, my_blk_ind_2, my_blk_ind_3)
576 CALL dbt_reserve_blocks(
tensor, my_blk_ind_1, my_blk_ind_2, my_blk_ind_3, my_blk_ind_4)
579 CALL dbt_iterator_start(iterator,
tensor)
580 DO WHILE (dbt_iterator_blocks_left(iterator))
581 CALL dbt_iterator_next_block(iterator, blk_index, blk_size=blk_size, blk_offset=blk_offset)
583 IF (.NOT. enumerate)
THEN
586 iseed =
generate_larnv_seed(blk_index_2d(1), nblks_2d(1), blk_index_2d(2), nblks_2d(2), randmat_counter)
587 nze = product(blk_size)
591 CALL allocate_any(blk_values_2, shape_spec=blk_size)
594 CALL enumerate_block_elements(blk_size, blk_offset, tensor_dims, blk_2=blk_values_2)
596 CALL dlarnv(1, iseed, nze, blk_values_2)
598 CALL dbt_put_block(
tensor, blk_index, blk_size, blk_values_2)
599 DEALLOCATE (blk_values_2)
602 CALL allocate_any(blk_values_3, shape_spec=blk_size)
605 CALL enumerate_block_elements(blk_size, blk_offset, tensor_dims, blk_3=blk_values_3)
607 CALL dlarnv(1, iseed, nze, blk_values_3)
609 CALL dbt_put_block(
tensor, blk_index, blk_size, blk_values_3)
610 DEALLOCATE (blk_values_3)
613 CALL allocate_any(blk_values_4, shape_spec=blk_size)
616 CALL enumerate_block_elements(blk_size, blk_offset, tensor_dims, blk_4=blk_values_4)
618 CALL dlarnv(1, iseed, nze, blk_values_4)
620 CALL dbt_put_block(
tensor, blk_index, blk_size, blk_values_4)
621 DEALLOCATE (blk_values_4)
624 CALL dbt_iterator_stop(iterator)
! ---------------------------------------------------------------------------
! NOTE(review): garbled extraction of SUBROUTINE enumerate_block_elements.
! Missing original lines include 646, 649, 651-652, the 2d/3d element
! assignments (657-661, 667-672) and everything after 679 (loop closings and
! END).  Code text kept byte-identical; only comments added.
! Visible intent: fill one tensor block (2d, 3d or 4d, selected by which
! OPTIONAL blk_* argument is present) such that every element equals the
! flat global tensor index of that element, i.e.
! combine_tensor_index(tens_ind, tensor_size) with
! tens_ind = local index + blk_offset - 1 (visible for the 4d case).
! ---------------------------------------------------------------------------
638 SUBROUTINE enumerate_block_elements(blk_size, blk_offset, tensor_size, blk_2, blk_3, blk_4)
639 INTEGER,
DIMENSION(:),
INTENT(IN) :: blk_size, blk_offset, tensor_size
640 REAL(kind=
dp),
DIMENSION(:,:), &
641 OPTIONAL,
INTENT(OUT) :: blk_2
642 REAL(kind=
dp),
DIMENSION(:,:,:), &
643 OPTIONAL,
INTENT(OUT) :: blk_3
644 REAL(kind=
dp),
DIMENSION(:,:,:,:), &
645 OPTIONAL,
INTENT(OUT) :: blk_4
647 INTEGER,
DIMENSION(SIZE(blk_size)) :: arr_ind, tens_ind
648 INTEGER :: i_1, i_2, i_3, i_4
650 ndim =
SIZE(tensor_size)
! 2d case: loop order keeps the first (fastest-varying) index innermost,
! matching Fortran column-major storage.
653 DO i_2 = 1, blk_size(2)
654 DO i_1 = 1, blk_size(1)
655 arr_ind(:) = [i_1, i_2]
656 tens_ind(:) = arr_ind(:) + blk_offset(:) - 1
! 3d case.
662 DO i_3 = 1, blk_size(3)
663 DO i_2 = 1, blk_size(2)
664 DO i_1 = 1, blk_size(1)
665 arr_ind(:) = [i_1, i_2, i_3]
666 tens_ind(:) = arr_ind(:) + blk_offset(:) - 1
! 4d case; the assignment below is the only one visible in this fragment.
673 DO i_4 = 1, blk_size(4)
674 DO i_3 = 1, blk_size(3)
675 DO i_2 = 1, blk_size(2)
676 DO i_1 = 1, blk_size(1)
677 arr_ind(:) = [i_1, i_2, i_3, i_4]
678 tens_ind(:) = arr_ind(:) + blk_offset(:) - 1
679 blk_4(arr_ind(1), arr_ind(2), arr_ind(3), arr_ind(4)) =
combine_tensor_index(tens_ind, tensor_size)
! ---------------------------------------------------------------------------
! NOTE(review): garbled extraction.  Missing original lines include 694, the
! tail of the INTENT(OUT) array declaration (697), 701-706, 708-711, 716-718,
! 721, 723-726, 728 and everything after 729 (END).  Code kept byte-identical.
! Visible intent: convert a distributed sparse 2d tensor into a replicated
! dense 2d array: allocate the array at full tensor size, scatter each
! locally stored block into its [blk_offset, blk_offset+blk_size-1] window,
! then sum the partial arrays over the 2d process-grid communicator.
! ---------------------------------------------------------------------------
693 SUBROUTINE dist_sparse_tensor_to_repl_dense_2d_array(tensor, array)
695 TYPE(dbt_type),
INTENT(INOUT) ::
tensor
696 REAL(
dp),
ALLOCATABLE,
DIMENSION(:,:), &
698 REAL(
dp),
ALLOCATABLE,
DIMENSION(:,:) :: block
699 INTEGER,
DIMENSION(ndims_tensor(tensor)) :: dims_nd, ind_nd, blk_size, blk_offset
700 TYPE(dbt_iterator_type) :: iterator
702 INTEGER,
DIMENSION(ndims_tensor(tensor)) :: blk_start, blk_end
707 CALL allocate_any(array, shape_spec=dims_nd)
! Loop over all locally stored blocks.
712 CALL dbt_iterator_start(iterator,
tensor)
713 DO WHILE (dbt_iterator_blocks_left(iterator))
714 CALL dbt_iterator_next_block(iterator, ind_nd, blk_size=blk_size, blk_offset=blk_offset)
715 CALL dbt_get_block(
tensor, ind_nd, block, found)
! Per-dimension target window of this block within the dense array.
719 blk_start(idim) = blk_offset(idim)
720 blk_end(idim) = blk_offset(idim) + blk_size(idim) - 1
722 array(blk_start(1):blk_end(1), blk_start(2):blk_end(2)) = &
727 CALL dbt_iterator_stop(iterator)
! Replicate: each rank holds only its blocks; summing over the grid
! communicator yields the full dense array on every rank.
729 CALL tensor%pgrid%mp_comm_2d%sum(array)
! ---------------------------------------------------------------------------
! NOTE(review): garbled extraction.  Missing original lines include 738, the
! tail of the INTENT(OUT) array declaration (741), 745, 747-750, 753-755,
! 760-762, 765, 767-770, 772 and everything after 773 (END).  Code kept
! byte-identical; only comments added.
! Visible intent: 3d analogue of the 2d routine above -- build a replicated
! dense 3d array from a distributed sparse tensor: allocate and zero the
! full array, copy each local block into its offset window, then sum over
! the process-grid communicator.
! ---------------------------------------------------------------------------
737 SUBROUTINE dist_sparse_tensor_to_repl_dense_3d_array(tensor, array)
739 TYPE(dbt_type),
INTENT(INOUT) ::
tensor
740 REAL(
dp),
ALLOCATABLE,
DIMENSION(:,:,:), &
742 REAL(
dp),
ALLOCATABLE,
DIMENSION(:,:,:) :: block
743 INTEGER,
DIMENSION(ndims_tensor(tensor)) :: dims_nd, ind_nd, blk_size, blk_offset
744 TYPE(dbt_iterator_type) :: iterator
746 INTEGER,
DIMENSION(ndims_tensor(tensor)) :: blk_start, blk_end
751 CALL allocate_any(array, shape_spec=dims_nd)
752 array(:,:,:) = 0.0_dp
! Loop over all locally stored blocks.
756 CALL dbt_iterator_start(iterator,
tensor)
757 DO WHILE (dbt_iterator_blocks_left(iterator))
758 CALL dbt_iterator_next_block(iterator, ind_nd, blk_size=blk_size, blk_offset=blk_offset)
759 CALL dbt_get_block(
tensor, ind_nd, block, found)
! Per-dimension target window of this block within the dense array.
763 blk_start(idim) = blk_offset(idim)
764 blk_end(idim) = blk_offset(idim) + blk_size(idim) - 1
766 array(blk_start(1):blk_end(1), blk_start(2):blk_end(2), blk_start(3):blk_end(3)) = &
771 CALL dbt_iterator_stop(iterator)
! Sum partial arrays across ranks to replicate the full result everywhere.
773 CALL tensor%pgrid%mp_comm_2d%sum(array)
! ---------------------------------------------------------------------------
! NOTE(review): garbled extraction.  Missing original lines include 782, the
! tail of the INTENT(OUT) array declaration (785), 789, 791-794, 797-799,
! 804-806, 809, 811-814, 816 and everything after 817 (END).  Code kept
! byte-identical; only comments added.
! Visible intent: 4d analogue of the 2d/3d routines -- build a replicated
! dense 4d array from a distributed sparse tensor: allocate and zero the
! full array, copy each local block into its offset window, then sum over
! the process-grid communicator.
! ---------------------------------------------------------------------------
781 SUBROUTINE dist_sparse_tensor_to_repl_dense_4d_array(tensor, array)
783 TYPE(dbt_type),
INTENT(INOUT) ::
tensor
784 REAL(
dp),
ALLOCATABLE,
DIMENSION(:,:,:,:), &
786 REAL(
dp),
ALLOCATABLE,
DIMENSION(:,:,:,:) :: block
787 INTEGER,
DIMENSION(ndims_tensor(tensor)) :: dims_nd, ind_nd, blk_size, blk_offset
788 TYPE(dbt_iterator_type) :: iterator
790 INTEGER,
DIMENSION(ndims_tensor(tensor)) :: blk_start, blk_end
795 CALL allocate_any(array, shape_spec=dims_nd)
796 array(:,:,:,:) = 0.0_dp
! Loop over all locally stored blocks.
800 CALL dbt_iterator_start(iterator,
tensor)
801 DO WHILE (dbt_iterator_blocks_left(iterator))
802 CALL dbt_iterator_next_block(iterator, ind_nd, blk_size=blk_size, blk_offset=blk_offset)
803 CALL dbt_get_block(
tensor, ind_nd, block, found)
! Per-dimension target window of this block within the dense array.
807 blk_start(idim) = blk_offset(idim)
808 blk_end(idim) = blk_offset(idim) + blk_size(idim) - 1
810 array(blk_start(1):blk_end(1), blk_start(2):blk_end(2), blk_start(3):blk_end(3), blk_start(4):blk_end(4)) = &
815 CALL dbt_iterator_stop(iterator)
! Sum partial arrays across ranks to replicate the full result everywhere.
817 CALL tensor%pgrid%mp_comm_2d%sum(array)
827 contract_1, notcontract_1, &
828 contract_2, notcontract_2, &
831 bounds_1, bounds_2, bounds_3, &
832 log_verbose, write_int)
834 REAL(
dp),
INTENT(IN) :: alpha
835 TYPE(dbt_type),
INTENT(INOUT) :: tensor_1, tensor_2, tensor_3
836 REAL(
dp),
INTENT(IN) :: beta
837 INTEGER,
DIMENSION(:),
INTENT(IN) :: contract_1, contract_2, &
838 notcontract_1, notcontract_2, &
840 INTEGER,
INTENT(IN) :: unit_nr
841 INTEGER,
DIMENSION(2, SIZE(contract_1)), &
843 INTEGER,
DIMENSION(2, SIZE(notcontract_1)), &
845 INTEGER,
DIMENSION(2, SIZE(notcontract_2)), &
847 LOGICAL,
INTENT(IN),
OPTIONAL :: log_verbose
848 LOGICAL,
INTENT(IN),
OPTIONAL :: write_int
849 INTEGER :: io_unit, mynode
850 TYPE(mp_comm_type) :: mp_comm
851 INTEGER,
DIMENSION(:),
ALLOCATABLE :: size_1, size_2, size_3, &
852 order_t1, order_t2, order_t3
853 INTEGER,
DIMENSION(2, ndims_tensor(tensor_1)) :: bounds_t1
854 INTEGER,
DIMENSION(2, ndims_tensor(tensor_2)) :: bounds_t2
856 REAL(kind=
dp),
ALLOCATABLE, &
857 DIMENSION(:,:) :: array_1_2d, &
867 REAL(kind=
dp),
ALLOCATABLE, &
868 DIMENSION(:,:,:) :: array_1_3d, &
878 REAL(kind=
dp),
ALLOCATABLE, &
879 DIMENSION(:,:,:,:) :: array_1_4d, &
889 REAL(kind=
dp),
ALLOCATABLE, &
890 DIMENSION(:, :) :: array_1_mm, &
894 LOGICAL :: eql, notzero
895 LOGICAL,
PARAMETER :: debug = .false.
896 REAL(kind=
dp) :: cs_1, cs_2, cs_3, eql_diff
897 LOGICAL :: do_crop_1, do_crop_2
899 mp_comm = tensor_1%pgrid%mp_comm_2d
900 mynode = mp_comm%mepos
902 IF (mynode .EQ. 0) io_unit = unit_nr
908 IF (io_unit > 0)
THEN
910 WRITE (io_unit,
'(A)') repeat(
"-", 80)
911 WRITE (io_unit,
'(A,1X,A,1X,A,1X,A,1X,A,1X,A)')
"Testing tensor contraction", &
912 trim(tensor_1%name),
"x", trim(tensor_2%name),
"=", trim(tensor_3%name)
913 WRITE (io_unit,
'(A)') repeat(
"-", 80)
917 IF (io_unit > 0)
THEN
918 WRITE (io_unit,
"(A, E9.2)")
"checksum ", trim(tensor_1%name), cs_1
919 WRITE (io_unit,
"(A, E9.2)")
"checksum ", trim(tensor_2%name), cs_2
920 WRITE (io_unit,
"(A, E9.2)")
"checksum ", trim(tensor_3%name), cs_3
931 CALL dist_sparse_tensor_to_repl_dense_array(tensor_3, array_3_0_2d)
933 CALL dist_sparse_tensor_to_repl_dense_array(tensor_3, array_3_0_3d)
935 CALL dist_sparse_tensor_to_repl_dense_array(tensor_3, array_3_0_4d)
938 CALL dbt_contract(alpha, tensor_1, tensor_2, beta, tensor_3, &
939 contract_1, notcontract_1, &
940 contract_2, notcontract_2, &
942 bounds_1=bounds_1, bounds_2=bounds_2, bounds_3=bounds_3, &
943 filter_eps=1.0e-12_dp, &
944 unit_nr=io_unit, log_verbose=log_verbose)
949 IF (io_unit > 0)
THEN
950 WRITE (io_unit,
"(A, E9.2)")
"checksum ", trim(tensor_3%name), cs_3
954 do_crop_1 = .false.; do_crop_2 = .false.
958 CALL dbt_get_info(tensor_1, nfull_total=bounds_t1(2, :))
961 CALL dbt_get_info(tensor_2, nfull_total=bounds_t2(2, :))
963 IF (
PRESENT(bounds_1))
THEN
964 bounds_t1(:, contract_1) = bounds_1
966 bounds_t2(:, contract_2) = bounds_1
970 IF (
PRESENT(bounds_2))
THEN
971 bounds_t1(:, notcontract_1) = bounds_2
975 IF (
PRESENT(bounds_3))
THEN
976 bounds_t2(:, notcontract_2) = bounds_3
983 CALL dist_sparse_tensor_to_repl_dense_array(tensor_1, array_1_2d_full)
984 CALL allocate_any(array_1_2d, shape_spec=shape(array_1_2d_full))
986 array_1_2d(bounds_t1(1, 1):bounds_t1(2, 1), bounds_t1(1, 2):bounds_t1(2, 2)) = &
987 array_1_2d_full(bounds_t1(1, 1):bounds_t1(2, 1), bounds_t1(1, 2):bounds_t1(2, 2))
990 CALL dist_sparse_tensor_to_repl_dense_array(tensor_1, array_1_3d_full)
991 CALL allocate_any(array_1_3d, shape_spec=shape(array_1_3d_full))
993 array_1_3d(bounds_t1(1, 1):bounds_t1(2, 1), bounds_t1(1, 2):bounds_t1(2, 2), bounds_t1(1, 3):bounds_t1(2, 3)) = &
994 array_1_3d_full(bounds_t1(1, 1):bounds_t1(2, 1), bounds_t1(1, 2):bounds_t1(2, 2), bounds_t1(1, 3):bounds_t1(2, 3))
997 CALL dist_sparse_tensor_to_repl_dense_array(tensor_1, array_1_4d_full)
998 CALL allocate_any(array_1_4d, shape_spec=shape(array_1_4d_full))
1000 array_1_4d(bounds_t1(1, 1):bounds_t1(2, 1), bounds_t1(1, 2):bounds_t1(2, 2), bounds_t1(1, 3):bounds_t1(2, 3),&
1001 & bounds_t1(1, 4):bounds_t1(2, 4)) = &
1002 array_1_4d_full(bounds_t1(1, 1):bounds_t1(2, 1), bounds_t1(1, 2):bounds_t1(2, 2), bounds_t1(1, 3):bounds_t1(2, 3),&
1003 & bounds_t1(1, 4):bounds_t1(2, 4))
1008 CALL dist_sparse_tensor_to_repl_dense_array(tensor_2, array_2_2d_full)
1009 CALL allocate_any(array_2_2d, shape_spec=shape(array_2_2d_full))
1011 array_2_2d(bounds_t2(1, 1):bounds_t2(2, 1), bounds_t2(1, 2):bounds_t2(2, 2)) = &
1012 array_2_2d_full(bounds_t2(1, 1):bounds_t2(2, 1), bounds_t2(1, 2):bounds_t2(2, 2))
1015 CALL dist_sparse_tensor_to_repl_dense_array(tensor_2, array_2_3d_full)
1016 CALL allocate_any(array_2_3d, shape_spec=shape(array_2_3d_full))
1018 array_2_3d(bounds_t2(1, 1):bounds_t2(2, 1), bounds_t2(1, 2):bounds_t2(2, 2), bounds_t2(1, 3):bounds_t2(2, 3)) = &
1019 array_2_3d_full(bounds_t2(1, 1):bounds_t2(2, 1), bounds_t2(1, 2):bounds_t2(2, 2), bounds_t2(1, 3):bounds_t2(2, 3))
1022 CALL dist_sparse_tensor_to_repl_dense_array(tensor_2, array_2_4d_full)
1023 CALL allocate_any(array_2_4d, shape_spec=shape(array_2_4d_full))
1025 array_2_4d(bounds_t2(1, 1):bounds_t2(2, 1), bounds_t2(1, 2):bounds_t2(2, 2), bounds_t2(1, 3):bounds_t2(2, 3),&
1026 & bounds_t2(1, 4):bounds_t2(2, 4)) = &
1027 array_2_4d_full(bounds_t2(1, 1):bounds_t2(2, 1), bounds_t2(1, 2):bounds_t2(2, 2), bounds_t2(1, 3):bounds_t2(2, 3),&
1028 & bounds_t2(1, 4):bounds_t2(2, 4))
1033 CALL dist_sparse_tensor_to_repl_dense_array(tensor_3, array_3_2d)
1036 CALL dist_sparse_tensor_to_repl_dense_array(tensor_3, array_3_3d)
1039 CALL dist_sparse_tensor_to_repl_dense_array(tensor_3, array_3_4d)
1047 ALLOCATE (size_1, source=shape(array_1_2d))
1050 ALLOCATE (size_1, source=shape(array_1_3d))
1053 ALLOCATE (size_1, source=shape(array_1_4d))
1058 ALLOCATE (size_2, source=shape(array_2_2d))
1061 ALLOCATE (size_2, source=shape(array_2_3d))
1064 ALLOCATE (size_2, source=shape(array_2_4d))
1069 ALLOCATE (size_3, source=shape(array_3_2d))
1072 ALLOCATE (size_3, source=shape(array_3_3d))
1075 ALLOCATE (size_3, source=shape(array_3_4d))
1083 associate(map_t1_1 => notcontract_1, map_t1_2 => contract_1, &
1084 map_t2_1 => notcontract_2, map_t2_2 => contract_2, &
1085 map_t3_1 => map_1, map_t3_2 => map_2)
1087 order_t1(:) = dbt_inverse_order([map_t1_1, map_t1_2])
1091 CALL allocate_any(array_1_rs2d, source=array_1_2d, order=order_t1)
1092 CALL allocate_any(array_1_mm, sizes_2d(size_1, map_t1_1, map_t1_2))
1093 array_1_mm(:, :) = reshape(array_1_rs2d, shape(array_1_mm))
1095 CALL allocate_any(array_1_rs3d, source=array_1_3d, order=order_t1)
1096 CALL allocate_any(array_1_mm, sizes_2d(size_1, map_t1_1, map_t1_2))
1097 array_1_mm(:, :) = reshape(array_1_rs3d, shape(array_1_mm))
1099 CALL allocate_any(array_1_rs4d, source=array_1_4d, order=order_t1)
1100 CALL allocate_any(array_1_mm, sizes_2d(size_1, map_t1_1, map_t1_2))
1101 array_1_mm(:, :) = reshape(array_1_rs4d, shape(array_1_mm))
1103 order_t2(:) = dbt_inverse_order([map_t2_1, map_t2_2])
1107 CALL allocate_any(array_2_rs2d, source=array_2_2d, order=order_t2)
1108 CALL allocate_any(array_2_mm, sizes_2d(size_2, map_t2_1, map_t2_2))
1109 array_2_mm(:, :) = reshape(array_2_rs2d, shape(array_2_mm))
1111 CALL allocate_any(array_2_rs3d, source=array_2_3d, order=order_t2)
1112 CALL allocate_any(array_2_mm, sizes_2d(size_2, map_t2_1, map_t2_2))
1113 array_2_mm(:, :) = reshape(array_2_rs3d, shape(array_2_mm))
1115 CALL allocate_any(array_2_rs4d, source=array_2_4d, order=order_t2)
1116 CALL allocate_any(array_2_mm, sizes_2d(size_2, map_t2_1, map_t2_2))
1117 array_2_mm(:, :) = reshape(array_2_rs4d, shape(array_2_mm))
1119 order_t3(:) = dbt_inverse_order([map_t3_1, map_t3_2])
1123 CALL allocate_any(array_3_rs2d, source=array_3_2d, order=order_t3)
1124 CALL allocate_any(array_3_mm, sizes_2d(size_3, map_t3_1, map_t3_2))
1125 array_3_mm(:, :) = reshape(array_3_rs2d, shape(array_3_mm))
1127 CALL allocate_any(array_3_rs3d, source=array_3_3d, order=order_t3)
1128 CALL allocate_any(array_3_mm, sizes_2d(size_3, map_t3_1, map_t3_2))
1129 array_3_mm(:, :) = reshape(array_3_rs3d, shape(array_3_mm))
1131 CALL allocate_any(array_3_rs4d, source=array_3_4d, order=order_t3)
1132 CALL allocate_any(array_3_mm, sizes_2d(size_3, map_t3_1, map_t3_2))
1133 array_3_mm(:, :) = reshape(array_3_rs4d, shape(array_3_mm))
1138 CALL allocate_any(array_3_0_rs2d, source=array_3_0_2d, order=order_t3)
1139 CALL allocate_any(array_3_test_mm, sizes_2d(size_3, map_t3_1, map_t3_2))
1140 array_3_test_mm(:, :) = reshape(array_3_0_rs2d, shape(array_3_mm))
1142 CALL allocate_any(array_3_0_rs3d, source=array_3_0_3d, order=order_t3)
1143 CALL allocate_any(array_3_test_mm, sizes_2d(size_3, map_t3_1, map_t3_2))
1144 array_3_test_mm(:, :) = reshape(array_3_0_rs3d, shape(array_3_mm))
1146 CALL allocate_any(array_3_0_rs4d, source=array_3_0_4d, order=order_t3)
1147 CALL allocate_any(array_3_test_mm, sizes_2d(size_3, map_t3_1, map_t3_2))
1148 array_3_test_mm(:, :) = reshape(array_3_0_rs4d, shape(array_3_mm))
1151 array_3_test_mm(:, :) = beta*array_3_test_mm(:, :) + alpha*matmul(array_1_mm, transpose(array_2_mm))
1155 eql_diff = maxval(abs(array_3_test_mm(:, :) - array_3_mm(:, :)))
1156 notzero = maxval(abs(array_3_test_mm(:, :))) .GT. 1.0e-12_dp
1158 eql = eql_diff .LT. 1.0e-11_dp
1160 IF (.NOT. eql .OR. .NOT. notzero)
THEN
1161 IF (io_unit > 0)
WRITE (io_unit, *)
'Test failed!', eql_diff
1164 IF (io_unit > 0)
WRITE (io_unit, *)
'Test passed!', eql_diff
! Matrix dimensions of the 2d (matricized) representation of an nd tensor:
! each of the two matrix dimensions is the product of the nd sizes of the
! tensor dimensions mapped onto it (map1 -> rows, map2 -> columns).
! NOTE(review): reconstructed from a garbled extraction; the closing
! END FUNCTION was not visible in the source fragment.
FUNCTION sizes_2d(nd_sizes, map1, map2)
   INTEGER, DIMENSION(:), INTENT(IN) :: nd_sizes, map1, map2
   INTEGER, DIMENSION(2) :: sizes_2d

   sizes_2d = [PRODUCT(nd_sizes(map1)), PRODUCT(nd_sizes(map2))]
END FUNCTION sizes_2d
1185 TYPE(dbt_type),
INTENT(IN) ::
tensor
1195 randmat_counter = rand_seed_init
integer function, dimension(4), public generate_larnv_seed(irow, nrow, icol, ncol, ival)
Generate a seed respecting the LAPACK constraints.
Wrapper for allocating, copying and reshaping arrays.
Methods to operate on n-dimensional tensor blocks.
tensor index and mapping to DBM index
pure integer(kind=int_8) function, dimension(2), public get_2d_indices_tensor(map, ind_in)
transform nd index to 2d index, using info from index mapping.
pure integer(kind=int_8) function, public combine_tensor_index(ind_in, dims)
transform nd index to flat index
pure subroutine, public dbt_get_mapping_info(map, ndim_nd, ndim1_2d, ndim2_2d, dims_2d_i8, dims_2d, dims_nd, dims1_2d, dims2_2d, map1_2d, map2_2d, map_nd, base, col_major)
get mapping info
DBT tensor Input / Output.
subroutine, public dbt_write_blocks(tensor, io_unit_master, io_unit_all, write_int)
Write all tensor blocks.
subroutine, public dbt_write_block_indices(tensor, io_unit_master, io_unit_all)
DBT tensor framework for block-sparse tensor contraction. Representation of n-rank tensors as DBT tall-and-skinny matrices.
subroutine, public dbt_copy(tensor_in, tensor_out, order, summation, bounds, move_data, unit_nr)
Copy tensor data. Redistributes tensor data according to distributions of target and source tensor....
subroutine, public dbt_contract(alpha, tensor_1, tensor_2, beta, tensor_3, contract_1, notcontract_1, contract_2, notcontract_2, map_1, map_2, bounds_1, bounds_2, bounds_3, optimize_dist, pgrid_opt_1, pgrid_opt_2, pgrid_opt_3, filter_eps, flop, move_data, retain_sparsity, unit_nr, log_verbose)
Contract tensors by multiplying matrix representations. tensor_3(map_1, map_2) := alpha * tensor_1(no...
Tall-and-skinny matrices: base routines similar to DBM API, mostly wrappers around existing DBM routines.
type(dbt_tas_split_info) function, public dbt_tas_info(matrix)
get info on mpi grid splitting
testing infrastructure for tall-and-skinny matrices
real(kind=dp) function, public dbt_tas_checksum(matrix)
Calculate checksum of tall-and-skinny matrix consistent with dbm_checksum.
General methods for testing DBT tensors.
real(kind=dp) function, public dbt_checksum(tensor)
checksum of a tensor consistent with block_checksum
subroutine, public dbt_reset_randmat_seed()
Reset the seed used for generating random matrices to default value.
subroutine, public dbt_test_formats(ndims, mp_comm, unit_nr, verbose, blk_size_1, blk_size_2, blk_size_3, blk_size_4, blk_ind_1, blk_ind_2, blk_ind_3, blk_ind_4)
Test equivalence of all tensor formats, using a random distribution.
subroutine, public dbt_contract_test(alpha, tensor_1, tensor_2, beta, tensor_3, contract_1, notcontract_1, contract_2, notcontract_2, map_1, map_2, unit_nr, bounds_1, bounds_2, bounds_3, log_verbose, write_int)
test tensor contraction
subroutine, public dbt_setup_test_tensor(tensor, mp_comm, enumerate, blk_ind_1, blk_ind_2, blk_ind_3, blk_ind_4)
Allocate and fill test tensor - entries are enumerated by their index s.t. they only depend on global...
DBT tensor framework for block-sparse tensor contraction: Types and create/destroy routines.
subroutine, public dbt_pgrid_destroy(pgrid, keep_comm)
destroy process grid
subroutine, public dbt_distribution_new(dist, pgrid, nd_dist_1, nd_dist_2, nd_dist_3, nd_dist_4)
Create a tensor distribution.
subroutine, public dims_tensor(tensor, dims)
tensor dimensions
subroutine, public dbt_destroy(tensor)
Destroy a tensor.
subroutine, public dbt_get_info(tensor, nblks_total, nfull_total, nblks_local, nfull_local, pdims, my_ploc, blks_local_1, blks_local_2, blks_local_3, blks_local_4, proc_dist_1, proc_dist_2, proc_dist_3, proc_dist_4, blk_size_1, blk_size_2, blk_size_3, blk_size_4, blk_offset_1, blk_offset_2, blk_offset_3, blk_offset_4, distribution, name)
As block_get_info but for tensors.
pure integer function, public ndims_tensor(tensor)
tensor rank
subroutine, public dbt_default_distvec(nblk, nproc, blk_size, dist)
get a load-balanced and randomized distribution along one tensor dimension
subroutine, public mp_environ_pgrid(pgrid, dims, task_coor)
as mp_environ but for special pgrid type
subroutine, public dbt_pgrid_create(mp_comm, dims, pgrid, tensor_dims)
subroutine, public dbt_distribution_destroy(dist)
Destroy tensor distribution.
Defines the basic variable types.
integer, parameter, public int_8
integer, parameter, public dp
integer, parameter, public default_string_length
Interface to the message passing library MPI.