# 1 "/__w/dbcsr/dbcsr/src/block/dbcsr_index_operations.F" 1 !--------------------------------------------------------------------------------------------------! ! Copyright (C) by the DBCSR developers group - All rights reserved ! ! This file is part of the DBCSR library. ! ! ! ! For information on the license, see the LICENSE file. ! ! For further information please visit https://dbcsr.cp2k.org ! ! SPDX-License-Identifier: GPL-2.0+ ! !--------------------------------------------------------------------------------------------------! MODULE dbcsr_index_operations !! Operations on the DBCSR index USE dbcsr_array_types, ONLY: array_data, & array_size USE dbcsr_config, ONLY: default_resize_factor USE dbcsr_dist_methods, ONLY: dbcsr_distribution_local_cols, & dbcsr_distribution_local_rows, & dbcsr_distribution_ncols, & dbcsr_distribution_nlocal_cols, & dbcsr_distribution_nlocal_rows, & dbcsr_distribution_nrows, & dbcsr_distribution_thread_dist USE dbcsr_dist_operations, ONLY: get_stored_canonical USE dbcsr_kinds, ONLY: int_4, & int_8 USE dbcsr_methods, ONLY: dbcsr_distribution, & dbcsr_get_index_memory_type USE dbcsr_ptr_util, ONLY: ensure_array_size, & memory_allocate, & memory_deallocate USE dbcsr_toollib, ONLY: joaat_hash, & sort, & swap USE dbcsr_types, ONLY: & dbcsr_distribution_obj, dbcsr_meta_size, dbcsr_num_slots, dbcsr_slot_blk_p, & dbcsr_slot_col_i, dbcsr_slot_coo_l, dbcsr_slot_home_prow, dbcsr_slot_home_vprow, & dbcsr_slot_nblkcols_local, dbcsr_slot_nblkcols_total, dbcsr_slot_nblkrows_local, & dbcsr_slot_nblkrows_total, dbcsr_slot_nblks, dbcsr_slot_nfullcols_local, dbcsr_slot_nze, & dbcsr_slot_row_p, dbcsr_slot_size, dbcsr_slot_thr_c, dbcsr_type #include "base/dbcsr_base_uses.f90" !$ USE OMP_LIB, ONLY: omp_get_max_threads, omp_get_thread_num, omp_get_num_threads IMPLICIT NONE PRIVATE CHARACTER(len=*), PARAMETER, PRIVATE :: moduleN = 'dbcsr_index_operations' LOGICAL, PARAMETER :: careful_mod = .FALSE. LOGICAL, PARAMETER :: debug_mod = .FALSE. ! Index transformations PUBLIC :: dbcsr_make_index_canonical, & transpose_index_local PUBLIC :: dbcsr_make_index_local_row, dbcsr_has_local_row_index PUBLIC :: dbcsr_make_index_list ! Dense/Sparse PUBLIC :: make_dense_index, make_undense_index ! Working with DBCSR and linear indices PUBLIC :: dbcsr_make_dbcsr_index, dbcsr_sort_indices, & merge_index_arrays, & dbcsr_expand_row_index, & dbcsr_count_row_index, dbcsr_build_row_index, & dbcsr_index_prune_deleted, & dbcsr_index_compact PUBLIC :: dbcsr_index_checksum ! Index array manipulation PUBLIC :: dbcsr_addto_index_array, dbcsr_clearfrom_index_array, & dbcsr_repoint_index, dbcsr_make_index_exist INTERFACE dbcsr_count_row_index MODULE PROCEDURE dbcsr_count_row_index_copy, & dbcsr_count_row_index_inplace END INTERFACE INTERFACE dbcsr_build_row_index MODULE PROCEDURE dbcsr_build_row_index_copy, & dbcsr_build_row_index_inplace END INTERFACE CONTAINS SUBROUTINE make_index_canonical(new_row_p, new_col_i, new_blk_p, & old_row_p, old_col_i, old_blk_p, matrix) !! Makes a canonical index given the index arrays !! !! Description of canonical ordering !! A non-(anti)symmetric matrix is left as is. Otherwise, the row and column !! are stored in the position prescribed by the distribution. !! @note !! This routine uses hard-coded logic as to what constitutes a !! 
canonical ordering INTEGER, DIMENSION(:), INTENT(OUT) :: new_row_p, new_col_i, new_blk_p INTEGER, DIMENSION(:), INTENT(IN) :: old_row_p, old_col_i, old_blk_p TYPE(dbcsr_type), INTENT(IN) :: matrix CHARACTER(len=*), PARAMETER :: routineN = 'make_index_canonical' INTEGER :: blk, col, nblks, row, stored_col, & stored_row INTEGER, ALLOCATABLE, DIMENSION(:) :: row_i LOGICAL :: tr ! --------------------------------------------------------------------------- nblks = SIZE(old_blk_p) ALLOCATE (row_i(nblks)) IF (debug_mod) THEN WRITE (*, *) "old row_p", old_row_p WRITE (*, *) "old col_i", old_col_i WRITE (*, *) "old blk_p", old_blk_p END IF DO row = 1, SIZE(old_row_p) - 1 DO blk = old_row_p(row) + 1, old_row_p(row + 1) col = old_col_i(blk) stored_row = row stored_col = col tr = .FALSE. CALL get_stored_canonical(matrix, stored_row, stored_col, tr) IF (debug_mod) & WRITE (*, '(A,2(1X,I5),A,2(1X,I5),";",I7,1X,L1)') & routineN//" X->", row, col, "->", & stored_row, stored_col, blk, tr row_i(blk) = stored_row new_col_i(blk) = stored_col IF (.NOT. tr) THEN new_blk_p(blk) = old_blk_p(blk) ELSE new_blk_p(blk) = -old_blk_p(blk) END IF END DO END DO CALL dbcsr_sort_indices(nblks, row_i, new_col_i, blk_p=new_blk_p) ! Re-create the index CALL dbcsr_make_dbcsr_index(new_row_p, row_i, SIZE(new_row_p) - 1, nblks) IF (debug_mod) THEN WRITE (*, *) "new row_p", new_row_p WRITE (*, *) "new row_i", row_i WRITE (*, *) "new col_i", new_col_i WRITE (*, *) "new blk_p", new_blk_p END IF END SUBROUTINE make_index_canonical SUBROUTINE make_index_triangular(new_row_p, new_col_i, new_blk_p, & old_row_p, old_col_i, old_blk_p, matrix) !! Makes a CP2K triangular index given the index arrays !! !! Description of canonical ordering !! A non-(anti)symmetric matrix is left as is. Otherwise, the row and column !! are stored in the position prescribed by the distribution. !! @note !! This routine uses hard-coded logic as to what constitutes a !! canonical ordering INTEGER, DIMENSION(:), INTENT(OUT) :: new_row_p, new_col_i, new_blk_p INTEGER, DIMENSION(:), INTENT(IN) :: old_row_p, old_col_i, old_blk_p TYPE(dbcsr_type), INTENT(IN) :: matrix CHARACTER(len=*), PARAMETER :: routineN = 'make_index_triangular' INTEGER :: blk, col, nblks, row, stored_col, & stored_row INTEGER, ALLOCATABLE, DIMENSION(:) :: row_i LOGICAL :: tr ! --------------------------------------------------------------------------- nblks = SIZE(old_blk_p) ALLOCATE (row_i(nblks)) IF (debug_mod) THEN WRITE (*, *) "old row_p", old_row_p WRITE (*, *) "old col_i", old_col_i WRITE (*, *) "old blk_p", old_blk_p END IF DO row = 1, SIZE(old_row_p) - 1 DO blk = old_row_p(row) + 1, old_row_p(row + 1) col = old_col_i(blk) stored_row = row stored_col = col tr = .FALSE. CALL get_stored_canonical(matrix, stored_row, stored_col, tr) IF (stored_row .GT. stored_col) THEN CALL swap(stored_row, stored_col) tr = .NOT. tr END IF IF (debug_mod) & WRITE (*, '(A,2(1X,I5),A,2(1X,I5),";",I7,1X,L1)') & routineN//" X->", row, col, "->", & stored_row, stored_col, blk, tr row_i(blk) = stored_row new_col_i(blk) = stored_col IF (.NOT. tr) THEN new_blk_p(blk) = old_blk_p(blk) ELSE new_blk_p(blk) = -old_blk_p(blk) END IF END DO END DO CALL dbcsr_sort_indices(nblks, row_i, new_col_i, blk_p=new_blk_p) ! 
Re-create the index CALL dbcsr_make_dbcsr_index(new_row_p, row_i, SIZE(new_row_p) - 1, nblks) IF (debug_mod) THEN WRITE (*, *) "new row_p", new_row_p WRITE (*, *) "new row_i", row_i WRITE (*, *) "new col_i", new_col_i WRITE (*, *) "new blk_p", new_blk_p END IF END SUBROUTINE make_index_triangular SUBROUTINE dbcsr_make_dbcsr_index(row_p, row_i, nrows, nblks) !! Collapses a row_p index INTEGER, INTENT(in) :: nrows, nblks INTEGER, DIMENSION(1:nrows + 1), INTENT(out) :: row_p INTEGER, DIMENSION(1:nblks), INTENT(in) :: row_i CHARACTER(len=*), PARAMETER :: routineN = 'dbcsr_make_dbcsr_index' INTEGER :: blk, error_handle, row CALL timeset(routineN, error_handle) row_p(1) = 0 row_p(nrows + 1) = nblks row = 1 blk = 1 DO WHILE (row .LE. nrows) IF (blk .LE. nblks) THEN DO WHILE (row_i(blk) .EQ. row) blk = blk + 1 IF (blk .GT. nblks) THEN row_p(row + 1) = nblks - 1 EXIT END IF END DO END IF row_p(row + 1) = blk - 1 row = row + 1 END DO CALL timestop(error_handle) END SUBROUTINE dbcsr_make_dbcsr_index PURE SUBROUTINE dbcsr_expand_row_index(row_p, row_i, nrows, nblks) !! Expands a row_p index INTEGER, INTENT(IN) :: nrows, nblks INTEGER, DIMENSION(1:nrows + 1), INTENT(IN) :: row_p INTEGER, DIMENSION(1:nblks), INTENT(OUT) :: row_i INTEGER :: row DO row = 1, nrows row_i(row_p(row) + 1:row_p(row + 1)) = row END DO END SUBROUTINE dbcsr_expand_row_index PURE SUBROUTINE dbcsr_expand_row_index_2d(row_p, row_i, nrows, dst_i) !! Expands a row_p index INTEGER, INTENT(IN) :: nrows, dst_i INTEGER, DIMENSION(1:nrows + 1), INTENT(IN) :: row_p INTEGER, DIMENSION(:, :), INTENT(OUT) :: row_i INTEGER :: row DO row = 1, nrows row_i(dst_i, row_p(row) + 1:row_p(row + 1)) = row END DO END SUBROUTINE dbcsr_expand_row_index_2d PURE SUBROUTINE dbcsr_count_row_index_inplace(rows, nrows) !! Counts columns-per-row count from row index array, in-place. INTEGER, INTENT(IN) :: nrows !! number of rows INTEGER, DIMENSION(1:nrows + 1), INTENT(INOUT) :: rows !! the row_p index (input); the count of the number of columns per row (output) INTEGER :: row DO row = 1, nrows rows(row) = rows(row + 1) - rows(row) END DO rows(nrows + 1) = 0 END SUBROUTINE dbcsr_count_row_index_inplace PURE SUBROUTINE dbcsr_count_row_index_copy(rows, counts, nrows) !! Counts columns-per-row count from row index array. INTEGER, INTENT(IN) :: nrows !! number of rows INTEGER, DIMENSION(1:nrows), INTENT(OUT) :: counts !! the count of the number of columns per row INTEGER, DIMENSION(1:nrows + 1), INTENT(IN) :: rows !! the row_p index (input) INTEGER :: row DO row = 1, nrows counts(row) = rows(row + 1) - rows(row) END DO END SUBROUTINE dbcsr_count_row_index_copy PURE SUBROUTINE dbcsr_build_row_index_inplace(rows, nrows) !! Builds row index array from a columns-per-row count, in-place. INTEGER, INTENT(IN) :: nrows !! number of rows INTEGER, DIMENSION(1:nrows + 1), INTENT(INOUT) :: rows !! count of the number of columns per row (input); the row_p index (output) INTEGER :: o, old_count, row old_count = rows(1) rows(1) = 0 IF (nrows .GE. 1) THEN DO row = 2, nrows + 1 o = rows(row) rows(row) = rows(row - 1) + old_count old_count = o END DO END IF END SUBROUTINE dbcsr_build_row_index_inplace PURE SUBROUTINE dbcsr_build_row_index_copy(counts, rows, nrows) !! Builds row index array from a columns-per-row count. INTEGER, INTENT(IN) :: nrows !! number of rows INTEGER, DIMENSION(1:nrows + 1), INTENT(OUT) :: rows !! count of the number of columns per row (input); the row_p index (output) INTEGER, DIMENSION(1:nrows), INTENT(IN) :: counts !! 
         !! count of the number of columns per row

      rows(1:nrows) = counts(1:nrows)
      CALL dbcsr_build_row_index_inplace(rows, nrows)
   END SUBROUTINE dbcsr_build_row_index_copy

   SUBROUTINE dbcsr_addto_index_array(matrix, slot, DATA, reservation, extra)
      !! Adds data to the index. Increases the index size when necessary.
      TYPE(dbcsr_type), INTENT(INOUT) :: matrix
         !! bcsr matrix
      INTEGER, INTENT(IN) :: slot
         !! which index array to add (e.g., dbcsr_slot_row_blk_sizes)
      INTEGER, DIMENSION(:), INTENT(IN), OPTIONAL :: DATA
         !! array holding the index data to add to the index array
      INTEGER, INTENT(IN), OPTIONAL :: reservation, extra
         !! only reserve space for subsequent array
         !! reserve extra space for later additions

      CHARACTER(len=*), PARAMETER :: routineN = 'dbcsr_addto_index_array', &
                                     routineP = moduleN//':'//routineN

      INTEGER :: deplus, space, ub, ub_new

! ---------------------------------------------------------------------------

      IF (debug_mod) THEN
         IF (.NOT. ASSOCIATED(matrix%index)) &
            DBCSR_ABORT("Index must be preallocated.")
         IF (UBOUND(matrix%index, 1) < dbcsr_num_slots) &
            DBCSR_ABORT("Actual index size less than declared size")
         IF (.NOT. PRESENT(DATA) .AND. .NOT. PRESENT(reservation)) &
            DBCSR_ABORT('Either an array or its size must be specified.')
         WRITE (*, *) routineP//' index', matrix%index(:dbcsr_num_slots)
      END IF
      IF (PRESENT(reservation)) THEN
         space = reservation
      ELSE
         space = SIZE(DATA)
      END IF
      IF (PRESENT(extra)) THEN
         deplus = extra
      ELSE
         deplus = 0
      END IF
      ub = UBOUND(matrix%index, 1)
      ! The data area was not defined or the new area is greater than the old:
      IF (matrix%index(slot) .EQ. 0 .OR. &
          space .GT. matrix%index(slot + 1) - matrix%index(slot) + 1) THEN
         IF (debug_mod) WRITE (*, *) routineP//' Slot', slot, 'not filled, adding at', &
            matrix%index(dbcsr_slot_size) + 1, 'sized', space
         matrix%index(slot) = matrix%index(dbcsr_slot_size) + 1
         matrix%index(slot + 1) = matrix%index(slot) + space - 1
         matrix%index(dbcsr_slot_size) = matrix%index(slot + 1)
      END IF
      ! Shorten an index entry.
      IF (space .LT. matrix%index(slot + 1) - matrix%index(slot) + 1) THEN
         IF (debug_mod) WRITE (*, *) routineP//' Shortening index'
         matrix%index(slot + 1) = matrix%index(slot) + space - 1
         CALL dbcsr_repoint_index(matrix, slot)
      END IF
      ub_new = matrix%index(slot + 1) + deplus
      IF (debug_mod) WRITE (*, *) routineP//' need', space, 'at', matrix%index(slot), &
         'to', matrix%index(slot + 1), '(', ub_new, ')', 'have', ub
      IF (ub_new .GT. ub) THEN
         IF (debug_mod) WRITE (*, *) routineP//' Reallocating index to ubound', ub_new
         !CALL reallocate(matrix%index, 1, ub_new)
         CALL ensure_array_size(matrix%index, lb=1, ub=ub_new, &
                                factor=default_resize_factor, &
                                nocopy=.FALSE., &
                                memory_type=matrix%index_memory_type)
         CALL dbcsr_repoint_index(matrix)
      END IF
      IF (debug_mod) WRITE (*, *) routineP//' Adding slot', slot, 'at', &
         matrix%index(slot), 'sized', space
      CALL dbcsr_repoint_index(matrix, slot)
      IF (PRESENT(DATA)) &
         matrix%index(matrix%index(slot):matrix%index(slot + 1)) = DATA(:)
   END SUBROUTINE dbcsr_addto_index_array

   SUBROUTINE dbcsr_clearfrom_index_array(matrix, slot)
      !! Removes data from the index.
      TYPE(dbcsr_type), INTENT(INOUT) :: matrix
         !! bcsr matrix
      INTEGER, INTENT(IN) :: slot
         !! which index array to remove (e.g., dbcsr_slot_row_blk_sizes)

      CHARACTER(len=*), PARAMETER :: routineN = 'dbcsr_clearfrom_index_array', &
                                     routineP = moduleN//':'//routineN

      INTEGER :: space
      INTEGER, DIMENSION(5) :: max_extents

!
--------------------------------------------------------------------------- IF (.NOT. ASSOCIATED(matrix%index)) & DBCSR_ABORT("Index must be preallocated.") IF (UBOUND(matrix%index, 1) < dbcsr_num_slots) & DBCSR_ABORT("Actual index size less than declared size") IF (debug_mod) WRITE (*, *) routineP//' index', & matrix%index(:dbcsr_num_slots) ! Clear index entry pointer matrix%index(slot) = 1 matrix%index(slot + 1) = 0 CALL dbcsr_repoint_index(matrix, slot) ! Update the declared index size max_extents = (/ & matrix%index(dbcsr_slot_row_p + 1), & matrix%index(dbcsr_slot_col_i + 1), & matrix%index(dbcsr_slot_blk_p + 1), & matrix%index(dbcsr_slot_thr_c + 1), & matrix%index(dbcsr_slot_coo_l + 1)/) space = MAX(MAXVAL(max_extents), dbcsr_num_slots) matrix%index(dbcsr_slot_size) = space END SUBROUTINE dbcsr_clearfrom_index_array SUBROUTINE dbcsr_repoint_index(m, slot) !! Updates the index pointers of a bcsr matrix TYPE(dbcsr_type), INTENT(INOUT) :: m !! matrix for which index pointers are updated INTEGER, INTENT(IN), OPTIONAL :: slot !! only repoint this index INTEGER :: s LOGICAL :: all ! --------------------------------------------------------------------------- IF (m%nblks .NE. m%index(dbcsr_slot_nblks)) THEN m%nblks = m%index(dbcsr_slot_nblks) m%nze = m%index(dbcsr_slot_nze) END IF all = .TRUE. IF (PRESENT(slot)) THEN all = .FALSE. s = slot ELSE s = 0 END IF IF (m%index(dbcsr_slot_row_p) .GT. 0 .AND. all .OR. & s .EQ. dbcsr_slot_row_p) THEN IF (m%index(dbcsr_slot_row_p) .GT. 0) THEN m%row_p => m%index(m%index(dbcsr_slot_row_p): & m%index(dbcsr_slot_row_p + 1)) ELSE NULLIFY (m%row_p) END IF END IF IF (m%index(dbcsr_slot_col_i) .GT. 0 .AND. all .OR. & s .EQ. dbcsr_slot_col_i) THEN IF (m%index(dbcsr_slot_col_i) .GT. 0) THEN m%col_i => m%index(m%index(dbcsr_slot_col_i): & m%index(dbcsr_slot_col_i + 1)) ELSE NULLIFY (m%col_i) END IF END IF IF (m%index(dbcsr_slot_blk_p) .GT. 0 .AND. all .OR. & s .EQ. dbcsr_slot_blk_p) THEN IF (m%index(dbcsr_slot_blk_p) .GT. 0) THEN m%blk_p => m%index(m%index(dbcsr_slot_blk_p): & m%index(dbcsr_slot_blk_p + 1)) ELSE NULLIFY (m%blk_p) END IF END IF IF (m%index(dbcsr_slot_thr_c) .GT. 0 .AND. all .OR. & s .EQ. dbcsr_slot_thr_c) THEN IF (m%index(dbcsr_slot_thr_c) .GT. 0) THEN m%thr_c => m%index(m%index(dbcsr_slot_thr_c): & m%index(dbcsr_slot_thr_c + 1)) ELSE NULLIFY (m%thr_c) END IF END IF IF (m%index(dbcsr_slot_coo_l) .GT. 0 .AND. all .OR. & s .EQ. dbcsr_slot_coo_l) THEN IF (m%index(dbcsr_slot_coo_l) .GT. 0) THEN m%coo_l => m%index(m%index(dbcsr_slot_coo_l): & m%index(dbcsr_slot_coo_l + 1)) ELSE NULLIFY (m%coo_l) END IF END IF IF (all) THEN m%index(dbcsr_slot_nblks) = m%nblks m%index(dbcsr_slot_nze) = m%nze END IF END SUBROUTINE dbcsr_repoint_index SUBROUTINE dbcsr_make_index_exist(m) TYPE(dbcsr_type), INTENT(INOUT) :: m !! Create index for this matrix CHARACTER(len=*), PARAMETER :: routineN = 'dbcsr_make_index_exist' INTEGER :: error_handle ! --------------------------------------------------------------------------- CALL timeset(routineN, error_handle) IF (.NOT. ASSOCIATED(m%index)) & DBCSR_ABORT("Index array does not yet exist.") IF (.NOT. ASSOCIATED(m%row_p)) THEN CALL dbcsr_addto_index_array(m, dbcsr_slot_row_p, & reservation=m%nblkrows_total + 1) m%row_p(:) = 0 END IF IF (.NOT. ASSOCIATED(m%col_i)) THEN CALL dbcsr_addto_index_array(m, dbcsr_slot_col_i, & reservation=0) END IF IF (.NOT. 
ASSOCIATED(m%blk_p)) THEN CALL dbcsr_addto_index_array(m, dbcsr_slot_blk_p, & reservation=0) END IF CALL dbcsr_repoint_index(m) CALL timestop(error_handle) END SUBROUTINE dbcsr_make_index_exist SUBROUTINE dbcsr_sort_indices(n, row_i, col_i, blk_p, blk_d) !! Sorts the rows & columns of a work matrix !! !! Description !! Sorts the row and column indices so that the rows monotonically !! increase and the columns monotonically increase within each row. !! Passing the blk_p array rearranges the block pointers accordingly. !! This must be done if they are pointing to valid data, otherwise !! they become invalid. INTEGER, INTENT(IN) :: n !! number of blocks (elements) to sort INTEGER, DIMENSION(1:), INTENT(INOUT) :: row_i, col_i !! row indices !! column indices INTEGER, DIMENSION(1:), INTENT(INOUT), OPTIONAL :: blk_p, blk_d !! block pointers !! data storage CHARACTER(len=*), PARAMETER :: routineN = 'dbcsr_sort_indices', & routineP = moduleN//':'//routineN INTEGER(KIND=int_8), PARAMETER :: lmask8 = 4294967295_int_8 INTEGER :: error_handle, i INTEGER(KIND=int_8), ALLOCATABLE, DIMENSION(:) :: sort_keys INTEGER, ALLOCATABLE, DIMENSION(:) :: buf, buf_d ! --------------------------------------------------------------------------- IF (n .LE. 0) RETURN IF (SIZE(row_i) .EQ. 0) RETURN CALL timeset(routineN, error_handle) IF (SIZE(row_i) < n) DBCSR_ABORT('row_i too small') IF (SIZE(col_i) < n) DBCSR_ABORT('col_i too small') IF (PRESENT(blk_p)) THEN IF (SIZE(blk_p) < n) DBCSR_ABORT('blk_p too small') ALLOCATE (buf(n)) buf(1:n) = blk_p(1:n) END IF IF (PRESENT(blk_d)) THEN ALLOCATE (buf_d(n)) buf_d(1:n) = blk_d(1:n) END IF ! Create an ordering for both rows and columns. If the blk_p must ! be rearranged, then the col_i array will be used as a ! permutation vector. ALLOCATE (sort_keys(n)) sort_keys(:) = IOR(ISHFT(INT(row_i(1:n), int_8), 32), INT(col_i(1:n), int_8)) IF (PRESENT(blk_p)) col_i(1:n) = (/(i, i=1, n)/) ! Now do a nice quicksort. CALL sort(sort_keys, n, col_i) ! Since blk_d is usually not present we can have two loops that ! are essentially the same. IF (PRESENT(blk_p)) THEN DO i = 1, n blk_p(i) = buf(col_i(i)) END DO DEALLOCATE (buf) END IF IF (PRESENT(blk_d)) THEN DO i = 1, n blk_d(i) = buf_d(col_i(i)) END DO DEALLOCATE (buf_d) END IF DO i = 1, n col_i(i) = INT(IAND(sort_keys(i), lmask8), int_4) row_i(i) = INT(ISHFT(sort_keys(i), -32), int_4) END DO DEALLOCATE (sort_keys) IF (debug_mod .AND. PRESENT(blk_p)) & WRITE (*, *) routineP//' sort, blk_p =', blk_p CALL timestop(error_handle) END SUBROUTINE dbcsr_sort_indices SUBROUTINE dbcsr_index_prune_deleted(matrix) !! Removes the deleted blocks from the index. !! !! Description TYPE(dbcsr_type), INTENT(INOUT) :: matrix !! Prune the index of this matrix. CHARACTER(len=*), PARAMETER :: routineN = 'dbcsr_index_prune_deleted' INTEGER :: error_handle, nblks_max, new_blk, nrows, & old_blk, row INTEGER, ALLOCATABLE, DIMENSION(:) :: new_blk_p, new_col_i, new_row_count INTEGER, DIMENSION(:), POINTER :: old_blk_p, old_col_i, old_row_p ! --------------------------------------------------------------------------- CALL timeset(routineN, error_handle) ! old_row_p => matrix%row_p old_col_i => matrix%col_i old_blk_p => matrix%blk_p ! nrows = matrix%nblkrows_total nblks_max = old_row_p(nrows + 1) ALLOCATE (new_row_count(nrows)) ALLOCATE (new_col_i(nblks_max)) ALLOCATE (new_blk_p(nblks_max)) ! ! Build up the new index from all non-deleted blocks in the ! existing index. 
new_blk = 0 DO row = 1, nrows new_row_count(row) = 0 DO old_blk = old_row_p(row) + 1, old_row_p(row + 1) IF (old_blk_p(old_blk) .GT. 0) THEN new_blk = new_blk + 1 new_row_count(row) = new_row_count(row) + 1 new_col_i(new_blk) = old_col_i(old_blk) new_blk_p(new_blk) = old_blk_p(old_blk) END IF END DO END DO ! CALL dbcsr_clearfrom_index_array(matrix, dbcsr_slot_row_p) CALL dbcsr_clearfrom_index_array(matrix, dbcsr_slot_col_i) CALL dbcsr_clearfrom_index_array(matrix, dbcsr_slot_blk_p) CALL dbcsr_addto_index_array(matrix, dbcsr_slot_row_p, & reservation=nrows + 1, extra=2*new_blk) old_row_p => matrix%row_p CALL dbcsr_build_row_index(counts=new_row_count, rows=old_row_p, & nrows=nrows) CALL dbcsr_addto_index_array(matrix, dbcsr_slot_col_i, DATA=new_col_i(1:new_blk)) CALL dbcsr_addto_index_array(matrix, dbcsr_slot_blk_p, DATA=new_blk_p(1:new_blk)) matrix%nblks = new_blk matrix%index(dbcsr_slot_nblks) = new_blk ! DEALLOCATE (new_col_i, new_blk_p, new_row_count) ! CALL timestop(error_handle) END SUBROUTINE dbcsr_index_prune_deleted SUBROUTINE transpose_index_local(new_col_p, new_row_i, old_row_p, & old_col_i, new_blk_p, old_blk_p) !! Re-indexes row_p and blk_i according to columns. !! !! The re-indexing is equivalent to a local-only transpose. INTEGER, DIMENSION(:), INTENT(OUT) :: new_col_p, new_row_i !! new column pointer !! new row index INTEGER, DIMENSION(:), INTENT(IN) :: old_row_p, old_col_i !! old row pointer !! old column index INTEGER, DIMENSION(:), INTENT(OUT), OPTIONAL :: new_blk_p !! new block pointer INTEGER, DIMENSION(:), INTENT(IN), OPTIONAL :: old_blk_p !! old block pointer CHARACTER(len=*), PARAMETER :: routineN = 'transpose_index_local' INTEGER :: error_handle, nblks, ncols_new, nrows_old INTEGER, ALLOCATABLE, DIMENSION(:) :: new_col_i LOGICAL :: blks ! --------------------------------------------------------------------------- CALL timeset(routineN, error_handle) blks = PRESENT(new_blk_p) .AND. PRESENT(old_blk_p) nblks = SIZE(old_col_i) nrows_old = SIZE(old_row_p) - 1 ncols_new = SIZE(new_col_p) - 1 IF (blks) new_blk_p(:) = old_blk_p(:) ALLOCATE (new_col_i(nblks)) CALL dbcsr_expand_row_index(old_row_p, new_row_i, nrows_old, nblks) new_col_i(:) = old_col_i(:) CALL dbcsr_sort_indices(nblks, new_col_i, new_row_i, new_blk_p) CALL dbcsr_make_dbcsr_index(new_col_p, new_col_i, ncols_new, nblks) DEALLOCATE (new_col_i) CALL timestop(error_handle) END SUBROUTINE transpose_index_local SUBROUTINE dbcsr_make_index_canonical(matrix, cp2k) !! Makes a canonical index to the distribution. !! !! Canonical means that it respects the distribution. TYPE(dbcsr_type), INTENT(INOUT) :: matrix !! matrix for which to make canonical index LOGICAL, INTENT(IN), OPTIONAL :: cp2k !! make CP2K triangular index from canonical; default is false INTEGER :: nb, nc, nr INTEGER, ALLOCATABLE, DIMENSION(:) :: new_blk_p, new_col_i, new_row_p LOGICAL :: rev ! --------------------------------------------------------------------------- rev = .FALSE. 
IF (PRESENT(cp2k)) rev = cp2k nr = SIZE(matrix%row_p) ALLOCATE (new_row_p(nr)) nc = SIZE(matrix%col_i) ALLOCATE (new_col_i(nc)) nb = SIZE(matrix%blk_p) ALLOCATE (new_blk_p(nb)) IF (rev) THEN CALL make_index_triangular(new_row_p, new_col_i, new_blk_p, & matrix%row_p, matrix%col_i, matrix%blk_p, matrix) ELSE CALL make_index_canonical(new_row_p, new_col_i, new_blk_p, & matrix%row_p, matrix%col_i, matrix%blk_p, matrix) END IF matrix%row_p(:) = new_row_p matrix%col_i(:) = new_col_i matrix%blk_p(:) = new_blk_p END SUBROUTINE dbcsr_make_index_canonical SUBROUTINE make_dense_index(row_p, col_i, blk_p, & nblkrows_total, nblkcols_total, myblkrows, myblkcols, & row_blk_offsets, col_blk_offsets, meta, make_tr) !! Makes the index for a dense matrix !! @note Used for making matrices dense/undense !INTEGER, DIMENSION(:), INTENT(OUT) :: row_p, col_i, blk_p INTEGER, INTENT(IN) :: nblkrows_total !! Total blocked rows INTEGER, DIMENSION(:), INTENT(OUT) :: blk_p, col_i !! Storage for new index !! Storage for new index INTEGER, DIMENSION(1:nblkrows_total + 1), & INTENT(OUT) :: row_p !! Storage for new index INTEGER, INTENT(IN) :: nblkcols_total !! Total blocked columns INTEGER, DIMENSION(:), INTENT(IN) :: myblkrows, myblkcols, row_blk_offsets, & col_blk_offsets !! List of blocked rows in my process row !! List of blocked columns in my process column INTEGER, DIMENSION(dbcsr_meta_size), INTENT(INOUT) :: meta !! Metadata updates for new index LOGICAL, INTENT(IN), OPTIONAL :: make_tr !! Dense blocks are transposed CHARACTER(len=*), PARAMETER :: routineN = 'make_dense_index' INTEGER :: blk, c, col_l, mynblkcols, mynblkrows, & nblks, nze, prev_row, row, row_l, & sign_carrier, sz ! --------------------------------------------------------------------------- sign_carrier = 1 IF (PRESENT(make_tr)) THEN IF (make_tr) sign_carrier = -1 END IF mynblkrows = SIZE(myblkrows) mynblkcols = SIZE(myblkcols) meta(dbcsr_slot_nblkrows_local) = mynblkrows meta(dbcsr_slot_nblkcols_local) = mynblkcols nblks = mynblkrows*mynblkcols nze = 1 IF (nblks .EQ. 0) THEN row_p(1:) = 0 ELSE row_p(1) = 0 !row_p(nrows+1) = nblks prev_row = 1 blk = 0 DO row_l = 1, mynblkrows row = myblkrows(row_l) row_p(prev_row + 1:row) = blk DO col_l = 1, mynblkcols c = myblkcols(col_l) col_i(blk + col_l) = c sz = (row_blk_offsets(row + 1) - row_blk_offsets(row))* & (col_blk_offsets(c + 1) - col_blk_offsets(c)) IF (sz .GT. 0) THEN blk_p(blk + col_l) = SIGN(nze, sign_carrier) nze = nze + sz ELSE blk_p(blk + col_l) = 0 END IF END DO prev_row = row blk = blk + mynblkcols END DO IF (blk /= nblks) DBCSR_ABORT("Block mismatch") row_p(prev_row + 1:nblkrows_total + 1) = nblks END IF IF (debug_mod) THEN WRITE (*, *) routineN//" new index" WRITE (*, *) "row_p=", row_p WRITE (*, *) "col_i=", col_i WRITE (*, *) "blk_p=", blk_p END IF meta(dbcsr_slot_nblkrows_total) = nblkrows_total meta(dbcsr_slot_nblkcols_total) = nblkcols_total END SUBROUTINE make_dense_index SUBROUTINE make_undense_index( & row_p, col_i, blk_p, & distribution, local_row_offsets, local_col_offsets, & meta) !! Makes a blocked index from a dense matrix !! @note Used for making matrices dense/undense INTEGER, DIMENSION(:), INTENT(OUT) :: row_p, col_i, blk_p !! Storage for new index !! Storage for new index !! Storage for new index TYPE(dbcsr_distribution_obj) :: distribution !! Blocked distribution INTEGER, DIMENSION(:), INTENT(IN) :: local_row_offsets, local_col_offsets INTEGER, DIMENSION(dbcsr_meta_size), INTENT(INOUT) :: meta !! 
Metadata updates for new index INTEGER :: col, lr, lrow, nblkcols_local, & nblkrows_local, nblkrows_total, & nfullcols_local, prev_row, row INTEGER, DIMENSION(:), POINTER :: local_cols, local_rows ! --------------------------------------------------------------------------- local_cols => dbcsr_distribution_local_cols(distribution) local_rows => dbcsr_distribution_local_rows(distribution) meta(dbcsr_slot_nblkrows_total) = dbcsr_distribution_nrows(distribution) meta(dbcsr_slot_nblkcols_total) = dbcsr_distribution_ncols(distribution) meta(dbcsr_slot_nblkrows_local) = dbcsr_distribution_nlocal_rows(distribution) meta(dbcsr_slot_nblkcols_local) = dbcsr_distribution_nlocal_cols(distribution) nblkrows_total = meta(dbcsr_slot_nblkrows_total) nblkcols_local = meta(dbcsr_slot_nblkcols_local) nblkrows_local = meta(dbcsr_slot_nblkrows_local) nfullcols_local = meta(dbcsr_slot_nfullcols_local) ! Fill the row_p array. lr = 0 row_p(1) = 0 prev_row = 1 DO lrow = 1, nblkrows_local row = local_rows(lrow) row_p(prev_row + 1:row) = lr lr = lr + nblkcols_local row_p(row + 1) = lr prev_row = row END DO row_p(prev_row + 1:nblkrows_total + 1) = lr ! DO row = 1, nblkrows_local DO col = 1, nblkcols_local col_i(nblkcols_local*(row - 1) + col) = local_cols(col) blk_p(nblkcols_local*(row - 1) + col) = 1 + & (local_row_offsets(row) - 1)*nfullcols_local & + (local_col_offsets(col) - 1)* & (local_row_offsets(row + 1) - local_row_offsets(row)) END DO END DO END SUBROUTINE make_undense_index SUBROUTINE merge_index_arrays(new_row_i, new_col_i, new_blk_p, new_size, & old_row_i, old_col_i, old_blk_p, old_size, & add_ip, add_size, new_blk_d, old_blk_d, & added_size_offset, added_sizes, added_size, added_nblks) !! Merges two indices !! !! Added sizes !! added_size_offset and added_sizes can be optionally !! specified. This is meant for cases where the added blocks may !! be duplicates of existing blocks. In this way it is possible !! to recalculate new block pointers to avoid wasted space. !! @note Used in local multiply !! Assumes they are both pre-sorted INTEGER, INTENT(IN) :: new_size !! size of merged index INTEGER, DIMENSION(new_size), INTENT(OUT) :: new_blk_p, new_col_i, new_row_i !! merged result !! merged result !! merged result INTEGER, INTENT(IN) :: old_size !! size of current index INTEGER, DIMENSION(old_size), INTENT(IN) :: old_blk_p, old_col_i, old_row_i !! current index !! current index !! current index INTEGER, INTENT(IN) :: add_size !! size of index to add into the current index INTEGER, DIMENSION(3, add_size), INTENT(IN) :: add_ip !! index to add into the current index INTEGER, DIMENSION(new_size), INTENT(OUT), & OPTIONAL :: new_blk_d INTEGER, DIMENSION(old_size), INTENT(IN), OPTIONAL :: old_blk_d INTEGER, INTENT(IN), OPTIONAL :: added_size_offset !! specify base of added sizes INTEGER, DIMENSION(:), INTENT(IN), OPTIONAL :: added_sizes !! specify sizes of added blocks INTEGER, INTENT(OUT), OPTIONAL :: added_size, added_nblks !! counts number of sizes of added blocks !! actual number of new elements INTEGER :: add_blk, bp, i, merge_from_whom, & new_blk, old_blk LOGICAL :: multidata ! --------------------------------------------------------------------------- bp = 0 multidata = PRESENT(old_blk_d) .AND. PRESENT(new_blk_d) IF (old_size + add_size .NE. new_size) & DBCSR_WARN("Mismatch of new and old size") IF (PRESENT(added_size_offset) .NEQV. PRESENT(added_sizes)) & DBCSR_ABORT("Must specify a set of arguments") IF (PRESENT(added_sizes) .NEQV. 
          PRESENT(added_size)) &
         DBCSR_ABORT("Must specify a set of arguments")
      IF (debug_mod) THEN
         WRITE (*, *) " Old array", old_size
         DO i = 1, old_size
            WRITE (*, '(I7,2X,I7,2X,I7)') old_row_i(i), old_col_i(i), old_blk_p(i)
         END DO
         WRITE (*, *) " Add array", add_size
         DO i = 1, add_size
            WRITE (*, '(I7,2X,I7,2X,I7)') add_ip(1:3, i)
         END DO
      END IF
      IF (PRESENT(added_nblks)) added_nblks = 0
      IF (PRESENT(added_size)) THEN
         added_size = 0
         bp = added_size_offset
      END IF
      IF (add_size .GT. 0) THEN
         old_blk = 1
         add_blk = 1
         new_blk = 1
         IF (old_size .EQ. 0) THEN
            new_row_i(1:add_size) = add_ip(1, 1:add_size)
            new_col_i(1:add_size) = add_ip(2, 1:add_size)
            new_blk_p(1:add_size) = add_ip(3, 1:add_size)
            !IF (multidata) new_blk_d(1:add_size) = add_ip(4, 1:add_size)
            IF (PRESENT(added_nblks)) added_nblks = add_size
            IF (PRESENT(added_size)) added_size = SUM(added_sizes)
         ELSE
            DO WHILE (new_blk .LE. new_size)
               merge_from_whom = 0
               IF (old_blk .LE. old_size .AND. add_blk .LE. add_size) THEN
                  IF (add_ip(1, add_blk) .EQ. old_row_i(old_blk) &
                      .AND. add_ip(2, add_blk) .EQ. old_col_i(old_blk)) THEN
                     IF (debug_mod) THEN
                        WRITE (*, *) "Duplicate block! addblk", &
                           add_blk, "oldblk", old_blk
                     END IF
                  END IF
                  ! Rows come first
                  IF (add_ip(1, add_blk) .LT. old_row_i(old_blk)) THEN
                     merge_from_whom = 2
                  ELSEIF (add_ip(1, add_blk) .GT. old_row_i(old_blk)) THEN
                     merge_from_whom = 1
                  ELSE
                     ! Same rows, so now come the columns
                     IF (add_ip(2, add_blk) .LT. old_col_i(old_blk)) THEN
                        ! Merges from the add array
                        merge_from_whom = 2
                     ELSEIF (add_ip(2, add_blk) .GT. old_col_i(old_blk)) THEN
                        ! Merges from the old array
                        merge_from_whom = 1
                     ELSE
                        ! Merge from old array and skip one in the new array
                        IF (debug_mod) THEN
                           WRITE (*, *) "Duplicate, keeping old", &
                              add_ip(1, add_blk), add_ip(2, add_blk)
                        END IF
                        merge_from_whom = 1
                        add_blk = add_blk + 1
                     END IF
                  END IF
               ELSE
                  IF (add_blk .LE. add_size) THEN
                     ! Merges from the add array
                     merge_from_whom = 2
                  ELSEIF (old_blk .LE. old_size) THEN
                     ! Merges from the old array
                     merge_from_whom = 1
                  ELSE
                     ! Hmmm, nothing to merge...
                     merge_from_whom = 0
                     !WRITE(*,*)"Ran out of data to merge"
                  END IF
               END IF
               SELECT CASE (merge_from_whom)
               CASE (2)
                  ! Merges from the add array
                  new_row_i(new_blk) = add_ip(1, add_blk)
                  new_col_i(new_blk) = add_ip(2, add_blk)
                  new_blk_p(new_blk) = add_ip(3, add_blk)
                  !IF (multidata) new_blk_d(new_blk) = add_ip(4, add_blk)
                  IF (PRESENT(added_nblks)) added_nblks = added_nblks + 1
                  IF (PRESENT(added_sizes)) THEN
                     new_blk_p(new_blk) = bp
                     bp = bp + added_sizes(add_blk)
                     added_size = added_size + added_sizes(add_blk)
                  END IF
                  add_blk = add_blk + 1
               CASE (1)
                  ! Merges from the old array
                  new_row_i(new_blk) = old_row_i(old_blk)
                  new_col_i(new_blk) = old_col_i(old_blk)
                  new_blk_p(new_blk) = old_blk_p(old_blk)
                  IF (multidata) new_blk_d(new_blk) = old_blk_d(old_blk)
                  old_blk = old_blk + 1
               CASE DEFAULT
                  !WRITE(*,*)"Nothing to merge"
               END SELECT
               new_blk = new_blk + 1
            END DO
         END IF
      ELSE
         new_row_i(1:old_size) = old_row_i(1:old_size)
         new_col_i(1:old_size) = old_col_i(1:old_size)
         new_blk_p(1:old_size) = old_blk_p(1:old_size)
         IF (multidata) new_blk_d(1:old_size) = old_blk_d(1:old_size)
      END IF
      IF (debug_mod) THEN
         WRITE (*, *) " New array"
         DO i = 1, new_size
            WRITE (*, '(4(2X,I7))') new_row_i(i), new_col_i(i), new_blk_p(i)
         END DO
      END IF
   END SUBROUTINE merge_index_arrays

   SUBROUTINE dbcsr_make_index_local_row(matrix)
      !! Converts BCSR global row index to local row index.
      TYPE(dbcsr_type), INTENT(INOUT) :: matrix
         !! matrix for which to make the local row index

      CHARACTER(len=*), PARAMETER :: routineN = 'dbcsr_make_index_local_row'

      INTEGER :: error_handle, lrow, nlocal_rows, &
                 ntotal_rows, prow
      INTEGER, ALLOCATABLE, DIMENSION(:) :: local_row_p
      INTEGER, DIMENSION(:), POINTER :: local_rows

! ---------------------------------------------------------------------------

      CALL timeset(routineN, error_handle)
      IF (.NOT. ASSOCIATED(matrix%row_p)) &
         DBCSR_ABORT("The row index must be initialized.")
      IF (matrix%bcsc) &
         DBCSR_ABORT("No support for BCSC yet.")
      !
      prow = matrix%index(dbcsr_slot_home_vprow)
      IF (prow .LT. 0) THEN
         prow = matrix%index(dbcsr_slot_home_prow)
      END IF
      nlocal_rows = matrix%nblkrows_local
      ALLOCATE (local_row_p(nlocal_rows + 1))
      ! The existing row_p is converted from an indexing array into a
      ! counting array. Because it is later discarded, the counting is
      ! done in-place.
      ntotal_rows = matrix%nblkrows_total
      CALL dbcsr_count_row_index(matrix%row_p, ntotal_rows)
      ! We first have to find the local rows for the given prow.
      local_rows => array_data(matrix%local_rows)
      IF (SIZE(local_rows) /= nlocal_rows) &
         DBCSR_ABORT("Mismatch in the number of local rows.")
      ! The counts are mapped to local rows,
      DO lrow = 1, nlocal_rows
         local_row_p(lrow) = matrix%row_p(local_rows(lrow))
      END DO
      IF (SUM(matrix%row_p(1:ntotal_rows)) /= SUM(local_row_p(1:nlocal_rows))) &
         DBCSR_ABORT("Inconsistent row counts. Perhaps non-local rows contain data?")
      ! then converted into an index.
      CALL dbcsr_build_row_index(local_row_p, nlocal_rows)
      ! The local row index replaces the global one.
      CALL dbcsr_clearfrom_index_array(matrix, dbcsr_slot_row_p)
      CALL dbcsr_addto_index_array(matrix, dbcsr_slot_row_p, DATA=local_row_p)
      ! Finally the matrix is designated as having a local-based index.
      matrix%local_indexing = .TRUE.
      IF (careful_mod) THEN
         IF (array_size(matrix%local_rows) /= nlocal_rows) &
            DBCSR_ABORT("Inconsistent local row counts.")
         IF (array_size(matrix%global_rows) /= ntotal_rows) &
            DBCSR_ABORT("Inconsistent global row counts.")
         IF (array_size(matrix%global_rows) .EQ. 0) THEN
            IF (nlocal_rows /= 0) &
               DBCSR_ABORT("Invalid number of local or global rows.")
         END IF
      END IF
      DEALLOCATE (local_row_p)
      !
      CALL timestop(error_handle)
   END SUBROUTINE dbcsr_make_index_local_row

   SUBROUTINE dbcsr_make_index_list(matrix, thread_redist)
      !! Converts BCSR index into list indices (similar to work matrices)
      TYPE(dbcsr_type), INTENT(INOUT) :: matrix
         !! matrix for which to make the list index
      LOGICAL, INTENT(IN) :: thread_redist
         !! make a thread subdistribution

      CHARACTER(len=*), PARAMETER :: routineN = 'dbcsr_make_index_list'
      LOGICAL, PARAMETER :: dbg = .FALSE.

      INTEGER :: blk, error_handle, ithread, my_cnt, &
                 nblks, nrows, nthreads
      INTEGER, ALLOCATABLE, DIMENSION(:, :) :: blki
      INTEGER, DIMENSION(0) :: zero_len_array
      INTEGER, DIMENSION(:), POINTER :: global_cols, local_rows, td, thr_c

! ---------------------------------------------------------------------------

      CALL timeset(routineN, error_handle)
      IF (.NOT. ASSOCIATED(matrix%row_p)) &
         DBCSR_ABORT("The row index must be initialized.")
      IF (matrix%list_indexing) &
         DBCSR_ABORT("List index already exists?")
      IF (matrix%bcsc) &
         DBCSR_ABORT("No support for BCSC yet.")
      IF (matrix%nblks /= SIZE(matrix%col_i)) &
         DBCSR_ABORT("Block count mismatch.")
      IF (matrix%nblks /= SIZE(matrix%blk_p)) &
         DBCSR_ABORT("Block count mismatch")
      !
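      ! The list ("coo_l") index built below stores one (row, column,
      ! block-pointer) triple per block, flattened into a single integer
      ! array with three consecutive entries per block.  When a thread
      ! sub-distribution is requested, thr_c first holds per-thread block
      ! counts and is then converted in place into offsets by
      ! dbcsr_build_row_index_inplace.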
IF (matrix%local_indexing) THEN IF (SIZE(matrix%row_p) - 1 /= matrix%nblkrows_local) & DBCSR_ABORT("Local row index incorrectly sized.") ELSE IF (SIZE(matrix%row_p) - 1 /= matrix%nblkrows_total) & DBCSR_ABORT("Global row index incorrectly sized") END IF ! matrix%list_indexing = .TRUE. ! IF (matrix%local_indexing) THEN nrows = matrix%nblkrows_local ELSE nrows = matrix%nblkrows_total END IF ! nblks = matrix%nblks ALLOCATE (blki(3, nblks)) CALL dbcsr_expand_row_index_2d(matrix%row_p, blki, nrows, 1) IF (matrix%local_indexing) THEN global_cols => array_data(matrix%global_cols) ! If local indexing is enabled, then the rows but not the ! columns are already localized IF (dbg) THEN WRITE (*, *) routineN//" Making local columns" WRITE (*, '(10(1X,i7))') global_cols WRITE (*, *) 'local' WRITE (*, '(10(1X,i7))') array_data(matrix%local_cols) END IF DO blk = 1, nblks blki(2, blk) = global_cols(matrix%col_i(blk)) blki(3, blk) = matrix%blk_p(blk) END DO ELSE IF (dbg) WRITE (*, *) routineN//" Not making local columns" DO blk = 1, nblks blki(2, blk) = matrix%col_i(blk) blki(3, blk) = matrix%blk_p(blk) END DO END IF CALL dbcsr_clearfrom_index_array(matrix, dbcsr_slot_row_p) CALL dbcsr_clearfrom_index_array(matrix, dbcsr_slot_col_i) CALL dbcsr_clearfrom_index_array(matrix, dbcsr_slot_blk_p) nthreads = 0 !$ nthreads = OMP_GET_MAX_THREADS() ! ! Reshuffle according to threads IF (nthreads .GT. 0 .AND. thread_redist) THEN td => array_data(dbcsr_distribution_thread_dist(dbcsr_distribution(matrix))) IF (matrix%local_indexing) THEN local_rows => array_data(matrix%local_rows) END IF !$OMP PARALLEL DEFAULT (NONE) & !$OMP PRIVATE (my_cnt, ithread, blk) & !$OMP SHARED (td, blki, nthreads, thr_c, nblks, matrix,local_rows) ! ithread = 0 !$ ithread = OMP_GET_THREAD_NUM() !$OMP MASTER !$ nthreads = OMP_GET_NUM_THREADS() CALL dbcsr_addto_index_array(matrix, dbcsr_slot_thr_c, & reservation=nthreads + 1, extra=3*nblks) thr_c => matrix%thr_c CALL dbcsr_addto_index_array(matrix, dbcsr_slot_coo_l, & reservation=3*nblks) !$OMP END MASTER !$OMP BARRIER my_cnt = 0 IF (matrix%local_indexing) THEN my_cnt = COUNT(td(local_rows(blki(1, :))) .EQ. ithread) ELSE my_cnt = COUNT(td(blki(1, :)) .EQ. ithread) END IF !DO blk = 1, nblks ! IF (td(blki(1, blk)) .EQ. ithread) my_cnt = my_cnt+1 !ENDDO thr_c(ithread + 1) = my_cnt !$OMP BARRIER !$OMP MASTER CALL dbcsr_build_row_index_inplace(thr_c, nthreads) !$OMP END MASTER !$OMP BARRIER my_cnt = (thr_c(ithread + 1) + 1)*3 - 2 IF (matrix%local_indexing) THEN DO blk = 1, nblks IF (td(local_rows(blki(1, blk))) .EQ. ithread) THEN matrix%coo_l(my_cnt:my_cnt + 2) = blki(1:3, blk) my_cnt = my_cnt + 3 END IF END DO ELSE DO blk = 1, nblks IF (td(blki(1, blk)) .EQ. ithread) THEN matrix%coo_l(my_cnt:my_cnt + 2) = blki(1:3, blk) my_cnt = my_cnt + 3 END IF END DO END IF !$OMP END PARALLEL ELSE ! Small price to pay for avoiding infinite recursions. DO blk = 2, nblks IF (blki(1, blk) .EQ. blki(1, blk - 1) .AND. blki(2, blk) .EQ. blki(2, blk - 1)) THEN ! Weird assertion to give some idea of the two blocks. IF (-blki(1, blk) /= blki(2, blk)) & CALL dbcsr_abort(__LOCATION__, & "Should not have duplicate matrix blocks. (-row, col) is duplicated.") END IF END DO ! IF (nblks > 0) THEN CALL dbcsr_addto_index_array(matrix, dbcsr_slot_coo_l, & DATA=RESHAPE(blki, (/3*nblks/))) ELSE CALL dbcsr_addto_index_array(matrix, dbcsr_slot_coo_l, & DATA=zero_len_array) END IF END IF DEALLOCATE (blki) ! CALL timestop(error_handle) END SUBROUTINE dbcsr_make_index_list SUBROUTINE dbcsr_index_compact(matrix) !! Compacts an index. 
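      !! The live index arrays (row_p, col_i, blk_p, thr_c, coo_l) are copied
      !! aside, matrix%index is reallocated at the minimal required size, and
      !! the arrays are re-added, freeing any slack left by earlier growth.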
TYPE(dbcsr_type), INTENT(INOUT) :: matrix !! matrix for which to make canonical index CHARACTER(len=*), PARAMETER :: routineN = 'dbcsr_index_compact' INTEGER :: error_handle, new_size, size_blk_p, & size_col_i, size_coo_l, size_row_p, & size_thr_c INTEGER, ALLOCATABLE, DIMENSION(:) :: blk_p, col_i, coo_l, meta, row_p, thr_c LOGICAL :: compact, has_blk_p, has_col_i, & has_coo_l, has_row_p, has_thr_c ! --------------------------------------------------------------------------- CALL timeset(routineN, error_handle) ! Ensures the index pointers are set. CALL dbcsr_repoint_index(matrix) ! Check that compaction is even needed. has_row_p = ASSOCIATED(matrix%row_p) IF (has_row_p) THEN size_row_p = SIZE(matrix%row_p) ELSE size_row_p = 0 END IF has_col_i = ASSOCIATED(matrix%col_i) IF (has_col_i) THEN size_col_i = SIZE(matrix%col_i) ELSE size_col_i = 0 END IF has_blk_p = ASSOCIATED(matrix%blk_p) IF (has_blk_p) THEN size_blk_p = SIZE(matrix%blk_p) ELSE size_blk_p = 0 END IF has_thr_c = ASSOCIATED(matrix%thr_c) IF (has_thr_c) THEN size_thr_c = SIZE(matrix%thr_c) ELSE size_thr_c = 0 END IF has_coo_l = ASSOCIATED(matrix%coo_l) IF (has_coo_l) THEN size_coo_l = SIZE(matrix%coo_l) ELSE size_coo_l = 0 END IF ! new_size = dbcsr_num_slots + & size_row_p + size_col_i + size_blk_p + size_thr_c + size_coo_l compact = new_size .LT. SIZE(matrix%index) IF (compact) THEN ! Store old index arrays. IF (has_row_p) THEN ALLOCATE (row_p(size_row_p)) row_p(:) = matrix%row_p(:) END IF IF (has_col_i) THEN ALLOCATE (col_i(size_col_i)) col_i(:) = matrix%col_i(:) END IF IF (has_blk_p) THEN ALLOCATE (blk_p(size_blk_p)) blk_p(:) = matrix%blk_p(:) END IF IF (has_thr_c) THEN ALLOCATE (thr_c(size_thr_c)) thr_c(:) = matrix%thr_c(:) END IF IF (has_coo_l) THEN ALLOCATE (coo_l(size_coo_l)) coo_l(:) = matrix%coo_l(:) END IF ALLOCATE (meta(dbcsr_num_slots)) meta(:) = matrix%index(1:dbcsr_num_slots) ! Clear the index. CALL memory_deallocate(matrix%index, & dbcsr_get_index_memory_type(matrix)) NULLIFY (matrix%index) CALL memory_allocate(matrix%index, new_size, & dbcsr_get_index_memory_type(matrix)) ! ! Now copy the old index arrays into the index. We must not ! copy the positions of the old pointers. matrix%index(1:dbcsr_meta_size) = meta(1:dbcsr_meta_size) matrix%index(dbcsr_meta_size + 1:) = 0 matrix%index(dbcsr_slot_size) = dbcsr_num_slots IF (has_thr_c) THEN CALL dbcsr_addto_index_array(matrix, dbcsr_slot_thr_c, thr_c) DEALLOCATE (thr_c) END IF IF (has_row_p) THEN CALL dbcsr_addto_index_array(matrix, dbcsr_slot_row_p, row_p) DEALLOCATE (row_p) END IF IF (has_col_i) THEN CALL dbcsr_addto_index_array(matrix, dbcsr_slot_col_i, col_i) DEALLOCATE (col_i) END IF IF (has_blk_p) THEN CALL dbcsr_addto_index_array(matrix, dbcsr_slot_blk_p, blk_p) DEALLOCATE (blk_p) END IF IF (has_coo_l) THEN CALL dbcsr_addto_index_array(matrix, dbcsr_slot_coo_l, coo_l) DEALLOCATE (coo_l) END IF DEALLOCATE (meta) IF (careful_mod) THEN ! This is pretty strong but it should be true. IF (matrix%index(dbcsr_slot_size) /= new_size) & DBCSR_ABORT("Unexpected index size.") IF (SIZE(matrix%index) /= new_size) & DBCSR_ABORT("Unexpected index size.") END IF CALL dbcsr_repoint_index(matrix) END IF CALL timestop(error_handle) END SUBROUTINE dbcsr_index_compact SUBROUTINE dbcsr_index_checksum(matrix, checksum) !! Calculates the checksum of an index. TYPE(dbcsr_type), INTENT(INOUT) :: matrix !! matrix for which to make canonical index INTEGER, INTENT(OUT) :: checksum CHARACTER(len=*), PARAMETER :: routineN = 'dbcsr_index_checksum' INTEGER :: error_handle ! 
! ---------------------------------------------------------------------------

      CALL timeset(routineN, error_handle)
      !
      checksum = joaat_hash((/joaat_hash(matrix%row_p), &
                              joaat_hash(matrix%col_i), &
                              joaat_hash(matrix%blk_p)/))
      !
      CALL timestop(error_handle)
   END SUBROUTINE dbcsr_index_checksum

   FUNCTION dbcsr_has_local_row_index(matrix) RESULT(local_indexing)
      TYPE(dbcsr_type), INTENT(INOUT) :: matrix
      LOGICAL :: local_indexing

      local_indexing = matrix%local_indexing
   END FUNCTION dbcsr_has_local_row_index

END MODULE dbcsr_index_operations
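
! A minimal sketch (illustrative values only) of the row_i/row_p relationship
! used throughout this module:
!
!    nrows = 3, nblks = 3, blocks in rows (1, 1, 3):
!       row_i = (/1, 1, 3/)
!    dbcsr_make_dbcsr_index collapses this into the CSR-style row pointer
!       row_p = (/0, 2, 2, 3/)
!    so row r owns entries row_p(r)+1 .. row_p(r+1), and
!    dbcsr_expand_row_index performs the inverse transformation.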