#include "./base/base_uses.f90"
   CHARACTER(len=*), PARAMETER, PRIVATE :: moduleN = 'qs_tddfpt2_assign'

   LOGICAL, PARAMETER, PRIVATE :: debug_this_module = .FALSE.
   SUBROUTINE assign_state(qs_env, matrix_s, evects, psi0, wfn_history, my_state)

      INTEGER, INTENT(INOUT) :: my_state

      CHARACTER(LEN=*), PARAMETER :: routineN = 'assign_state'

      INTEGER :: handle, is, ispin, natom, ncol, nspins, nstate
      REAL(KIND=dp), ALLOCATABLE, DIMENSION(:) :: dv, rdiag
      CALL timeset(routineN, handle)

      CALL get_qs_env(qs_env, natom=natom, para_env=para_env)

      nspins = SIZE(evects, 1)
      nstate = SIZE(evects, 2)

      smat => matrix_s(1)%matrix
      IF (ASSOCIATED(wfn_history%evect)) THEN

         wfn_history%gsval = 0.0_dp
         wfn_history%gsmin = 1.0_dp
         DO ispin = 1, nspins
            ! re-orthogonalize the stored orbitals/excitation vector and align them
            ! with the current ground-state orbitals
            CALL cp_fm_get_info(wfn_history%cpmos(ispin), ncol_global=ncol)
            CALL lowdin_orthogonalization(wfn_history%cpmos(ispin), wfn_history%evect(ispin), &
                                          ncol, smat)
            ALLOCATE (rdiag(ncol))
            CALL wfn_align(psi0(ispin), wfn_history%cpmos(ispin), wfn_history%evect(ispin), &
                           rdiag, smat)
            wfn_history%gsval = wfn_history%gsval + SUM(rdiag)/REAL(ncol*nspins, KIND=dp)
            wfn_history%gsmin = MIN(wfn_history%gsmin, MINVAL(rdiag))
            DEALLOCATE (rdiag)
         END DO
         ! overlap of each current excitation vector with the stored one
         ALLOCATE (dv(nstate))
         DO is = 1, nstate
            xsum = 0.0_dp
            DO ispin = 1, nspins
               ALLOCATE (rdiag(ncol))
               CALL xvec_ovlp(evects(ispin, is), wfn_history%evect(ispin), rdiag, smat)
               xsum = xsum + SUM(rdiag)
               DEALLOCATE (rdiag)
            END DO
            dv(is) = ABS(xsum)/SQRT(REAL(nspins, dp))
         END DO
         my_state = MAXVAL(MAXLOC(dv))
         wfn_history%xsval = dv(my_state)
         IF (wfn_history%xsval < 0.75_dp) THEN
            dv(my_state) = 0.0_dp
            IF (wfn_history%xsval/MAXVAL(dv) < 0.5_dp) THEN
               CALL cp_warn(__LOCATION__, "Uncertain assignment for State following."// &
                            " Reduce trust radius in Geometry Optimization or timestep.")
            END IF
         END IF
         ! store the assigned excitation vector and the current ground-state orbitals
         DO ispin = 1, nspins
            CALL cp_fm_get_info(psi0(ispin), ncol_global=ncol)
            CALL cp_fm_to_fm(evects(ispin, my_state), wfn_history%evect(ispin))
            CALL cp_fm_to_fm(psi0(ispin), wfn_history%cpmos(ispin), ncol, 1, 1)
         END DO
      ELSE

         ! first call: create the history vectors and store the initial assignment
         ALLOCATE (wfn_history%evect(nspins))
         ALLOCATE (wfn_history%cpmos(nspins))
         DO ispin = 1, nspins
            CALL cp_fm_create(wfn_history%evect(ispin), evects(ispin, 1)%matrix_struct, "Xvec")
            CALL cp_fm_create(wfn_history%cpmos(ispin), evects(ispin, 1)%matrix_struct, "Cvec")
         END DO
         DO ispin = 1, nspins
            CALL cp_fm_get_info(psi0(ispin), ncol_global=ncol)
            CALL cp_fm_to_fm(evects(ispin, my_state), wfn_history%evect(ispin))
            CALL cp_fm_to_fm(psi0(ispin), wfn_history%cpmos(ispin), ncol, 1, 1)
         END DO
         wfn_history%xsval = 1.0_dp
         wfn_history%gsval = 1.0_dp
         wfn_history%gsmin = 1.0_dp

      END IF

      CALL timestop(handle)

   END SUBROUTINE assign_state
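For orientation, the selection criterion implemented above can be written compactly. This is a sketch in notation introduced here (X_I(sigma) for the excitation vector of state I and spin sigma, X_hist for the stored history vector, S for the overlap matrix); none of these symbols appear in the code itself:

    d_I = | SUM_sigma Tr[ X_I(sigma)^T S X_hist(sigma) ] | / sqrt(nspins),   I = 1, ..., nstate
    my_state = argmax_I d_I,    xsval = d_{my_state}

The assignment is flagged as uncertain when the best overlap xsval drops below 0.75.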
   SUBROUTINE lowdin_orthogonalization(vmatrix, xmatrix, ncol, matrix_s)

      TYPE(cp_fm_type), INTENT(IN) :: vmatrix, xmatrix
      INTEGER, INTENT(IN) :: ncol

      CHARACTER(LEN=*), PARAMETER :: routineN = 'lowdin_orthogonalization'
      REAL(KIND=dp), PARAMETER :: rone = 1.0_dp, rzero = 0.0_dp

      INTEGER :: handle, n, ncol_global, ndep
      REAL(KIND=dp) :: threshold, xsum
      REAL(KIND=dp), ALLOCATABLE, DIMENSION(:) :: rdiag

      IF (ncol .EQ. 0) RETURN
      CALL timeset(routineN, handle)

      threshold = 1.0e-7_dp
      CALL cp_fm_get_info(matrix=vmatrix, nrow_global=n, ncol_global=ncol_global)
      IF (ncol .GT. ncol_global) CPABORT("Wrong ncol value")

      NULLIFY (fm_struct_tmp)
      CALL cp_fm_struct_create(fm_struct_tmp, nrow_global=ncol, ncol_global=ncol, &
                               para_env=vmatrix%matrix_struct%para_env, &
                               context=vmatrix%matrix_struct%context)

      ! csc = V^T * sc  (with sc = S*V, see cp_dbcsr_sm_fm_multiply)
      CALL parallel_gemm('T', 'N', ncol, ncol, n, rone, vmatrix, sc, rzero, csc)
      ! csc <- (V^T S V)^(-1/2)
      CALL cp_fm_power(csc, work, -0.5_dp, threshold, ndep)
      ! Loewdin-orthogonalize the occupied orbitals: V <- V (V^T S V)^(-1/2)
      CALL parallel_gemm('N', 'N', n, ncol, ncol, rone, vmatrix, csc, rzero, sc)
      ! apply the same transformation to the excitation vector: X <- X (V^T S V)^(-1/2)
      CALL parallel_gemm('N', 'N', n, ncol, ncol, rone, xmatrix, csc, rzero, sc)

      ! project the occupied orbitals out of the excitation vector: X <- X - V (V^T S X)
      CALL parallel_gemm('T', 'N', ncol, ncol, n, rone, vmatrix, sc, rzero, csc)
      CALL parallel_gemm('N', 'N', n, ncol, ncol, rone, vmatrix, csc, rzero, sc)
      CALL cp_fm_geadd(-1.0_dp, 'N', sc, 1.0_dp, xmatrix)

      ! normalize the excitation vector such that Tr(X^T S X) = 1
      CALL parallel_gemm('T', 'N', ncol, ncol, n, rone, xmatrix, sc, rzero, csc)
      ALLOCATE (rdiag(ncol))
      CALL cp_fm_get_diag(csc, rdiag)
      xsum = SUM(rdiag)
      DEALLOCATE (rdiag)
      xsum = 1._dp/SQRT(xsum)
      CALL cp_fm_scale(xsum, xmatrix)

      CALL timestop(handle)

   END SUBROUTINE lowdin_orthogonalization
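For reference, the net transformation performed by lowdin_orthogonalization, written as matrix algebra. This is a sketch assuming sc holds S*V (respectively S*X) as produced by cp_dbcsr_sm_fm_multiply; V denotes vmatrix, X denotes xmatrix, and S the overlap matrix:

    V <- V (V^T S V)^(-1/2)          (Loewdin orthogonalization, cp_fm_power with exponent -1/2)
    X <- X (V^T S V)^(-1/2)          (same transformation applied to the excitation vector)
    X <- X - V (V^T S X)             (projection of the occupied space out of X)
    X <- X / sqrt(Tr[X^T S X])       (normalization)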
   SUBROUTINE wfn_align(gmatrix, vmatrix, xmatrix, rdiag, matrix_s)

      TYPE(cp_fm_type), INTENT(IN) :: gmatrix, vmatrix, xmatrix
      REAL(KIND=dp), DIMENSION(:), INTENT(INOUT) :: rdiag

      CHARACTER(LEN=*), PARAMETER :: routineN = 'wfn_align'
      REAL(KIND=dp), PARAMETER :: rone = 1.0_dp, rzero = 0.0_dp

      INTEGER :: handle, n, ncol, ncol_global
      CALL timeset(routineN, handle)

      ncol = SIZE(rdiag)
      CALL cp_fm_get_info(matrix=vmatrix, nrow_global=n, ncol_global=ncol_global)
      IF (ncol .GT. ncol_global) CPABORT("Wrong ncol value")

      NULLIFY (fm_struct_tmp)
      CALL cp_fm_struct_create(fm_struct_tmp, nrow_global=ncol, ncol_global=ncol, &
                               para_env=vmatrix%matrix_struct%para_env, &
                               context=vmatrix%matrix_struct%context)

      ! rotate the stored orbitals V and excitation vector X onto the current
      ! ground-state orbitals G: csc = G^T S V, then V <- V csc^T and X <- X csc^T
      CALL parallel_gemm('T', 'N', ncol, ncol, n, rone, gmatrix, sc, rzero, csc)
      CALL parallel_gemm('N', 'T', n, ncol, ncol, rone, vmatrix, csc, rzero, sc)
      CALL parallel_gemm('N', 'T', n, ncol, ncol, rone, xmatrix, csc, rzero, sc)

      ! re-orthogonalize the rotated orbitals and excitation vector
      CALL lowdin_orthogonalization(vmatrix, xmatrix, ncol, matrix_s)

      ! rdiag <- diag(G^T S V): per-orbital overlap with the current ground state
      CALL parallel_gemm('T', 'N', ncol, ncol, n, rone, gmatrix, sc, rzero, csc)
      CALL cp_fm_get_diag(csc, rdiag)

      CALL timestop(handle)

   END SUBROUTINE wfn_align
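The alignment step in wfn_align amounts to the following sketch (G = gmatrix, the current ground-state orbitals; V = vmatrix; X = xmatrix; S the overlap matrix; U is notation introduced here):

    U = G^T S V
    V <- V U^T,   X <- X U^T
    (V, X) <- lowdin_orthogonalization(V, X)
    rdiag_i = [G^T S V]_ii

so that rdiag measures, orbital by orbital, how well the re-aligned stored orbitals match the current ground state.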
   SUBROUTINE xvec_ovlp(ematrix, xmatrix, rdiag, matrix_s)

      TYPE(cp_fm_type), INTENT(IN) :: ematrix, xmatrix
      REAL(KIND=dp), DIMENSION(:), INTENT(INOUT) :: rdiag

      CHARACTER(LEN=*), PARAMETER :: routineN = 'xvec_ovlp'
      REAL(KIND=dp), PARAMETER :: rone = 1.0_dp, rzero = 0.0_dp

      INTEGER :: handle, n, ncol, ncol_global
      CALL timeset(routineN, handle)

      ncol = SIZE(rdiag)
      CALL cp_fm_get_info(matrix=xmatrix, nrow_global=n, ncol_global=ncol_global)
      IF (ncol .GT. ncol_global) CPABORT("Wrong ncol value")

      NULLIFY (fm_struct_tmp)
      CALL cp_fm_struct_create(fm_struct_tmp, nrow_global=ncol, ncol_global=ncol, &
                               para_env=xmatrix%matrix_struct%para_env, &
                               context=xmatrix%matrix_struct%context)

      ! rdiag <- diag(E^T S X): overlap of the excitation vector E with the stored vector X
      CALL parallel_gemm('T', 'N', ncol, ncol, n, rone, ematrix, sc, rzero, csc)
      CALL cp_fm_get_diag(csc, rdiag)

      CALL timestop(handle)

   END SUBROUTINE xvec_ovlp
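xvec_ovlp returns the per-orbital overlap between a current excitation vector E = ematrix and the stored vector X = xmatrix (a sketch, with S the overlap matrix; E, X, S are notation introduced here):

    rdiag_i = [E^T S X]_ii

assign_state sums these diagonal elements over orbitals and spins to build the assignment measure dv(is).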
Referenced CP2K modules and public routines:

  DBCSR operations:
    cp_dbcsr_sm_fm_multiply(matrix, fm_in, fm_out, ncol, alpha, beta) - multiply a DBCSR matrix with a full matrix

  Basic linear algebra for full matrices:
    cp_fm_geadd(alpha, trans, matrix_a, beta, matrix_b) - interface to BLACS geadd: matrix_b = beta*matrix_b + alpha*op(matrix_a)
    cp_fm_scale(alpha, matrix_a) - scales a matrix: matrix_a = alpha*matrix_a

  Diagonalization schemes for cp_fm_type:
    cp_fm_power(matrix, work, exponent, threshold, n_dependent, verbose, eigvals) - matrix power via diagonalization

  Full matrix structures:
    cp_fm_struct_create(fmstruct, para_env, context, nrow_global, ncol_global, ...) - allocates and initializes a full matrix structure
    cp_fm_struct_release(fmstruct) - releases a full matrix structure

  Full matrices distributed over many processors:
    cp_fm_create(matrix, matrix_struct, name, use_sp) - creates a new full matrix with the given structure
    cp_fm_get_diag(matrix, diag) - returns the diagonal elements of a full matrix
    cp_fm_get_info(matrix, name, nrow_global, ncol_global, ..., matrix_struct, para_env) - returns information about a full matrix

  Other:
    dp (integer, parameter, public) - double-precision real kind from the basic variable types module
    get_qs_env(qs_env, ..., matrix_s, ..., natom, ..., para_env, ...) - get the QUICKSTEP environment
    assign_state(qs_env, matrix_s, evects, psi0, wfn_history, my_state) - select the excited state to follow (this module)
    Types for excited-state potential energies; interface to the MPI message-passing library; full matrix structure and MPI environment types.