#include "./base/base_uses.f90"

   LOGICAL, PARAMETER, PRIVATE :: debug_this_module = .true.
   CHARACTER(len=*), PARAMETER, PRIVATE :: moduleN = 'nnp_model'
   ! Excerpt from nnp_write_arc(nnp, para_env, printtag):
      CHARACTER(LEN=*), INTENT(IN)             :: printtag

      CHARACTER(len=default_string_length)     :: my_label
      INTEGER                                  :: i, j, unit_nr

      my_label = trim(printtag)//"| "
      IF (para_env%is_source()) THEN
         ! ... (unit_nr is obtained from the default logger, cf.
         !      cp_logger_get_default_unit_nr below; loops over elements i
         !      and layers j enclose the two WRITE statements)
         WRITE (unit_nr, *) trim(my_label)//" Neural network specification for element "// &
            ...
         WRITE (unit_nr, '(1X,A,1X,I3,1X,A,1X,I2)') trim(my_label), &
            nnp%arc(i)%n_nodes(j), "nodes in layer", j
   ! Excerpt from nnp_predict(arc, nnp, i_com):
      INTEGER, INTENT(IN)                      :: i_com

      CHARACTER(len=*), PARAMETER :: routineN = 'nnp_predict'

      INTEGER                                  :: handle, i, j
      REAL(kind=dp)                            :: norm

      CALL timeset(routineN, handle)

      ! ... (loop over the hidden and output layers, i = 2, ..., nnp%n_layer)
      arc%layer(i)%node(:) = 0.0_dp
      ! weighted sum over the previous layer via BLAS DGEMM
      ! (call arguments partially elided in this excerpt):
      CALL dgemm(..., &
                 arc%n_nodes(i - 1), &
                 ..., &
                 arc%layer(i)%weights(:, :, i_com), &
                 arc%n_nodes(i - 1), &
                 arc%layer(i - 1)%node, &
                 ...)
      ! add the bias weights
      DO j = 1, arc%n_nodes(i)
         arc%layer(i)%node(j) = arc%layer(i)%node(j) + arc%layer(i)%bweights(j, i_com)
      END DO
      ! optionally scale by the size of the previous layer
      IF (nnp%normnodes) THEN
         norm = 1.0_dp/real(arc%n_nodes(i - 1), dp)
         DO j = 1, arc%n_nodes(i)
            arc%layer(i)%node(j) = arc%layer(i)%node(j)*norm
         END DO
      END IF
      ! keep the pre-activation values for the gradient evaluation
      DO j = 1, arc%n_nodes(i)
         arc%layer(i)%node_grad(j) = arc%layer(i)%node(j)
      END DO
      ! apply the activation function of this layer
      SELECT CASE (nnp%actfnct(i - 1))
      CASE (nnp_actfnct_tanh)
         arc%layer(i)%node(:) = tanh(arc%layer(i)%node(:))
      CASE (nnp_actfnct_gaus)
         arc%layer(i)%node(:) = exp(-0.5_dp*arc%layer(i)%node(:)**2)
      CASE (nnp_actfnct_cos)
         arc%layer(i)%node(:) = cos(arc%layer(i)%node(:))
      CASE (nnp_actfnct_sig)
         arc%layer(i)%node(:) = 1.0_dp/(1.0_dp + exp(-1.0_dp*arc%layer(i)%node(:)))
      CASE (nnp_actfnct_invsig)
         arc%layer(i)%node(:) = 1.0_dp - 1.0_dp/(1.0_dp + exp(-1.0_dp*arc%layer(i)%node(:)))
      CASE (nnp_actfnct_exp)
         arc%layer(i)%node(:) = exp(-1.0_dp*arc%layer(i)%node(:))
      CASE (nnp_actfnct_softplus)
         arc%layer(i)%node(:) = log(exp(arc%layer(i)%node(:)) + 1.0_dp)
      CASE (nnp_actfnct_quad)
         arc%layer(i)%node(:) = arc%layer(i)%node(:)**2
      CASE DEFAULT
         CPABORT("NNP| Error: Unknown activation function")
      END SELECT
      ! ...

      CALL timestop(handle)
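For reference, the per-layer update that the DGEMM call, the bias loop, and the activation above implement can be written with explicit loops. The sketch below is illustrative only; all names (dense_layer_forward, weights, bias, x, y, normalize) are invented here and are not part of nnp_model, and tanh stands in for whichever activation nnp%actfnct(i - 1) selects.

   ! Illustrative sketch (not part of nnp_model): one dense-layer forward update.
   SUBROUTINE dense_layer_forward(n_in, n_out, weights, bias, x, y, normalize)
      IMPLICIT NONE
      INTEGER, PARAMETER :: dp = SELECTED_REAL_KIND(14, 200)
      INTEGER, INTENT(IN)                               :: n_in, n_out
      REAL(kind=dp), DIMENSION(n_in, n_out), INTENT(IN) :: weights
      REAL(kind=dp), DIMENSION(n_out), INTENT(IN)       :: bias
      REAL(kind=dp), DIMENSION(n_in), INTENT(IN)        :: x
      REAL(kind=dp), DIMENSION(n_out), INTENT(OUT)      :: y
      LOGICAL, INTENT(IN)                               :: normalize
      INTEGER                                           :: i, j

      DO j = 1, n_out
         ! weighted sum over the previous layer (what the DGEMM call computes) plus bias
         y(j) = bias(j)
         DO i = 1, n_in
            y(j) = y(j) + weights(i, j)*x(i)
         END DO
         ! optional scaling by the size of the previous layer, cf. nnp%normnodes
         IF (normalize) y(j) = y(j)/REAL(n_in, dp)
         ! activation function of this layer (tanh as a stand-in)
         y(j) = tanh(y(j))
      END DO
   END SUBROUTINE dense_layer_forward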
   ! Excerpt from nnp_gradients(arc, nnp, i_com, denergydsym):
      TYPE(nnp_type), INTENT(INOUT)            :: nnp
      INTEGER, INTENT(IN)                      :: i_com
      REAL(kind=dp), ALLOCATABLE, DIMENSION(:) :: denergydsym

      CHARACTER(len=*), PARAMETER :: routineN = 'nnp_gradients'

      INTEGER                                  :: handle, i, j, k
      REAL(kind=dp)                            :: norm

      CALL timeset(routineN, handle)

      DO i = 2, nnp%n_layer
         ! derivative of the activation function of layer i, evaluated either at
         ! the stored pre-activation values (node_grad) or at the activated
         ! values (node), whichever is more convenient:
         SELECT CASE (nnp%actfnct(i - 1))
         CASE (nnp_actfnct_tanh)
            arc%layer(i)%node_grad(:) = 1.0_dp - arc%layer(i)%node(:)**2
         CASE (nnp_actfnct_gaus)
            arc%layer(i)%node_grad(:) = -1.0_dp*arc%layer(i)%node(:)*arc%layer(i)%node_grad(:)
         CASE (nnp_actfnct_lin)
            arc%layer(i)%node_grad(:) = 1.0_dp
         CASE (nnp_actfnct_cos)
            arc%layer(i)%node_grad(:) = -sin(arc%layer(i)%node_grad(:))
         CASE (nnp_actfnct_sig)
            arc%layer(i)%node_grad(:) = exp(-arc%layer(i)%node_grad(:))/ &
                                        (1.0_dp + exp(-1.0_dp*arc%layer(i)%node_grad(:)))**2
         CASE (nnp_actfnct_invsig)
            arc%layer(i)%node_grad(:) = -1.0_dp*exp(-1.0_dp*arc%layer(i)%node_grad(:))/ &
                                        (1.0_dp + exp(-1.0_dp*arc%layer(i)%node_grad(:)))**2
         CASE (nnp_actfnct_exp)
            arc%layer(i)%node_grad(:) = -1.0_dp*arc%layer(i)%node(:)
         CASE (nnp_actfnct_softplus)
            arc%layer(i)%node_grad(:) = (exp(arc%layer(i)%node(:)) + 1.0_dp)/ &
                                        exp(arc%layer(i)%node(:))
         CASE (nnp_actfnct_quad)
            arc%layer(i)%node_grad(:) = 2.0_dp*arc%layer(i)%node_grad(:)
         CASE DEFAULT
            CPABORT("NNP| Error: Unknown activation function")
         END SELECT
         IF (nnp%normnodes) THEN
            norm = 1.0_dp/real(arc%n_nodes(i - 1), dp)
            arc%layer(i)%node_grad(:) = norm*arc%layer(i)%node_grad(:)
         END IF
      END DO

      ! accumulate the chain rule layer by layer: tmp_der(i, j) of layer k holds
      ! the derivative of node j in layer k w.r.t. input (symmetry function) i
      DO j = 1, arc%n_nodes(2)
         DO i = 1, arc%n_nodes(1)
            arc%layer(2)%tmp_der(i, j) = arc%layer(2)%node_grad(j)*arc%layer(2)%weights(i, j, i_com)
         END DO
      END DO
      DO k = 3, nnp%n_layer
         arc%layer(k)%tmp_der(:, :) = 0.0_dp
         ! propagate the derivatives through the weights of layer k via BLAS DGEMM
         ! (call arguments partially elided in this excerpt):
         CALL dgemm(..., &
                    arc%n_nodes(k - 1), &
                    ..., &
                    arc%layer(k - 1)%tmp_der, &
                    ..., &
                    arc%layer(k)%weights(:, :, i_com), &
                    arc%n_nodes(k - 1), &
                    ..., &
                    arc%layer(k)%tmp_der, &
                    ...)
         DO j = 1, arc%n_nodes(k)
            DO i = 1, arc%n_nodes(1)
               arc%layer(k)%tmp_der(i, j) = arc%layer(k)%node_grad(j)* &
                                            arc%layer(k)%tmp_der(i, j)
            END DO
         END DO
      END DO

      ! the derivatives of the output node (the predicted energy) w.r.t. the
      ! input symmetry functions are the requested gradients
      DO i = 1, arc%n_nodes(1)
         denergydsym(i) = arc%layer(nnp%n_layer)%tmp_der(i, 1)
      END DO

      CALL timestop(handle)
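In equations (notation introduced here for readability; it is not taken from the source): let G_i denote the input-layer values (the symmetry functions), W^{(l)} and b^{(l)} the weight and bias arrays of layer l for the selected weight set i_com, and f_l the activation function of layer l. nnp_predict evaluates

   y^{(1)}_i = G_i,   a^{(l)}_j = \sum_m W^{(l)}_{mj} y^{(l-1)}_m + b^{(l)}_j,   y^{(l)}_j = f_l(a^{(l)}_j),

with a^{(l)} additionally scaled by 1/n_{l-1} when nnp%normnodes is set, and nnp_gradients accumulates the chain rule layer by layer in tmp_der,

   tmp_der^{(l)}_{ij} = \partial y^{(l)}_j / \partial G_i
                      = f_l'(a^{(l)}_j) \sum_m (\partial y^{(l-1)}_m / \partial G_i) W^{(l)}_{mj},
   \qquad \partial E / \partial G_i = tmp_der^{(L)}_{i1},

where \partial y^{(1)}_m / \partial G_i = \delta_{mi}, L = nnp%n_layer, and the normnodes factor is folded into f_l' in the code above.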
static void dgemm(const char transa, const char transb, const int m, const int n, const int k, const double alpha, const double *a, const int lda, const double *b, const int ldb, const double beta, double *c, const int ldc)
Convenient wrapper to hide the Fortran nature of dgemm_, swapping a and b.
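The nnp_model excerpts above call dgemm directly through the standard BLAS interface. To make that call pattern concrete, the following self-contained sketch computes y = W**T * x, which is the operation the (partially elided) dgemm calls in nnp_predict perform with each layer's weight matrix. All variable names and dimensions are invented for illustration, and the program must be linked against a BLAS library.

   ! Illustrative sketch: y := 1.0*W**T*x + 1.0*y via the standard BLAS DGEMM,
   ! mirroring the call pattern in nnp_predict (weights stored as W(n_in, n_out)).
   PROGRAM dgemm_demo
      IMPLICIT NONE
      INTEGER, PARAMETER :: dp = SELECTED_REAL_KIND(14, 200)
      INTEGER, PARAMETER :: n_in = 3, n_out = 2
      REAL(kind=dp) :: w(n_in, n_out), x(n_in, 1), y(n_out, 1)

      w = 1.0_dp
      x = reshape([1.0_dp, 2.0_dp, 3.0_dp], [n_in, 1])
      y = 0.0_dp   ! plays the role of the (bias-free) accumulator

      ! C = alpha*op(A)*op(B) + beta*C with op(A) = A**T:
      !          transa, transb, m,     n, k,    alpha,  A, lda,  B, ldb,  beta,   C, ldc
      CALL dgemm('T',    'N',    n_out, 1, n_in, 1.0_dp, w, n_in, x, n_in, 1.0_dp, y, n_out)

      PRINT '(A,2F8.2)', 'W**T * x = ', y
   END PROGRAM dgemm_demo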
Various routines to log and control the output. The idea is that decisions about where to log should ...
recursive integer function, public cp_logger_get_default_unit_nr(logger, local, skip_not_ionode)
Asks for the default unit number of the given logger. Try to use cp_logger_get_unit_nr.
type(cp_logger_type) function, pointer, public cp_get_default_logger()
Returns the default logger.
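The unit_nr used in nnp_write_arc is presumably obtained through these two routines; a minimal sketch of that pattern follows, assuming the CP2K module name cp_log_handling. The wrapper subroutine itself is invented for illustration.

   ! Sketch only: fetch an output unit from the default CP2K logger, as the
   ! elided part of nnp_write_arc does. Not part of nnp_model; the optional
   ! arguments local/skip_not_ionode (see the interface above) control the
   ! behaviour on non-I/O ranks.
   SUBROUTINE example_get_output_unit(unit_nr)
      USE cp_log_handling, ONLY: cp_get_default_logger, &
                                 cp_logger_get_default_unit_nr, &
                                 cp_logger_type
      IMPLICIT NONE
      INTEGER, INTENT(OUT)          :: unit_nr
      TYPE(cp_logger_type), POINTER :: logger

      logger => cp_get_default_logger()
      unit_nr = cp_logger_get_default_unit_nr(logger)
   END SUBROUTINE example_get_output_unit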
Defines the basic variable types.
integer, parameter, public :: dp
integer, parameter, public :: default_string_length
Interface to the message passing library MPI.
Data types for neural network potentials.
integer, parameter, public :: nnp_actfnct_lin
integer, parameter, public :: nnp_actfnct_cos
integer, parameter, public :: nnp_actfnct_invsig
integer, parameter, public :: nnp_actfnct_sig
integer, parameter, public :: nnp_actfnct_exp
integer, parameter, public :: nnp_actfnct_softplus
integer, parameter, public :: nnp_actfnct_quad
integer, parameter, public :: nnp_actfnct_gaus
integer, parameter, public :: nnp_actfnct_tanh
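These tags are what nnp_predict and nnp_gradients dispatch on in their SELECT CASE blocks. As a compact illustration, the hedged helper below (not part of CP2K; the function name is invented and the module name nnp_environment_types is an assumption) applies three of the corresponding scalar functions; the remaining tags follow the same pattern as the formulas in the excerpts above.

   ! Sketch only: scalar dispatch on the activation tags, mirroring the
   ! SELECT CASE blocks in nnp_predict. Only three tags are shown.
   ELEMENTAL FUNCTION apply_actfnct(tag, x) RESULT(y)
      USE kinds, ONLY: dp
      USE nnp_environment_types, ONLY: nnp_actfnct_gaus, nnp_actfnct_softplus, &
                                       nnp_actfnct_tanh
      IMPLICIT NONE
      INTEGER, INTENT(IN)       :: tag
      REAL(kind=dp), INTENT(IN) :: x
      REAL(kind=dp)             :: y

      SELECT CASE (tag)
      CASE (nnp_actfnct_tanh)
         y = tanh(x)
      CASE (nnp_actfnct_gaus)
         y = exp(-0.5_dp*x**2)
      CASE (nnp_actfnct_softplus)
         y = log(exp(x) + 1.0_dp)
      CASE DEFAULT
         y = x   ! linear fallback here; the real code calls CPABORT on unknown tags
      END SELECT
   END FUNCTION apply_actfnct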
Methods dealing with core routines for artificial neural networks.
subroutine, public nnp_predict(arc, nnp, i_com)
Predict energy by evaluating neural network.
subroutine, public nnp_gradients(arc, nnp, i_com, denergydsym)
Calculate gradients of neural network.
subroutine, public nnp_write_arc(nnp, para_env, printtag)
Write neural network architecture information.
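Taken together, a typical evaluation for one weight set i_com calls nnp_predict, reads the output node, and then calls nnp_gradients. The sketch below only illustrates this call order: the type name nnp_arc_type, the assumption that the last layer holds a single energy node, and all local names are mine; the setup of nnp/arc (including filling arc%layer(1)%node with the symmetry-function values) is elided.

   ! Sketch only: call order of the public nnp_model routines for the weight
   ! set selected by i_com. nnp/arc are assumed to be fully initialized.
   SUBROUTINE example_evaluate(nnp, arc, i_com, energy)
      USE kinds, ONLY: dp
      USE nnp_environment_types, ONLY: nnp_arc_type, nnp_type
      USE nnp_model, ONLY: nnp_gradients, nnp_predict
      IMPLICIT NONE
      TYPE(nnp_type), INTENT(INOUT)      :: nnp
      TYPE(nnp_arc_type), INTENT(INOUT)  :: arc
      INTEGER, INTENT(IN)                :: i_com
      REAL(kind=dp), INTENT(OUT)         :: energy

      REAL(kind=dp), ALLOCATABLE, DIMENSION(:) :: denergydsym

      ! forward pass: fills arc%layer(:)%node layer by layer
      CALL nnp_predict(arc, nnp, i_com)
      ! assumption: the last layer has a single node holding the predicted
      ! energy (cf. tmp_der(i, 1) in nnp_gradients)
      energy = arc%layer(nnp%n_layer)%node(1)

      ! backward pass: derivatives of the energy w.r.t. the input symmetry functions
      ALLOCATE (denergydsym(arc%n_nodes(1)))
      CALL nnp_gradients(arc, nnp, i_com, denergydsym)
      DEALLOCATE (denergydsym)
   END SUBROUTINE example_evaluate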
Type of a logger; at the moment it contains just a print level starting at which level it should be l...
Stores all the information relevant to an MPI environment.
Data type for artificial neural networks.
Main data type collecting all relevant data for neural network potentials.