! Input file for the force prediction network
sflparamsfile=rHCNO-5.2R_16-3.5A_a4-8.params ! Symmetry function (AEV) parameter file
ntwkStoreDir=networks/ ! Directory where trained networks are stored
atomEnergyFile=sae_linfit.dat ! Linear-fit single-atom energies
nmax=10 ! Maximum number of iterations (0 = inf)
tolr=1 ! Tolerance for early stopping
emult=0.1 ! Multiplier applied to eta after the tolerance switch
eta=0.001 ! Learning rate
tcrit=1.0E-5 ! Eta termination criterion
tmax=0 ! Maximum time (0 = inf)
tbtchsz=2560 ! Training batch size
vbtchsz=2560 ! Validation batch size
gpuid=0 ! GPU device index
ntwshr=0
nkde=2
energy=1 ! Train to energies
force=0 ! Train to forces (disabled)
fmult=0.0 ! Force term multiplier in the cost function
pbc=0 ! Periodic boundary conditions (disabled)
cmult=0.001
runtype=ANNP_CREATE_HDNN_AND_TRAIN ! Create and train an HDNN (high-dimensional neural network)
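! The network_setup block below defines one feed-forward sub-network per element (H, C, N, O).
! Each sub-network maps the 384-element input vector (inputsize=384) through three hidden layers
! to a single-node output. activation and type are NeuroChem integer codes; the commented-out
! l2norm/l2valu lines would enable per-layer L2 regularization.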
network_setup {
  inputsize=384;
  atom_net H $
    layer [
          nodes=160;
          activation=9;
          type=0;
          !l2norm=1;
          !l2valu=0.0001;
    ]
    layer [
          nodes=128;
          activation=9;
          type=0;
          !l2norm=1;
          !l2valu=0.00001;
    ]
    layer [
          nodes=96;
          activation=9;
          type=0;
          !l2norm=1;
          !l2valu=0.000001;
    ]
    layer [
          nodes=1;
          activation=6;
          type=0;
    ]
    $
  atom_net C $
    layer [
          nodes=144;
          activation=9;
          type=0;
          !l2norm=1;
          !l2valu=0.0001;
    ]
    layer [
          nodes=112;
          activation=9;
          type=0;
          !l2norm=1;
          !l2valu=0.00001;
    ]
    layer [
          nodes=96;
          activation=9;
          type=0;
          !l2norm=1;
          !l2valu=0.000001;
    ]
    layer [
          nodes=1;
          activation=6;
          type=0;
    ]
    $
  atom_net N $
    layer [
          nodes=128;
          activation=9;
          type=0;
          !l2norm=1;
          !l2valu=0.0001;
    ]
    layer [
          nodes=112;
          activation=9;
          type=0;
          !l2norm=1;
          !l2valu=0.00001;
    ]
    layer [
          nodes=96;
          activation=9;
          type=0;
          !l2norm=1;
          !l2valu=0.000001;
    ]
    layer [
          nodes=1;
          activation=6;
          type=0;
    ]
    $
  atom_net O $
    layer [
          nodes=128;
          activation=9;
          type=0;
          !l2norm=1;
          !l2valu=0.0001;
    ]
    layer [
          nodes=112;
          activation=9;
          type=0;
          !l2norm=1;
          !l2valu=0.00001;
    ]
    layer [
          nodes=96;
          activation=9;
          type=0;
          !l2norm=1;
          !l2valu=0.000001;
    ]
    layer [
          nodes=1;
          activation=6;
          type=0;
    ]
    $

}
adptlrn=OFF ! Adaptive learning rate method (OFF, RMSPROP)
decrate=0.9 ! Decay rate for RMSPROP
moment=ADAM ! Momentum / optimizer variant (OFF, CNSTTEMP, TMANNEAL, REGULAR, NESTEROV, ADAM)
mu=0.99 ! Mu factor for momentum
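
! ---------------------------------------------------------------------------
! For orientation, the per-element architecture declared in network_setup corresponds to the
! PyTorch-style sketch below. Layer widths and the 384-element input size are taken from this
! file; mapping activation code 9 to CELU (alpha=0.1) and code 6 to a linear output is an
! assumption, as are the helper names and the sum of per-atom outputs into a molecular energy.
! ---------------------------------------------------------------------------

# Minimal PyTorch sketch of the per-element networks declared in network_setup above.
# Layer widths come from the input file (inputsize=384; H: 160-128-96-1, C: 144-112-96-1,
# N/O: 128-112-96-1). Mapping activation code 9 to CELU and code 6 to a linear (identity)
# output is an assumption, not taken from the file.
import torch
import torch.nn as nn

AEV_SIZE = 384  # inputsize from the file

def make_atom_net(hidden):
    """Build one per-element MLP: AEV input -> hidden layers (CELU) -> scalar atomic energy."""
    layers, width = [], AEV_SIZE
    for h in hidden:
        layers += [nn.Linear(width, h), nn.CELU(alpha=0.1)]
        width = h
    layers += [nn.Linear(width, 1)]  # nodes=1 output layer, assumed linear (activation=6)
    return nn.Sequential(*layers)

# Hidden-layer widths per element, copied from network_setup.
atom_nets = nn.ModuleDict({
    "H": make_atom_net([160, 128, 96]),
    "C": make_atom_net([144, 112, 96]),
    "N": make_atom_net([128, 112, 96]),
    "O": make_atom_net([128, 112, 96]),
})

def molecular_energy(aevs_by_element):
    """Sum per-atom contributions into a molecular energy (HDNN-style).

    aevs_by_element: dict mapping element symbol -> tensor of shape (n_atoms_of_element, 384).
    """
    return sum(atom_nets[el](aev).sum() for el, aev in aevs_by_element.items())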