# params.yaml
# This is only part of the input settings; it should be
# used together with systems.yaml and machines.yaml.

# number of iterations to run; can be set to zero for DeePHF training
n_iter: 0
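# A hedged illustration (the value is hypothetical, not from this file):
# for iterative DeePKS training this would be a positive value, e.g.
# n_iter: 10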
  
# directory setting (these are default choices, can be omitted)
workdir: "."
share_folder: "share" # folder that stores all other settings

# SCF settings; set to false when n_iter = 0 to skip checking
scf_input: false

# training settings; set to false when n_iter = 0 to skip checking
train_input: false
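# A hedged sketch (an assumption about the expected format, not stated in
# this file): when n_iter > 0, these would instead hold the per-iteration
# settings, either inline (like init_scf / init_train below) or as paths to
# separate files, e.g.
# scf_input: scf_input.yaml
# train_input: train_input.yaml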

# init settings; these are for the DeePHF task
init_model: false # do not use existing model to restart from
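# A hedged example (hypothetical path): to warm-start from an existing
# model, point this to a saved checkpoint instead of false, e.g.
# init_model: ./model.pth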

init_scf: # parameters for SCF calculation
  basis: ccpvdz
  # this is for pure energy training
  dump_fields: 
    - e_base # Hartree-Fock energy
    - dm_eig # descriptors
    - conv # whether converged or not
    - l_e_delta # delta energy between e_base and the reference; used as the training label
  verbose: 1
  mol_args: # args to be passed to pyscf.gto.Mole.build
    incore_anyway: True
  scf_args: # args to be passed to pyscf.scf.RHF.run
    conv_tol: 1e-8
    conv_check: false # pyscf conv_check has a bug
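  # For orientation, a minimal Python sketch of how the settings above map
  # onto pyscf (an assumption about how the driver forwards these arguments;
  # kept as comments so the file remains valid YAML):
  #
  #   from pyscf import gto, scf
  #   mol = gto.Mole()
  #   mol.build(atom="O 0 0 0; H 0 0.76 0.59; H 0 -0.76 0.59",  # example geometry
  #             basis="ccpvdz", incore_anyway=True)             # mol_args
  #   mf = scf.RHF(mol)
  #   mf.run(conv_tol=1e-8, conv_check=False)                   # scf_args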

init_train: # parameters for nn training
  model_args:
    hidden_sizes: [100, 100, 100] # neurons in hidden layers
    output_scale: 100 # the output will be divided by 100 before being compared with the label
    use_resnet: true # skip connection
    actv_fn: mygelu # same as gelu, but supports force calculation
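    # Read together (an interpretation of the block above, not a spec): the
    # model is an MLP over the dm_eig descriptors with three hidden layers of
    # width 100, gelu-like activations, and residual skip connections.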
  data_args: 
    batch_size: 16
    group_batch: 1 # can collect multiple systems into one batch
  preprocess_args:
    preshift: true # shift the descriptor by its mean
    prescale: false # scale the descriptor by its variance (can cause convergence problems)
    prefit_ridge: 1e1 # do a ridge regression as a prefit
    prefit_trainable: false
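    # In the usual formulation (assumed here, not stated in this file), the
    # prefit is a ridge regression on the descriptors:
    #   min_w ||X w - y||^2 + lambda ||w||^2,  lambda = prefit_ridge = 10,
    # with closed form w = (X^T X + lambda I)^{-1} X^T y, so the network only
    # has to learn the residual; prefit_trainable: false keeps w frozen.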
  train_args: 
    decay_rate: 0.96 # learning rate decay factor
    decay_steps: 500 # decay the learning rate every this many steps
    display_epoch: 100
    n_epoch: 10000
    start_lr: 0.0003
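    # Assuming a standard staircase exponential decay (an assumption about
    # the trainer, not stated in this file), the schedule is
    #   lr = start_lr * decay_rate ** (step // decay_steps)
    # i.e. a factor-0.96 drop every 500 steps; after 20 decays that gives
    # roughly 3.0e-4 * 0.96**20 ≈ 1.3e-4.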