"The input frozen model: %s has no training script or min_nbor_dist information, "
"which is not supported by the model compression interface. "
"Please consider using the --training-script command within the model compression interface to provide the training script of the input frozen model. "
"Note that the input training script must contain the correct path to the training data."%input
)frome
elifnotos.path.exists(training_script):
raiseRuntimeError(
"The input training script %s (%s) does not exist! Please check the path of the training script. "%(input,os.path.abspath(input))
Fit the atomic polarizability with descriptor se_a
Parameters
----------
descrpt : tf.Tensor
The descriptor
neuron : List[int]
Number of neurons in each hidden layer of the fitting net
resnet_dt : bool
Time-step `dt` in the resnet construction:
y = x + dt * \phi (Wx + b)
sel_type : List[int]
The atom types selected to have an atomic polarizability prediction. If is None, all atoms are selected.
fit_diag : bool
Fit the diagonal part of the rotational invariant polarizability matrix, which will be converted to normal polarizability matrix by contracting with the rotation matrix.
scale : List[float]
The output of the fitting net (polarizability matrix) for type i atom will be scaled by scale[i]
diag_shift : List[float]
The diagonal part of the polarizability matrix of type i will be shifted by diag_shift[i]. The shift operation is carried out after scale.
seed : int
Random seed for initializing the network parameters.
activation_function : str
The activation function in the embedding net. Supported options are |ACTIVATION_FN|
precision : str
The precision of the embedding net parameters. Supported options are |PRECISION|
uniform_seed
Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed
"""
def__init__(self,
descrpt:tf.Tensor,
neuron:List[int]=[120,120,120],
resnet_dt:bool=True,
sel_type:List[int]=None,
fit_diag:bool=True,
scale:List[float]=None,
shift_diag:bool=True,# YWolfeee: will support the user to decide whether to use this function
#diag_shift : List[float] = None, YWolfeee: will not support the user to assign a shift
seed:int=None,
activation_function:str='tanh',
precision:str='default',
uniform_seed:bool=False
)->None:
"""
Constructor
"""
ifnotisinstance(descrpt,DescrptSeA):
raiseRuntimeError('PolarFittingSeA only supports DescrptSeA')
self.ntypes=descrpt.get_ntypes()
self.dim_descrpt=descrpt.get_dim_out()
# args = ClassArg()\
# .add('neuron', list, default = [120,120,120], alias = 'n_neuron')\
# .add('resnet_dt', bool, default = True)\
# .add('fit_diag', bool, default = True)\
# .add('diag_shift', [list,float], default = [0.0 for ii in range(self.ntypes)])\
# .add('scale', [list,float], default = [1.0 for ii in range(self.ntypes)])\
# .add('sel_type', [list,int], default = [ii for ii in range(self.ntypes)], alias = 'pol_type')\
Fit the system polarizability with descriptor se_a
Parameters
----------
descrpt : tf.Tensor
The descriptor
neuron : List[int]
Number of neurons in each hidden layer of the fitting net
resnet_dt : bool
Time-step `dt` in the resnet construction:
y = x + dt * \phi (Wx + b)
sel_type : List[int]
The atom types selected to have an atomic polarizability prediction
fit_diag : bool
Fit the diagonal part of the rotational invariant polarizability matrix, which will be converted to normal polarizability matrix by contracting with the rotation matrix.
scale : List[float]
The output of the fitting net (polarizability matrix) for type i atom will be scaled by scale[i]
diag_shift : List[float]
The diagonal part of the polarizability matrix of type i will be shifted by diag_shift[i]. The shift operation is carried out after scale.
seed : int
Random seed for initializing the network parameters.
activation_function : str
The activation function in the embedding net. Supported options are |ACTIVATION_FN|
precision : str
The precision of the embedding net parameters. Supported options are |PRECISION|
"""
def __init__(self,
             descrpt: tf.Tensor,
             neuron: List[int] = [120, 120, 120],
             resnet_dt: bool = True,
             sel_type: List[int] = None,
             fit_diag: bool = True,
             scale: List[float] = None,
             diag_shift: List[float] = None,
             seed: int = None,
             activation_function: str = 'tanh',
             precision: str = 'default'
             ) -> None:
    """
    Constructor

    Builds the wrapped atomic ``PolarFittingSeA`` net from which the global
    (system) polarizability is assembled.

    Raises
    ------
    RuntimeError
        If the descriptor is not a ``DescrptSeA`` instance.
    """
    if not isinstance(descrpt, DescrptSeA):
        raise RuntimeError('GlobalPolarFittingSeA only supports DescrptSeA')
    self.ntypes = descrpt.get_ntypes()
    self.dim_descrpt = descrpt.get_dim_out()
    # BUG FIX: previously every argument was forwarded *positionally*, so the
    # legacy ``diag_shift`` value landed in PolarFittingSeA's seventh
    # parameter, which is the boolean ``shift_diag`` (see
    # PolarFittingSeA.__init__).  With the default ``diag_shift=None`` the
    # shift was therefore silently disabled instead of keeping
    # PolarFittingSeA's default of True.  Forward by keyword and map the
    # legacy argument onto the supported flag: None keeps the default
    # behaviour, a non-empty list (no longer supported per-type) enables it.
    self.polar_fitting = PolarFittingSeA(descrpt,
                                         neuron=neuron,
                                         resnet_dt=resnet_dt,
                                         sel_type=sel_type,
                                         fit_diag=fit_diag,
                                         scale=scale,
                                         shift_diag=True if diag_shift is None else bool(diag_shift),
                                         seed=seed,
                                         activation_function=activation_function,
                                         precision=precision)
def get_sel_type(self) -> int:
    """Return the atom types selected for polarizability fitting.

    Simply delegates to the wrapped atomic polar fitting net.
    """
    inner_fitting = self.polar_fitting
    return inner_fitting.get_sel_type()
def get_out_size(self) -> int:
    """Return the output size of the fitting net (expected to be 9,
    the flattened 3x3 polarizability tensor).

    Simply delegates to the wrapped atomic polar fitting net.
    """
    inner_fitting = self.polar_fitting
    return inner_fitting.get_out_size()
defbuild(self,
input_d,
rot_mat,
natoms,
reuse=None,
suffix='')->tf.Tensor:
"""
Build the computational graph for fitting net
Parameters
----------
input_d
The input descriptor
rot_mat
The rotation matrix from the descriptor.
natoms
The number of atoms. This tensor has the length of Ntypes + 2
natoms[0]: number of local atoms
natoms[1]: total number of atoms held by this processor
natoms[i]: 2 <= i < Ntypes+2, number of type i atoms
reuse
The weights in the networks should be reused when get the variable.