Commit 6b33aeb8 authored by zhangqha's avatar zhangqha
Browse files

BladeDISC DeePMD code

parents
Pipeline #179 canceled with stages
from .freeze import save_weight
from .mapt import MapTable
from .wrap import Wrap
__all__ = [
"save_weight",
"MapTable",
"Wrap"
]
#!/usr/bin/env python3
from deepmd.env import tf
from deepmd.nvnmd.utils.fio import FioDic
def filter_tensorVariableList(tensorVariableList) -> dict:
    r"""Select the NVNMD-relevant variables from a list of tensor variables.

    A variable is kept when its normalized name (with ':0' stripped and '/'
    replaced by '.') starts with one of:

    | :code:`descrpt_attr/t_avg:0`
    | :code:`descrpt_attr/t_std:0`
    | :code:`filter_type_{atom i}/matrix_{layer l}_{atomj}:0`
    | :code:`filter_type_{atom i}/bias_{layer l}_{atomj}:0`
    | :code:`layer_{layer l}_type_{atom i}/matrix:0`
    | :code:`layer_{layer l}_type_{atom i}/bias:0`
    | :code:`final_layer_type_{atom i}/matrix:0`
    | :code:`final_layer_type_{atom i}/bias:0`

    Optimizer slot variables (names containing 'Adam') are excluded.

    Returns a dict mapping the normalized name to the tensor variable.
    """
    prefixes = ('descrpt_attr', 'filter_type_', 'layer_', 'final_layer_type_')
    dic_name_tv = {}
    for tv in tensorVariableList:
        # normalize the TF variable name: drop the output index, use '.' separators
        name = tv.name.replace(':0', '').replace('/', '.')
        if name.startswith(prefixes) and 'Adam' not in name and 'XXX' not in name:
            dic_name_tv[name] = tv
    return dic_name_tv
def save_weight(sess, file_name: str = 'nvnmd/weight.npy'):
    r"""Evaluate the NVNMD-relevant variables in `sess` and save them to a npy file."""
    variables = filter_tensorVariableList(tf.global_variables())
    dic_key_value = {key: sess.run(tv) for key, tv in variables.items()}
    FioDic().save(file_name, dic_key_value)
import numpy as np
import logging
from deepmd.env import tf
from deepmd.utils.sess import run_sess
from deepmd.nvnmd.utils.fio import FioDic
from deepmd.nvnmd.utils.config import nvnmd_cfg
from deepmd.nvnmd.utils.weight import get_normalize, get_rng_s, get_filter_weight
from deepmd.nvnmd.utils.network import get_sess
from deepmd.nvnmd.data.data import jdata_deepmd_input
from typing import List, Optional
log = logging.getLogger(__name__)
class MapTable:
    r"""Generate the mapping table describing the relationship of
    atomic distance, cutoff function, and embedding matrix.

    Three mapping tables will be built:

    | :math:`r^2_{ji} \rightarrow s_{ji}`
    | :math:`r^2_{ji} \rightarrow sr_{ji}`
    | :math:`r^2_{ji} \rightarrow \mathcal{G}_{ji}`

    where :math:`s_{ji}` is the cut-off function,
    :math:`sr_{ji} = \frac{s(r_{ji})}{r_{ji}}`, and
    :math:`\mathcal{G}_{ji}` is the embedding matrix.

    The mapping function can be defined as:

    | :math:`y = f(x) = y_{k} + (x - x_{k}) * dy_{k}`
    | :math:`y_{k} = f(x_{k})`
    | :math:`dy_{k} = \frac{f(x_{k+1}) - f(x_{k})}{dx}`
    | :math:`x_{k} \leq x < x_{k+1}`
    | :math:`x_{k} = k * dx`

    where :math:`dx` is the interpolation interval.

    Parameters
    ----------
    config_file
        input file name
        an .npy file containing the configuration information of NVNMD model
    weight_file
        input file name
        an .npy file containing the weights of NVNMD model
    map_file
        output file name
        an .npy file containing the mapping tables of NVNMD model

    References
    ----------
    DOI: 10.1038/s41524-022-00773-z
    """

    def __init__(
        self,
        config_file: str,
        weight_file: str,
        map_file: str
    ):
        self.config_file = config_file
        self.weight_file = weight_file
        self.map_file = map_file
        # enable NVNMD mode in the global configuration before building tables
        jdata = jdata_deepmd_input['nvnmd']
        jdata['config_file'] = config_file
        jdata['weight_file'] = weight_file
        jdata['enable'] = True
        nvnmd_cfg.init_from_jdata(jdata)
        # map_table = self.build_map()

    def qqq(self, dat, NBIT_FEA_FL, NBIT_FEA_X, is_set_zero=False):
        r"""Quantize each table in `dat` with `NBIT_FEA_FL` fractional bits and
        pair it with the per-interval increment.

        Returns a list of ``[value, delta]`` pairs, each reshaped to
        ``(2**NBIT_FEA_X, -1)``.  When `is_set_zero` is True the first delta is
        forced to zero.
        """
        dat = dat if isinstance(dat, list) else [dat]
        prec = 2 ** NBIT_FEA_FL
        N = int(2 ** NBIT_FEA_X)
        #
        dat2 = []
        for ii in range(len(dat)):
            dati = dat[ii]
            vi = dati[:-1]  # i
            vi1 = dati[1:]  # i+1
            # v = vi + dvi * (r - ri)
            # ri = i * dt
            # dvi = v(i+1) / dt
            vi = np.round(vi * prec) / prec
            vi1 = np.round(vi1 * prec) / prec
            dvi = vi1 - vi
            if is_set_zero:
                dvi[0] = 0
            #
            v = [np.reshape(vp, [N, -1]) for vp in [vi, dvi]]
            dat2.append(v)
        return dat2

    def build_map(self):
        r"""Build all mapping tables, quantize them, and save them to `map_file`."""
        ntypex = nvnmd_cfg.dscp['ntypex']
        ntype = nvnmd_cfg.dscp['ntype']
        NBIT_FEA_FL = nvnmd_cfg.nbit['NBIT_FEA_FL']
        NBIT_FEA_X = nvnmd_cfg.nbit['NBIT_FEA_X']
        dic = self.run_u2s()
        dic.update(self.run_s2G(dic))
        # quantize s and G
        prec = 2**NBIT_FEA_FL
        for tt in range(ntypex):
            dic['s'][tt][0] = np.round(dic['s'][tt][0] * prec) / prec
            dic['sr'][tt][0] = np.round(dic['sr'][tt][0] * prec) / prec
            for tt2 in range(ntype):
                v = np.round(dic['G'][tt * ntype + tt2][0] * prec) / prec
                dic['G'][tt * ntype + tt2][0] = v
        maps = {}
        keys = 's,sr,ds_dr2,dsr_dr2,G,dG_ds'.split(',')
        keys2 = 'G,dG_ds'.split(',')
        for key in keys:
            # only non-G tables force the first delta to zero
            val = self.qqq(dic[key], NBIT_FEA_FL, NBIT_FEA_X, key not in keys2)
            maps[key] = val
        N = int(2**NBIT_FEA_X)
        maps2 = {}
        maps2['r2'] = dic['r2'][0:N]
        maps2['s2'] = dic['s2'][0:N]
        for tt in range(ntypex):
            for tt2 in range(ntype):
                postfix = f'_t{tt}_t{tt2}'
                for key in keys:
                    maps2[key + postfix] = []
                    maps2[key + postfix].append(maps[key][tt * ntype + tt2][0].reshape([N, -1]))
                    maps2[key + postfix].append(maps[key][tt * ntype + tt2][1].reshape([N, -1]))
        self.map = maps2
        FioDic().save(self.map_file, self.map)
        log.info("NVNMD: finish building mapping table")
        return self.map

    # =====================================================================
    # build r2s
    # =====================================================================

    def build_r2s(self, r2):
        r"""Build the TF graph mapping squared distance `r2` to the normalized
        cutoff function s and s/r for every atom type."""
        # limit = nvnmd_cfg.dscp['rc_lim']
        rmin = nvnmd_cfg.dscp['rcut_smth']
        rmax = nvnmd_cfg.dscp['rcut']
        # ntypex = nvnmd_cfg.dscp['ntypex']
        ntype = nvnmd_cfg.dscp['ntype']
        avg, std = get_normalize(nvnmd_cfg.weight)
        avg, std = np.float32(avg), np.float32(std)
        r = tf.sqrt(r2)
        r_ = tf.clip_by_value(r, rmin, rmax)
        r__ = tf.clip_by_value(r, 0, rmax)
        uu = (r_ - rmin) / (rmax - rmin)
        # smooth cutoff polynomial
        vv = uu * uu * uu * (-6 * uu * uu + 15 * uu - 10) + 1
        sl = []
        srl = []
        for tt in range(ntype):
            s = vv / r__
            sr = s / r__
            s = tf.reshape(s, [-1, 1])
            sr = tf.reshape(sr, [-1, 1])
            # normalize with the per-type statistics from the trained weights
            s = (s - avg[tt, 0]) / std[tt, 0]
            sr = sr / std[tt, 1]
            sl.append(s)
            srl.append(sr)
        return sl, srl

    def build_ds_dr(self, r2, s, sr):
        r"""Build gradients of s and sr with respect to `r2` for every atom type."""
        # ntypex = nvnmd_cfg.dscp['ntypex']
        ntype = nvnmd_cfg.dscp['ntype']
        ds_drl = []
        dsr_drl = []
        for tt in range(ntype):
            si = s[tt]
            sri = sr[tt]
            ds_dr = tf.gradients(si, r2)
            dsr_dr = tf.gradients(sri, r2)
            ds_drl.append(ds_dr[0])
            dsr_drl.append(dsr_dr[0])
        return ds_drl, dsr_drl

    def build_r2s_r2ds(self):
        r"""Assemble the r2 -> (s, sr, ds/dr2, dsr/dr2) graph with a placeholder input."""
        dic_ph = {}
        dic_ph['r2'] = tf.placeholder(tf.float32, [None, 1], 't_r2')
        dic_ph['s'], dic_ph['sr'] = self.build_r2s(dic_ph['r2'])
        dic_ph['ds_dr2'], dic_ph['dsr_dr2'] = self.build_ds_dr(dic_ph['r2'], dic_ph['s'], dic_ph['sr'])
        return dic_ph

    def run_u2s(self):
        r"""Evaluate s(r2), sr(r2) and their gradients on a uniform r2 grid."""
        # ntypex = nvnmd_cfg.dscp['ntypex']
        ntype = nvnmd_cfg.dscp['ntype']
        avg, std = get_normalize(nvnmd_cfg.weight)
        avg, std = np.float32(avg), np.float32(std)
        NBIT_FEA_X = nvnmd_cfg.nbit['NBIT_FEA_X']
        NBIT_FEA_X_FL = nvnmd_cfg.nbit['NBIT_FEA_X_FL']
        dic_ph = self.build_r2s_r2ds()
        sess = get_sess()
        N = 2 ** NBIT_FEA_X
        N2 = 2 ** NBIT_FEA_X_FL
        # N+1 rather than N for calculating the difference
        r2 = 1.0 * np.arange(0, N + 1) / N2
        r2 = np.reshape(r2, [-1, 1])
        feed_dic = {dic_ph['r2']: r2}
        key = 'r2,s,sr,ds_dr2,dsr_dr2'
        tlst = [dic_ph[k] for k in key.split(',')]
        # res = sess.run(tlst, feed_dic)
        res = run_sess(sess, tlst, feed_dict=feed_dic)
        res2 = {}
        key = key.split(',')
        for ii in range(len(key)):
            res2[key[ii]] = res[ii]
        # change value
        # set 0 value, when u=0
        for tt in range(ntype):
            res2['s'][tt][0] = -avg[tt, 0] / std[tt, 0]
            res2['sr'][tt][0] = 0
            res2['ds_dr2'][tt][0] = 0
            res2['dsr_dr2'][tt][0] = 0
        # r = np.sqrt(res2['r2'])
        sess.close()
        return res2

    # =====================================================================
    # build s2G
    # =====================================================================

    def build_s2G(self, s):
        r"""Build the TF graph mapping s to the embedding matrix G for each type pair,
        replaying the trained embedding-net weights."""
        ntypex = nvnmd_cfg.dscp['ntypex']
        ntype = nvnmd_cfg.dscp['ntype']
        activation_fn = tf.tanh
        outputs_size = nvnmd_cfg.dscp['NNODE_FEAS']
        xyz_scatters = []
        for tt in range(ntypex):
            for tt2 in range(ntype):
                xyz_scatter = s
                for ll in range(1, len(outputs_size)):
                    w, b = get_filter_weight(nvnmd_cfg.weight, tt, tt2, ll)
                    w, b = np.float32(w), np.float32(b)
                    # residual connections as in the deepmd embedding net
                    if outputs_size[ll] == outputs_size[ll - 1]:
                        xyz_scatter += activation_fn(tf.matmul(xyz_scatter, w) + b)
                    elif outputs_size[ll] == outputs_size[ll - 1] * 2:
                        xyz_scatter = tf.concat([xyz_scatter, xyz_scatter], 1) + activation_fn(tf.matmul(xyz_scatter, w) + b)
                    else:
                        xyz_scatter = activation_fn(tf.matmul(xyz_scatter, w) + b)
                xyz_scatters.append(xyz_scatter)
        return xyz_scatters

    def build_dG_ds(self, G, s):
        r"""Build the gradient dG/ds, one column per embedding output."""
        ntypex = nvnmd_cfg.dscp['ntypex']
        ntype = nvnmd_cfg.dscp['ntype']
        M1 = nvnmd_cfg.dscp['M1']
        dG_ds = []
        for tt in range(ntypex):
            for tt2 in range(ntype):
                Gi = G[tt * ntype + tt2]
                si = s
                dG_ds_i = []
                for ii in range(M1):
                    dG_ds_ii = tf.reshape(tf.gradients(Gi[:, ii], si), [-1, 1])
                    dG_ds_i.append(dG_ds_ii)
                dG_ds_i = tf.concat(dG_ds_i, axis=1)
                dG_ds.append(dG_ds_i)
        return dG_ds

    def build_s2G_s2dG(self):
        r"""Assemble the s -> (G, dG/ds) graph with a placeholder input."""
        # ntypex = nvnmd_cfg.dscp['ntypex']
        dic_ph = {}
        dic_ph['s2'] = tf.placeholder(tf.float32, [None, 1], 't_s')
        dic_ph['G'] = self.build_s2G(dic_ph['s2'])
        dic_ph['dG_ds'] = self.build_dG_ds(dic_ph['G'], dic_ph['s2'])
        return dic_ph

    def run_s2G(self, dat):
        r"""Evaluate G(s) and dG/ds on a uniform s grid starting at -2.0."""
        NBIT_FEA_X = nvnmd_cfg.nbit['NBIT_FEA_X']
        NBIT_FEA_X2_FL = nvnmd_cfg.nbit['NBIT_FEA_X2_FL']
        dic_ph = self.build_s2G_s2dG()
        sess = get_sess()
        N = 2 ** NBIT_FEA_X
        N2 = 2 ** NBIT_FEA_X2_FL
        s_min, s_max = get_rng_s(nvnmd_cfg.weight)
        #
        if (s_min < -2.0) or (s_max > 14.0):
            log.warning(f"the range of s [{s_min}, {s_max}] is over the limit [-2.0, 14.0]")
        # the table always starts from the fixed lower bound -2.0
        # (wrap_dscp hard-codes the same s_min when reading the table)
        s_min = -2.0
        s = s_min + np.arange(0, N + 1) / N2
        s = np.reshape(s, [-1, 1])
        feed_dic = {dic_ph['s2']: s}
        key = 's2,G,dG_ds'
        tlst = [dic_ph[k] for k in key.split(',')]
        # res = sess.run(tlst, feed_dic)
        res = run_sess(sess, tlst, feed_dict=feed_dic)
        res2 = {}
        key = key.split(',')
        for ii in range(len(key)):
            res2[key[ii]] = res[ii]
        sess.close()
        return res2
def mapt(
    *,
    nvnmd_config: Optional[str] = 'nvnmd/config.npy',
    nvnmd_weight: Optional[str] = 'nvnmd/weight.npy',
    nvnmd_map: Optional[str] = 'nvnmd/map.npy',
    **kwargs
):
    r"""Build the NVNMD mapping table from config and weight files and save it."""
    MapTable(nvnmd_config, nvnmd_weight, nvnmd_map).build_map()
import os
import logging
from deepmd.env import tf
from deepmd.entrypoints.train import train
from deepmd.entrypoints.freeze import freeze
from deepmd.nvnmd.entrypoints.mapt import mapt
from deepmd.nvnmd.entrypoints.wrap import wrap
from deepmd.nvnmd.utils.fio import FioDic
from deepmd.nvnmd.utils.config import nvnmd_cfg
from deepmd.nvnmd.data.data import jdata_deepmd_input
log = logging.getLogger(__name__)
# Default arguments for the training entry point when invoked
# programmatically (mirrors the `dp train` CLI defaults).
jdata_cmd_train = {
    "INPUT": "train.json",
    "init_model": None,
    "restart": None,
    "output": "out.json",
    "init_frz_model": None,
    "mpi_log": "master",
    "log_level": 2,
    "log_path": None,
    "is_compress": False
}
# Default arguments for the freeze entry point (`dp freeze` equivalent);
# nvnmd_weight makes freeze also dump the weights used by the wrap step.
jdata_cmd_freeze = {
    "checkpoint_folder": '.',
    "output": 'frozen_model.pb',
    "node_names": None,
    "nvnmd_weight": "nvnmd/weight.npy"
}
def replace_path(p, p2):
    r"""Return path `p` with its next-to-last component replaced by `p2`."""
    components = p.split(os.sep)
    components[-2] = p2
    return os.path.join(*components)
def add_path(p, p2):
    r"""Insert `p2` as a directory component just before the last component of `p`.

    NOTE(review): splits on '/' while replace_path splits on os.sep --
    confirm which separator the callers actually use.
    """
    components = p.split('/')
    components.insert(-1, p2)
    return os.path.join(*components)
def normalized_input(fn, PATH_CNN):
    r"""Normalize an input script file for the continuous neural network (CNN).

    Loads the user script `fn`, fills in the nvnmd defaults, rebuilds the
    `model` section from the nvnmd settings, and redirects the training
    output files into `PATH_CNN`.  Returns the normalized jdata dict.
    """
    f = FioDic()
    jdata = f.load(fn, jdata_deepmd_input)
    # nvnmd
    # NOTE(review): this aliases (and mutates) the shared jdata_deepmd_input
    # template -- later users of the template see enable=True; confirm intended
    jdata_nvnmd = jdata_deepmd_input['nvnmd']
    jdata_nvnmd['enable'] = True
    jdata_nvnmd_ = f.get(jdata, 'nvnmd', jdata_nvnmd)
    jdata_nvnmd = f.update(jdata_nvnmd_, jdata_nvnmd)
    # model: only seed/sel/rcut are taken over; the rest comes from nvnmd_cfg
    jdata_model = {
        "descriptor": {
            "seed": 1,
            "sel": jdata_nvnmd_["sel"],
            "rcut": jdata_nvnmd_['rcut'],
            "rcut_smth": jdata_nvnmd_['rcut_smth']
        },
        "fitting_net": {
            "seed": 1
        }}
    nvnmd_cfg.init_from_jdata(jdata_nvnmd)
    nvnmd_cfg.init_from_deepmd_input(jdata_model)
    nvnmd_cfg.init_train_mode('cnn')
    # training: force progress display and move output files under PATH_CNN
    jdata_train = f.get(jdata, 'training', {})
    jdata_train['disp_training'] = True
    jdata_train['time_training'] = True
    jdata_train['profiling'] = False
    jdata_train['disp_file'] = add_path(jdata_train['disp_file'], PATH_CNN)
    jdata_train['save_ckpt'] = add_path(jdata_train['save_ckpt'], PATH_CNN)
    #
    jdata['model'] = nvnmd_cfg.get_model_jdata()
    jdata['nvnmd'] = nvnmd_cfg.get_nvnmd_jdata()
    return jdata
def normalized_input_qnn(jdata, PATH_QNN, CONFIG_CNN, WEIGHT_CNN, MAP_CNN):
    r"""Normalize an input script file for the quantized neural network (QNN).

    Takes the CNN-normalized `jdata`, points the nvnmd section at the CNN
    results (config/weight/map files), switches the global configuration to
    QNN training mode, and redirects the training output files into
    `PATH_QNN`.  Returns the updated jdata dict.
    """
    #
    # NOTE(review): mutates the shared jdata_deepmd_input template -- confirm intended
    jdata_nvnmd = jdata_deepmd_input['nvnmd']
    jdata_nvnmd['enable'] = True
    jdata_nvnmd['config_file'] = CONFIG_CNN
    jdata_nvnmd['weight_file'] = WEIGHT_CNN
    jdata_nvnmd['map_file'] = MAP_CNN
    nvnmd_cfg.init_from_jdata(jdata_nvnmd)
    nvnmd_cfg.init_train_mode('qnn')
    jdata['nvnmd'] = nvnmd_cfg.get_nvnmd_jdata()
    # training: swap the nvnmd_cnn path component for the qnn one
    jdata2 = jdata['training']
    jdata2['disp_file'] = replace_path(jdata2['disp_file'], PATH_QNN)
    jdata2['save_ckpt'] = replace_path(jdata2['save_ckpt'], PATH_QNN)
    jdata['training'] = jdata2
    return jdata
def train_nvnmd(
    *,
    INPUT: str,
    step: str,
    **kwargs,
):
    r"""Two-step NVNMD training workflow.

    step 's1': normalize the input, train the continuous (CNN) model,
    freeze it, and build the mapping table under ``nvnmd_cnn/``.
    step 's2': re-normalize for QNN, train the quantized model from the CNN
    results, freeze it, and wrap the binary model file under ``nvnmd_qnn/``.
    """
    # test input
    if not os.path.exists(INPUT):
        log.warning("The input script %s does not exist"%(INPUT))
    # STEP1
    PATH_CNN = 'nvnmd_cnn'
    CONFIG_CNN = os.path.join(PATH_CNN, 'config.npy')
    INPUT_CNN = os.path.join(PATH_CNN, 'train.json')
    WEIGHT_CNN = os.path.join(PATH_CNN, 'weight.npy')
    FRZ_MODEL_CNN = os.path.join(PATH_CNN, 'frozen_model.pb')
    MAP_CNN = os.path.join(PATH_CNN, 'map.npy')
    if step == "s1":
        # normalize input file
        jdata = normalized_input(INPUT, PATH_CNN)
        FioDic().save(INPUT_CNN, jdata)
        nvnmd_cfg.save(CONFIG_CNN)
        # train cnn
        jdata = jdata_cmd_train.copy()
        jdata['INPUT'] = INPUT_CNN
        train(**jdata)
        tf.reset_default_graph()
        # freeze
        jdata = jdata_cmd_freeze.copy()
        jdata['checkpoint_folder'] = PATH_CNN
        jdata['output'] = FRZ_MODEL_CNN
        jdata['nvnmd_weight'] = WEIGHT_CNN
        freeze(**jdata)
        tf.reset_default_graph()
        # map table
        jdata = {
            "nvnmd_config": CONFIG_CNN,
            "nvnmd_weight": WEIGHT_CNN,
            "nvnmd_map": MAP_CNN
        }
        mapt(**jdata)
        tf.reset_default_graph()
    # STEP2
    PATH_QNN = 'nvnmd_qnn'
    CONFIG_QNN = os.path.join(PATH_QNN, 'config.npy')
    INPUT_QNN = os.path.join(PATH_QNN, 'train.json')
    WEIGHT_QNN = os.path.join(PATH_QNN, 'weight.npy')
    FRZ_MODEL_QNN = os.path.join(PATH_QNN, 'frozen_model.pb')
    MODEL_QNN = os.path.join(PATH_QNN, 'model.pb')
    if step == "s2":
        # normalize input file (CNN first, then overlay the QNN settings)
        jdata = normalized_input(INPUT, PATH_CNN)
        jdata = normalized_input_qnn(jdata, PATH_QNN, CONFIG_CNN, WEIGHT_CNN, MAP_CNN)
        FioDic().save(INPUT_QNN, jdata)
        nvnmd_cfg.save(CONFIG_QNN)
        # train qnn
        jdata = jdata_cmd_train.copy()
        jdata['INPUT'] = INPUT_QNN
        train(**jdata)
        tf.reset_default_graph()
        # freeze
        jdata = jdata_cmd_freeze.copy()
        jdata['checkpoint_folder'] = PATH_QNN
        jdata['output'] = FRZ_MODEL_QNN
        jdata['nvnmd_weight'] = WEIGHT_QNN
        freeze(**jdata)
        tf.reset_default_graph()
        # wrap: note the map table comes from the CNN step
        jdata = {
            "nvnmd_config": CONFIG_QNN,
            "nvnmd_weight": WEIGHT_QNN,
            "nvnmd_map": MAP_CNN,
            "nvnmd_model": MODEL_QNN
        }
        wrap(**jdata)
        tf.reset_default_graph()
import numpy as np
import logging
from deepmd.nvnmd.utils.fio import FioBin, FioTxt
from deepmd.nvnmd.utils.config import nvnmd_cfg
from deepmd.nvnmd.utils.weight import get_fitnet_weight
from deepmd.nvnmd.utils.encode import Encode
from deepmd.nvnmd.utils.op import map_nvnmd
from deepmd.nvnmd.data.data import jdata_deepmd_input, jdata_sys
from typing import List, Optional
log = logging.getLogger(__name__)
class Wrap():
    r"""Generate the binary model file (model.pb).

    The model file can be used to run the NVNMD with lammps; the pair style
    needs to be set as:

    .. code-block:: lammps

        pair_style nvnmd model.pb
        pair_coeff * *

    Parameters
    ----------
    config_file
        input file name
        an .npy file containing the configuration information of NVNMD model
    weight_file
        input file name
        an .npy file containing the weights of NVNMD model
    map_file
        input file name
        an .npy file containing the mapping tables of NVNMD model
    model_file
        output file name
        an .pb file containing the model using in the NVNMD

    References
    ----------
    DOI: 10.1038/s41524-022-00773-z
    """

    def __init__(
        self,
        config_file: str,
        weight_file: str,
        map_file: str,
        model_file: str
    ):
        self.config_file = config_file
        self.weight_file = weight_file
        self.map_file = map_file
        self.model_file = model_file
        # enable NVNMD mode in the global configuration
        jdata = jdata_deepmd_input['nvnmd']
        jdata['config_file'] = config_file
        jdata['weight_file'] = weight_file
        jdata['map_file'] = map_file
        jdata['enable'] = True
        nvnmd_cfg.init_from_jdata(jdata)

    def wrap(self):
        r"""Wrap the descriptor configuration, fitting-net parameters, and
        mapping tables into hex text sections and the final binary model file.
        """
        dscp = nvnmd_cfg.dscp
        ctrl = nvnmd_cfg.ctrl
        M1 = dscp['M1']
        ntype = dscp['ntype']
        ntype_max = dscp['ntype_max']
        NSTDM_M1X = ctrl['NSTDM_M1X']
        e = Encode()
        # build the raw bit strings for each section
        bcfg = self.wrap_dscp()
        bfps, bbps = self.wrap_fitn()
        bfea, bgra = self.wrap_map()
        # split data with {nbit} bits per row
        hcfg = e.bin2hex(e.split_bin(bcfg, 72))
        # the length of hcfg needs to be a multiple of NSTDM_M1X
        hcfg = e.extend_list(hcfg, int(np.ceil(len(hcfg) / NSTDM_M1X)) * NSTDM_M1X)
        hfps = e.bin2hex(e.split_bin(bfps, 72))
        # hfps = e.extend_list(hfps, (len(hfps) // ntype) * ntype_max)
        hbps = e.bin2hex(e.split_bin(bbps, 72))
        # hbps = e.extend_list(hbps, (len(hbps) // ntype) * ntype_max)
        # split into multiple rows
        bfea = e.split_bin(bfea, len(bfea[0]) // NSTDM_M1X)
        # bfea = e.reverse_bin(bfea, NSTDM_M1X)
        # extend the number of lines
        hfea = e.bin2hex(bfea)
        hfea = e.extend_list(hfea, (len(hfea) // ntype) * ntype_max)
        # split into multiple rows
        bgra = e.split_bin(bgra, len(bgra[0]) // NSTDM_M1X)
        # bgra = e.reverse_bin(bgra, NSTDM_M1X)
        # extend the number of lines
        hgra = e.bin2hex(bgra)
        hgra = e.extend_list(hgra, (len(hgra) // ntype) * ntype_max)
        # extend data according to the number of bits per row of BRAM
        nhex = 512
        hcfg = e.extend_hex(hcfg, nhex)
        hfps = e.extend_hex(hfps, nhex)
        hbps = e.extend_hex(hbps, nhex)
        hfea = e.extend_hex(hfea, nhex)
        hgra = e.extend_hex(hgra, nhex)
        # DEVELOP_DEBUG
        if jdata_sys['debug']:
            log.info("len(hcfg): %d" % (len(hcfg)))
            log.info("len(hfps): %d" % (len(hfps)))
            log.info("len(hbps): %d" % (len(hbps)))
            log.info("len(hfea): %d" % (len(hfea)))
            log.info("len(hgra): %d" % (len(hgra)))
        # dump each section as hex text for inspection
        FioTxt().save('nvnmd/wrap/hcfg.txt', hcfg)
        FioTxt().save('nvnmd/wrap/hfps.txt', hfps)
        FioTxt().save('nvnmd/wrap/hbps.txt', hbps)
        FioTxt().save('nvnmd/wrap/hfea.txt', hfea)
        FioTxt().save('nvnmd/wrap/hgra.txt', hgra)
        # record the section sizes in the configuration
        NCFG = len(hcfg)
        NNET = len(hfps)
        NFEA = len(hfea)
        nvnmd_cfg.nbit['NCFG'] = NCFG
        nvnmd_cfg.nbit['NNET'] = NNET
        nvnmd_cfg.nbit['NFEA'] = NFEA
        nvnmd_cfg.save(nvnmd_cfg.config_file)
        head = self.wrap_head(NCFG, NNET, NFEA)
        # concatenate: header + config + fp net + bp net + feature + gradient
        hs = [] + head
        hs.extend(hcfg)
        hs.extend(hfps)
        hs.extend(hbps)
        hs.extend(hfea)
        hs.extend(hgra)
        FioBin().save(self.model_file, hs)
        log.info("NVNMD: finish wrapping model file")

    def wrap_head(self, NCFG, NNET, NFEA):
        r"""Build the model-file header: the three section line counts and the
        quantized cutoff radius, packed into one BRAM row."""
        nbit = nvnmd_cfg.nbit
        NBTI_MODEL_HEAD = nbit['NBTI_MODEL_HEAD']
        NBIT_DATA_FL = nbit['NBIT_DATA_FL']
        rcut = nvnmd_cfg.dscp['rcut']
        bs = ''
        e = Encode()
        # nline
        bs = e.dec2bin(NCFG, NBTI_MODEL_HEAD)[0] + bs
        bs = e.dec2bin(NNET, NBTI_MODEL_HEAD)[0] + bs
        bs = e.dec2bin(NFEA, NBTI_MODEL_HEAD)[0] + bs
        # dscp
        RCUT = e.qr(rcut, NBIT_DATA_FL)
        bs = e.dec2bin(RCUT, NBTI_MODEL_HEAD)[0] + bs
        # extend
        hs = e.bin2hex(bs)
        nhex = 512
        hs = e.extend_hex(hs, nhex)
        return hs

    def wrap_dscp(self):
        r"""Wrap the configuration of descriptor.

        Packs the selection counts, the log2 of NIX, and the precomputed
        G*s products for every type pair into one bit string.
        """
        dscp = nvnmd_cfg.dscp
        nbit = nvnmd_cfg.nbit
        maps = nvnmd_cfg.map
        NBIT_FEA_X = nbit['NBIT_FEA_X']
        NBIT_FEA_X_FL = nbit['NBIT_FEA_X_FL']
        NBIT_FEA_X2_FL = nbit['NBIT_FEA_X2_FL']
        NBIT_FEA_FL = nbit['NBIT_FEA_FL']
        NBIT_LST = nbit['NBIT_LST']
        NBIT_SHIFT = nbit['NBIT_SHIFT']
        bs = ''
        e = Encode()
        # sel
        SEL = dscp['SEL']
        bs = e.dec2bin(SEL[0], NBIT_LST)[0] + bs
        bs = e.dec2bin(SEL[1], NBIT_LST)[0] + bs
        bs = e.dec2bin(SEL[2], NBIT_LST)[0] + bs
        bs = e.dec2bin(SEL[3], NBIT_LST)[0] + bs
        #
        NIX = dscp['NIX']
        ln2_NIX = int(np.log2(NIX))
        bs = e.dec2bin(ln2_NIX, NBIT_SHIFT)[0] + bs
        # G*s
        # ntypex = dscp['ntypex']
        ntype = dscp['ntype']
        # ntypex_max = dscp['ntypex_max']
        ntype_max = dscp['ntype_max']
        M1 = dscp['M1']
        GSs = []
        for tt in range(ntype_max):
            for tt2 in range(ntype_max):
                if (tt < ntype) and (tt2 < ntype):
                    # NOTE(review): the first type index in the map key is
                    # hard-coded to 0 (s_t0_*, G_t0_*) -- presumably ntypex==1;
                    # confirm for multi-ntypex models
                    s = maps[f's_t{0}_t{tt}'][0][0]
                    s = e.qf(s, NBIT_FEA_FL) / (2**NBIT_FEA_FL)
                    s_min = -2.0
                    yk, dyk = maps[f'G_t{0}_t{tt2}']
                    prec = 1 / (2 ** NBIT_FEA_X2_FL)
                    # interpolate G at s using the (value, delta) table
                    G = map_nvnmd(s - s_min, yk, dyk / prec, prec)
                    G = e.qf(G, NBIT_FEA_FL) / (2**NBIT_FEA_FL)
                    v = s * G
                else:
                    # pad missing type pairs with zeros
                    v = np.zeros(M1)
                for ii in range(M1):
                    GSs.extend(e.dec2bin(e.qr(v[ii], 2 * NBIT_FEA_FL), 27, True))
        sGSs = ''.join(GSs[::-1])
        bs = sGSs + bs
        return bs

    def wrap_fitn(self):
        r"""Wrap the weights of fitting net.

        Quantizes the per-type weights and biases, then interleaves them into
        per-selection bit strings for the forward (fp) and backward (bp)
        passes of the hardware pipeline.
        """
        dscp = nvnmd_cfg.dscp
        fitn = nvnmd_cfg.fitn
        weight = nvnmd_cfg.weight
        nbit = nvnmd_cfg.nbit
        ctrl = nvnmd_cfg.ctrl
        ntype = dscp['ntype']
        ntype_max = dscp['ntype_max']
        nlayer_fit = fitn['nlayer_fit']
        NNODE_FITS = fitn['NNODE_FITS']
        NBIT_SUM = nbit['NBIT_SUM']
        NBIT_DATA_FL = nbit['NBIT_DATA_FL']
        NBIT_WEIGHT = nbit['NBIT_WEIGHT']
        NBIT_WEIGHT_FL = nbit['NBIT_WEIGHT_FL']
        NBIT_SPE = nbit['NBIT_SPE']
        NSTDM = ctrl['NSTDM']
        NSEL = ctrl['NSEL']
        # encode all parameters
        bb, bw = [], []
        for ll in range(nlayer_fit):
            bbt, bwt = [], []
            for tt in range(ntype_max):
                # get parameters: weight and bias
                if (tt < ntype):
                    w, b = get_fitnet_weight(weight, tt, ll, nlayer_fit)
                else:
                    # pad missing types with zeroed copies of type 0
                    w, b = get_fitnet_weight(weight, 0, ll, nlayer_fit)
                    w = w * 0
                    b = b * 0
                # restrict the shift value of energy
                if (ll == (nlayer_fit - 1)):
                    b = b * 0
                bbi = self.wrap_bias(b, NBIT_SUM, NBIT_DATA_FL)
                bwi = self.wrap_weight(w, NBIT_WEIGHT, NBIT_WEIGHT_FL)
                bbt.append(bbi)
                bwt.append(bwi)
            bb.append(bbt)
            bw.append(bwt)
        # interleave into NSEL bit strings (one per type/time-division slot)
        bfps, bbps = [], []
        for ss in range(NSEL):
            tt = ss // NSTDM
            sc = ss % NSTDM
            sr = ss % NSTDM
            bfp, bbp = '', ''
            for ll in range(nlayer_fit):
                nr = NNODE_FITS[ll]
                nc = NNODE_FITS[ll + 1]
                nrs = int(np.ceil(nr / NSTDM))
                ncs = int(np.ceil(nc / NSTDM))
                if (nc == 1):
                    # final layer
                    # fp #
                    bi = [bw[ll][tt][sr * nrs + rr][cc] for rr in range(nrs) for cc in range(nc)]
                    bi.reverse()
                    bfp = ''.join(bi) + bfp
                    #
                    # NOTE(review): `sc * ncs * 0` evaluates to 0, i.e. the
                    # bias index is just cc -- confirm the `* 0` is intended
                    # (final layer has a single shared bias)
                    bi = [bb[ll][tt][sc * ncs * 0 + cc] for cc in range(ncs)]
                    bi.reverse()
                    bfp = ''.join(bi) + bfp
                    # bp #
                    bi = [bw[ll][tt][sr * nrs + rr][cc] for rr in range(nrs) for cc in range(nc)]
                    bi.reverse()
                    bbp = ''.join(bi) + bbp
                    #
                    bi = [bb[ll][tt][sc * ncs * 0 + cc] for cc in range(ncs)]
                    bi.reverse()
                    bbp = ''.join(bi) + bbp
                else:
                    # fp #
                    bi = [bw[ll][tt][rr][sc * ncs + cc] for cc in range(ncs) for rr in range(nr)]
                    bi.reverse()
                    bfp = ''.join(bi) + bfp
                    #
                    bi = [bb[ll][tt][sc * ncs + cc] for cc in range(ncs)]
                    bi.reverse()
                    bfp = ''.join(bi) + bfp
                    # bp #
                    bi = [bw[ll][tt][sr * nrs + rr][cc] for rr in range(nrs) for cc in range(nc)]
                    bi.reverse()
                    bbp = ''.join(bi) + bbp
                    #
                    bi = [bb[ll][tt][sc * ncs + cc] for cc in range(ncs)]
                    bi.reverse()
                    bbp = ''.join(bi) + bbp
            bfps.append(bfp)
            bbps.append(bbp)
        return bfps, bbps

    def wrap_bias(self, bias, NBIT_SUM, NBIT_DATA_FL):
        r"""Quantize a bias vector and encode it as fixed-width binary strings."""
        e = Encode()
        bias = e.qr(bias, NBIT_DATA_FL)
        Bs = e.dec2bin(bias, NBIT_SUM, True)
        return Bs

    def wrap_weight(self, weight, NBIT_WEIGHT, NBIT_WEIGHT_FL):
        r"""Quantize a weight matrix and encode it as a 2-D list of binary strings
        with the original (row, column) layout."""
        sh = weight.shape
        nr, nc = sh[0], sh[1]
        e = Encode()
        weight = e.qr(weight, NBIT_WEIGHT_FL)
        Ws = e.dec2bin(weight, NBIT_WEIGHT, True)
        Ws = [[Ws[nc * rr + cc] for cc in range(nc)] for rr in range(nr)]
        return Ws

    def wrap_map(self):
        r"""Wrap the mapping table of embedding network.

        Stacks the per-type (value, delta) tables for s, sr, G and their
        gradients, interleaves value/delta columns, and encodes them into the
        feature (bfea) and gradient (bgra) bit-string sections.
        """
        dscp = nvnmd_cfg.dscp
        maps = nvnmd_cfg.map
        nbit = nvnmd_cfg.nbit
        M1 = dscp['M1']
        ntype = dscp['ntype']
        NBIT_FEA = nbit['NBIT_FEA']
        NBIT_FEA_FL = nbit['NBIT_FEA_FL']
        keys = 's,sr,G'.split(',')
        keys2 = 'ds_dr2,dsr_dr2,dG_ds'.split(',')
        e = Encode()
        datas = {}
        datas2 = {}
        # NOTE(review): only type pairs (0, tt) are wrapped -- consistent with
        # the hard-coded t0 index in wrap_dscp; confirm for ntypex > 1
        idxs = [[0, tt] for tt in range(ntype)]
        for ii in range(len(idxs)):
            tt, tt2 = idxs[ii]
            postfix = f'_t{tt}_t{tt2}'
            for key in (keys + keys2):
                if ii == 0:
                    datas[key] = []
                    datas2[key] = []
                datas[key].append(maps[key + postfix][0])  # v
                datas2[key].append(maps[key + postfix][1])  # dv
        for key in (keys + keys2):
            datas[key] = np.vstack(datas[key])
            datas[key] = e.qr(datas[key], NBIT_FEA_FL)
            datas2[key] = np.vstack(datas2[key])
            datas2[key] = e.qr(datas2[key], NBIT_FEA_FL)
        # fea
        dat = [datas[key] for key in keys] + [datas2[key] for key in keys]
        # column permutation interleaving value and delta_value pairs
        idx = np.int32(np.arange(0, int((M1 + 2) * 2)).reshape([2, -1]).transpose().reshape(-1))
        dat = np.hstack(dat)
        dat = dat[:, ::-1]
        dat = dat[:, idx]  # data consists of value and delta_value
        bs = e.dec2bin(dat, NBIT_FEA, True, 'fea')
        bs = e.merge_bin(bs, (M1 + 2) * 2)
        bfea = bs
        # gra
        dat = [datas[key] for key in keys2] + [datas2[key] for key in keys2]
        dat = np.hstack(dat)
        dat = dat[:, ::-1]
        dat = dat[:, idx]
        bs = e.dec2bin(dat, NBIT_FEA, True, 'gra')
        bs = e.merge_bin(bs, (M1 + 2) * 2)
        bgra = bs
        return bfea, bgra
def wrap(
    *,
    nvnmd_config: Optional[str] = 'nvnmd/config.npy',
    nvnmd_weight: Optional[str] = 'nvnmd/weight.npy',
    nvnmd_map: Optional[str] = 'nvnmd/map.npy',
    nvnmd_model: Optional[str] = 'nvnmd/model.pb',
    **kwargs
):
    r"""Wrap the NVNMD binary model file from config, weight, and map files."""
    Wrap(nvnmd_config, nvnmd_weight, nvnmd_map, nvnmd_model).wrap()
"""
nvnmd.fit
=========
Provides
1. continuous fitting network
2. quantized fitting network
"""
\ No newline at end of file
from deepmd.env import tf
from deepmd.env import GLOBAL_TF_FLOAT_PRECISION
from deepmd.nvnmd.utils.config import nvnmd_cfg
from deepmd.nvnmd.utils.network import one_layer as one_layer_nvnmd
from .argcheck import nvnmd_args
from .config import nvnmd_cfg
from .encode import Encode
from .fio import FioBin, FioDic, FioTxt
from .network import one_layer
from .op import map_nvnmd
from .weight import get_filter_weight, get_fitnet_weight
__all__ = [
"nvnmd_args",
"nvnmd_cfg",
"Encode",
"FioBin",
"FioDic",
"FioTxt",
"one_layer",
"map_nvnmd",
"get_filter_weight",
"get_fitnet_weight",
]
from dargs import Argument
def nvnmd_args():
    r"""Build the dargs `Argument` describing the `nvnmd` section of the input script."""
    doc_net_size_file = "configure the number of nodes of fitting_net; it can only be set to 128"
    doc_map_file = "A file containing the mapping tables to replace the calculation of embedding nets"
    doc_config_file = "A file containing the parameters about how to implement the model in certain hardware"
    doc_weight_file = "a *.npy file containing the weights of the model"
    doc_enable = "enable the nvnmd training"
    doc_restore_descriptor = "enable to restore the parameter of embedding_net from weight.npy"
    doc_restore_fitting_net = "enable to restore the parameter of fitting_net from weight.npy"
    doc_quantize_descriptor = "enable the quantization of descriptor"
    doc_quantize_fitting_net = "enable the quantization of fitting_net"
    # NOTE(review): every field is declared required (optional=False) while
    # also carrying a default -- confirm whether these should be optional=True
    args = [
        Argument("net_size", int, optional=False, default=128, doc=doc_net_size_file),
        Argument("map_file", str, optional=False, default='none', doc=doc_map_file),
        Argument("config_file", str, optional=False, default='none', doc=doc_config_file),
        Argument("weight_file", str, optional=False, default='none', doc=doc_weight_file),
        Argument("enable", bool, optional=False, default=False, doc=doc_enable),
        Argument("restore_descriptor", bool, optional=False, default=False, doc=doc_restore_descriptor),
        Argument("restore_fitting_net", bool, optional=False, default=False, doc=doc_restore_fitting_net),
        Argument("quantize_descriptor", bool, optional=False, default=False, doc=doc_quantize_descriptor),
        Argument("quantize_fitting_net", bool, optional=False, default=False, doc=doc_quantize_fitting_net),
    ]
    doc_nvnmd = 'The nvnmd options.'
    return Argument("nvnmd", dict, args, [], optional=True, doc=doc_nvnmd)
\ No newline at end of file
import numpy as np
import logging
from deepmd.nvnmd.data.data import jdata_config, jdata_configs, jdata_deepmd_input
from deepmd.nvnmd.data.data import NVNMD_WELCOME, NVNMD_CITATION
from deepmd.nvnmd.utils.fio import FioDic
log = logging.getLogger(__name__)
class NvnmdConfig():
r"""Configuration for NVNMD
record the message of model such as size, using nvnmd or not
Parameters
----------
jdata
a dictionary of input script
References
----------
DOI: 10.1038/s41524-022-00773-z
"""
    def __init__(
        self,
        jdata: dict
    ):
        # mapping tables loaded from map_file (empty until enabled)
        self.map = {}
        # start from the default hardware configuration template
        self.config = jdata_config
        # default location used by save() when no file name is given
        self.save_path = 'nvnmd/config.npy'
        # model weights loaded from weight_file (empty until enabled)
        self.weight = {}
        self.init_from_jdata(jdata)
def init_from_jdata(self, jdata: dict = {}):
r"""Initial this class with `jdata` loaded from input script
"""
if jdata == {}:
return None
self.net_size = jdata['net_size']
self.map_file = jdata['map_file']
self.config_file = jdata['config_file']
self.enable = jdata['enable']
self.weight_file = jdata['weight_file']
self.restore_descriptor = jdata['restore_descriptor']
self.restore_fitting_net = jdata['restore_fitting_net']
self.quantize_descriptor = jdata['quantize_descriptor']
self.quantize_fitting_net = jdata['quantize_fitting_net']
# load data
if self.enable:
self.map = FioDic().load(self.map_file, {})
self.weight = FioDic().load(self.weight_file, {})
jdata_config_ = jdata_config.copy()
jdata_config_['fitn']['neuron'][0] = self.net_size
load_config = FioDic().load(self.config_file, jdata_config_)
self.init_from_config(load_config)
# if load the file, set net_size
self.init_net_size()
def init_value(self):
r"""Initial member with dict
"""
self.dscp = self.config['dscp']
self.fitn = self.config['fitn']
self.size = self.config['size']
self.ctrl = self.config['ctrl']
self.nbit = self.config['nbit']
def init_train_mode(self, mod='cnn'):
r"""Configure for taining cnn or qnn
"""
if mod == 'cnn':
self.restore_descriptor = False
self.restore_fitting_net = False
self.quantize_descriptor = False
self.quantize_fitting_net = False
elif mod == 'qnn':
self.restore_descriptor = True
self.restore_fitting_net = True
self.quantize_descriptor = True
self.quantize_fitting_net = True
    def init_from_config(self, jdata):
        r"""Initialize member elements one by one.

        Merges `jdata` over the current configuration, re-derives each
        section (descriptor, fitting net, sizes, control, bit widths), and
        refreshes the attribute aliases.
        """
        self.config = FioDic().update(jdata, self.config)
        self.config['dscp'] = self.init_dscp(self.config['dscp'], self.config)
        self.config['fitn'] = self.init_fitn(self.config['fitn'], self.config)
        self.config['size'] = self.init_size(self.config['size'], self.config)
        self.config['ctrl'] = self.init_ctrl(self.config['ctrl'], self.config)
        self.config['nbit'] = self.init_nbit(self.config['nbit'], self.config)
        self.init_value()
    def init_net_size(self):
        r"""Initialize net_size from the fitting-net configuration and, when
        enabled, load the matching predefined hardware configuration."""
        # self.net_size = self.fitn['neuron'][0]
        self.net_size = self.config['fitn']['neuron'][0]
        if self.enable:
            key = str(self.net_size)
            if key in jdata_configs.keys():
                # log.info(f"NVNMD: configure the net_size is {key}")
                self.init_from_config(jdata_configs[key])
            else:
                log.error("NVNMD: don't have the configure of net_size")
    def init_from_deepmd_input(self, jdata):
        r"""Initialize members with the `model` section of a deepmd input script.

        Merges the `descriptor` and `fitting_net` subsections into the current
        configuration and re-derives the dependent entries.
        """
        self.config['dscp'] = FioDic().update(jdata['descriptor'], self.config['dscp'])
        self.config['fitn'] = FioDic().update(jdata['fitting_net'], self.config['fitn'])
        self.config['dscp'] = self.init_dscp(self.config['dscp'], self.config)
        self.config['fitn'] = self.init_fitn(self.config['fitn'], self.config)
        #
        self.init_net_size()
        self.init_value()
def init_dscp(self, jdata: dict, jdata_parent: dict = {}) -> dict:
r"""Initial members about descriptor
"""
jdata['M1'] = jdata['neuron'][-1]
jdata['M2'] = jdata['axis_neuron']
jdata['NNODE_FEAS'] = [1] + jdata['neuron']
jdata['nlayer_fea'] = len(jdata['neuron'])
jdata['same_net'] = int(1) if jdata['type_one_side'] else int(0)
jdata['NIDP'] = int(np.sum(jdata['sel']))
jdata['NIX'] = 2 ** int(np.ceil(np.log2(jdata['NIDP'] / 1.5)))
jdata['SEL'] = (jdata['sel'] + [0, 0, 0, 0])[0:4]
jdata['ntype'] = len(jdata['sel'])
jdata['ntypex'] = 1 if(jdata['same_net']) else jdata['ntype']
return jdata
def init_fitn(self, jdata: dict, jdata_parent: dict = {}) -> dict:
r"""Initial members about fitting network
"""
M1 = jdata_parent['dscp']['M1']
M2 = jdata_parent['dscp']['M2']
jdata['NNODE_FITS'] = [int(M1 * M2)] + jdata['neuron'] + [1]
jdata['nlayer_fit'] = len(jdata['neuron']) + 1
jdata['NLAYER'] = jdata['nlayer_fit']
return jdata
def init_size(self, jdata: dict, jdata_parent: dict = {}) -> dict:
r"""Initial members about ram capacity
"""
jdata['Na'] = jdata['NSPU']
jdata['NaX'] = jdata['MSPU']
return jdata
def init_ctrl(self, jdata: dict, jdata_parent: dict = {}) -> dict:
r"""Initial members about control signal
"""
ntype_max = jdata_parent['dscp']['ntype_max']
jdata['NSADV'] = jdata['NSTDM'] + 1
jdata['NSEL'] = jdata['NSTDM'] * ntype_max
if (32 % jdata['NSTDM_M1X'] > 0):
log.warning("NVNMD: NSTDM_M1X must be divisor of 32 for the right runing in data_merge module")
return jdata
def init_nbit(self, jdata: dict, jdata_parent: dict = {}) -> dict:
    r"""Initialize quantization-precision members (derived bit widths)."""
    nbit_data = jdata['NBIT_DATA']
    nbit_data_fl = jdata['NBIT_DATA_FL']
    nbit_fea_fl = jdata['NBIT_FEA_FL']
    # a coordinate is three data words
    jdata['NBIT_CRD'] = nbit_data * 3
    # index width for the maximum number of atoms
    jdata['NBIT_LST'] = int(np.ceil(np.log2(jdata_parent['size']['NaX'])))
    jdata['NBIT_ATOM'] = jdata['NBIT_SPE'] + jdata['NBIT_CRD']
    jdata['NBIT_LONG_ATOM'] = jdata['NBIT_SPE'] + jdata['NBIT_LONG_DATA'] * 3
    jdata['NBIT_RIJ'] = nbit_data_fl + 5
    jdata['NBIT_SUM'] = nbit_data_fl + 8
    jdata['NBIT_DATA2'] = nbit_data + nbit_data_fl
    jdata['NBIT_DATA2_FL'] = 2 * nbit_data_fl
    jdata['NBIT_DATA_FEA'] = nbit_data + nbit_fea_fl
    jdata['NBIT_DATA_FEA_FL'] = nbit_data_fl + nbit_fea_fl
    jdata['NBIT_FORCE_FL'] = 2 * nbit_data_fl - 1
    return jdata
def save(self, file_name=None):
    r"""Write the whole configuration dict to `file_name`; defaults to the
    last used save path, and remembers any explicitly given path.
    """
    if file_name is not None:
        self.save_path = file_name
    FioDic().save(self.save_path, self.config)
def get_dscp_jdata(self):
    r"""Generate the `model/descriptor` section of the input script from
    the current descriptor config.
    """
    jdata = jdata_deepmd_input['model']['descriptor']
    for key in ('sel', 'rcut', 'rcut_smth', 'neuron', 'type_one_side', 'axis_neuron'):
        jdata[key] = self.dscp[key]
    return jdata
def get_fitn_jdata(self):
    r"""Generate the `model/fitting_net` section of the input script."""
    jdata = jdata_deepmd_input['model']['fitting_net']
    jdata['neuron'] = self.fitn['neuron']
    return jdata
def get_model_jdata(self):
    r"""Generate the `model` section of the input script."""
    jdata = jdata_deepmd_input['model']
    jdata.update(descriptor=self.get_dscp_jdata(),
                 fitting_net=self.get_fitn_jdata())
    return jdata
def get_nvnmd_jdata(self):
    r"""Generate the `nvnmd` section of the input script from the
    same-named attributes of this config object.
    """
    jdata = jdata_deepmd_input['nvnmd']
    for key in ('net_size', 'config_file', 'weight_file', 'map_file', 'enable',
                'restore_descriptor', 'restore_fitting_net',
                'quantize_descriptor', 'quantize_fitting_net'):
        jdata[key] = getattr(self, key)
    return jdata
def get_learning_rate_jdata(self):
    r"""Return the default `learning_rate` section of the input script."""
    return jdata_deepmd_input['learning_rate']
def get_loss_jdata(self):
    r"""Return the default `loss` section of the input script."""
    return jdata_deepmd_input['loss']
def get_training_jdata(self):
    r"""Return the default `training` section of the input script."""
    return jdata_deepmd_input['training']
def get_deepmd_jdata(self):
    r"""Assemble the complete deepmd input script, section by section."""
    jdata = jdata_deepmd_input.copy()
    jdata.update(
        model=self.get_model_jdata(),
        nvnmd=self.get_nvnmd_jdata(),
        learning_rate=self.get_learning_rate_jdata(),
        loss=self.get_loss_jdata(),
        training=self.get_training_jdata(),
    )
    return jdata
def disp_message(self):
    r"""Log the NVNMD welcome, citation, and configuration messages."""
    config_lines = tuple(
        f"{key}: {getattr(self, key)}"
        for key in ('enable', 'net_size', 'map_file', 'config_file', 'weight_file',
                    'restore_descriptor', 'restore_fitting_net',
                    'quantize_descriptor', 'quantize_fitting_net')
    )
    for message in NVNMD_WELCOME + NVNMD_CITATION + config_lines:
        log.info(message)
# Module-level singleton: the global NVNMD configuration, initialized from the
# default `nvnmd` section of the deepmd input template and shared by all modules.
nvnmd_cfg = NvnmdConfig(jdata_deepmd_input['nvnmd'])
import numpy as np
import logging
from deepmd.nvnmd.data.data import jdata_sys
log = logging.getLogger(__name__)
class Encode():
    r"""Encoder converting values between dec, bin, and hex representations."""

    def __init__(self):
        pass

    def qr(self, v, nbit: int = 14):
        r"""Quantize `v` to `nbit` fractional bits, rounding to nearest."""
        return np.round(v * (2 ** nbit))

    def qf(self, v, nbit: int = 14):
        r"""Quantize `v` to `nbit` fractional bits, rounding toward -inf."""
        return np.floor(v * (2 ** nbit))

    def qc(self, v, nbit: int = 14):
        r"""Quantize `v` to `nbit` fractional bits, rounding toward +inf."""
        return np.ceil(v * (2 ** nbit))

    def check_dec(self, idec, nbit, signed=False, name=''):
        r"""Warn (in debug mode) when `idec` falls outside the representable range.

        The range is :math:`[0, 2^{nbit}-1]` for unsigned values and the
        symmetric :math:`[-(2^{nbit-1}-1), 2^{nbit-1}-1]` for signed values.
        """
        prec = np.int64(2 ** nbit)
        if signed:
            pmax = prec // 2 - 1
            pmin = -pmax
        else:
            pmax = prec - 1
            pmin = 0
        too_small = idec < pmin
        too_big = idec > pmax
        if jdata_sys['debug']:
            if np.sum(too_small) > 0:
                log.warning(f"NVNMD: there are data {name} smaller than the lower limit {pmin}")
            if np.sum(too_big) > 0:
                log.warning(f"NVNMD: there are data {name} bigger than the upper limit {pmax}")

    def extend_list(self, slbin, nfull):
        r"""Pad the list `slbin` with all-zero strings up to length `nfull`.

        Example: ['10010','10100'] with nfull=4 becomes
        ['10010','10100','00000','00000'].
        """
        nfull = int(nfull)
        pad = '0' * len(slbin[0])
        return slbin + [pad] * (nfull - len(slbin))

    def extend_bin(self, slbin, nfull):
        r"""Left-pad every string in `slbin` with '0' up to width `nfull`.

        Example: ['10010','10100'] with nfull=6 becomes ['010010','010100'].
        """
        pad = '0' * int(int(nfull) - len(slbin[0]))
        return [pad + s for s in slbin]

    def extend_hex(self, slhex, nfull):
        r"""Left-pad every hex string in `slhex` up to `nfull` bits (nfull/4 chars)."""
        pad = '0' * int((int(nfull) // 4) - len(slhex[0]))
        return [pad + s for s in slhex]

    def split_bin(self, sbin, nbit: int):
        r"""Split `sbin` into `nbit`-wide segments, least-significant segment first.

        Accepts a single string or a list of strings.
        """
        if isinstance(sbin, list):
            out = []
            for s in sbin:
                out.extend(self.split_bin(s, nbit))
            return out
        nseg = int(np.ceil(len(sbin) / nbit))
        sbin = sbin.rjust(nseg * nbit, '0')
        segments = [sbin[ii * nbit:(ii + 1) * nbit] for ii in range(nseg)]
        segments.reverse()
        return segments

    def reverse_bin(self, slbin, nreverse):
        r"""Reverse the order of strings within every group of `nreverse` entries."""
        nreverse = int(nreverse)
        # pad so len(slbin) is a multiple of nreverse
        ngroup = int(np.ceil(len(slbin) / nreverse))
        slbin = self.extend_list(slbin, ngroup * nreverse)
        out = []
        for ii in range(ngroup):
            out.extend(slbin[ii * nreverse:(ii + 1) * nreverse][::-1])
        return out

    def merge_bin(self, slbin, nmerge):
        r"""Concatenate every `nmerge` consecutive strings into one string."""
        nmerge = int(nmerge)
        # pad so len(slbin) is a multiple of nmerge
        ngroup = int(np.ceil(len(slbin) / nmerge))
        slbin = self.extend_list(slbin, ngroup * nmerge)
        return [''.join(slbin[ii * nmerge:(ii + 1) * nmerge]) for ii in range(ngroup)]

    def dec2bin(self, idec, nbit=10, signed=False, name=''):
        r"""Convert a dec array to a list of `nbit`-wide binary strings
        (two's-complement style for signed values, clipped to the valid range).
        """
        idec = np.int64(np.reshape(np.array(idec), [-1]))
        self.check_dec(idec, nbit, signed, name)
        prec = np.int64(2 ** nbit)
        if signed:
            pmax = prec // 2 - 1
            pmin = -pmax
        else:
            pmax = prec - 1
            pmin = 0
        idec = np.minimum(pmax, np.maximum(pmin, idec))
        # the +2*prec offset guarantees at least nbit significant trailing digits
        idec = idec + 2 * prec
        return [bin(v)[-nbit:] for v in idec]

    def hex2bin_str(self, shex):
        r"""Convert a hex string to its binary string (4 bits per hex char)."""
        # the +16 offset guarantees exactly 4 significant trailing binary digits
        return ''.join(bin(int(c, 16) + 16)[-4:] for c in shex)

    def hex2bin(self, data):
        r"""Convert a hex string list to a binary string list."""
        data = np.reshape(np.array(data), [-1])
        return [self.hex2bin_str(d) for d in data]

    def bin2hex_str(self, sbin):
        r"""Convert a binary string to its hex string (one char per 4 bits)."""
        nx = int(np.ceil(len(sbin) / 4))
        sbin = sbin.rjust(nx * 4, '0')
        # the +16 offset guarantees one significant trailing hex digit
        return ''.join(hex(int(sbin[4 * ii:4 * (ii + 1)], 2) + 16)[-1] for ii in range(nx))

    def bin2hex(self, data):
        r"""Convert a binary string list to a hex string list."""
        data = np.reshape(np.array(data), [-1])
        return [self.bin2hex_str(d) for d in data]
import os
import numpy as np
import json
import struct
import logging
log = logging.getLogger(__name__)
class Fio:
    r"""Basic filesystem helpers shared by the Fio* IO classes."""

    def __init__(self):
        pass

    def exits(self, file_name=''):
        r"""Return True if `file_name` exists; the empty name counts as existing."""
        if file_name == '':
            return True
        return os.path.exists(file_name)

    def mkdir(self, path_name=''):
        r"""Create the directory `path_name` (with parents) if it does not exist."""
        if not self.exits(path_name):
            os.makedirs(path_name)

    def create_file_path(self, file_name=''):
        r"""Ensure the parent directory of `file_name` exists."""
        pars = file_name.split('/')
        if len(pars) > 0:
            path_name = '/'.join(pars[:-1])
            self.mkdir(path_name)

    def is_path(self, path):
        r"""Return True if `path` exists and is a directory."""
        return self.exits(path) and os.path.isdir(path)

    def is_file(self, file_name):
        r"""Return True if `file_name` exists and is a regular file."""
        return self.exits(file_name) and os.path.isfile(file_name)

    def get_file_list(self, path) -> list:
        r"""Recursively collect all file paths below `path`.

        Returns an empty list when `path` is a file or not an existing directory.
        """
        if self.is_file(path):
            return []
        # BUG FIX: the original tested the bound method `self.is_path` (always
        # truthy) instead of calling it, so os.listdir crashed on missing paths.
        if self.is_path(path):
            file_lst = []
            for name in os.listdir(path):
                full = os.path.join(path, name)
                if self.is_file(full):
                    file_lst.append(full)
                else:
                    file_lst.extend(self.get_file_list(full))
            return file_lst
        return []
class FioDic:
    r"""Input and output for dict data; the file can be a .json or .npy file
    containing a dictionary (anything else is treated as .npy).
    """

    def __init__(self) -> None:
        pass

    def load(self, file_name='', default_value=None):
        r"""Load `file_name` into a dict; return `default_value` ({} by default)
        when the file is missing.
        """
        # fix: avoid a shared mutable default argument
        if default_value is None:
            default_value = {}
        if file_name.endswith('.json'):
            return FioJsonDic().load(file_name, default_value)
        # .npy files and any other extension go through the npy loader
        return FioNpyDic().load(file_name, default_value)

    def save(self, file_name='', dic=None):
        r"""Save dict `dic` to `file_name` (.json or .npy; default .npy)."""
        # fix: avoid a shared mutable default argument
        if dic is None:
            dic = {}
        if file_name.endswith('.json'):
            FioJsonDic().save(file_name, dic)
        else:
            FioNpyDic().save(file_name, dic)

    def get(self, jdata, key, default_value):
        r"""Return `jdata[key]` if present, else `default_value`."""
        if key in jdata.keys():
            return jdata[key]
        return default_value

    def update(self, jdata, jdata_o):
        r"""Recursively update `jdata_o` with values from `jdata`, but only for
        keys that `jdata_o` already has.

        Parameters
        ----------
        jdata
            new jdata
        jdata_o
            origin jdata (modified in place and returned)
        """
        for key in jdata.keys():
            if key in jdata_o.keys():
                if isinstance(jdata_o[key], dict):
                    jdata_o[key] = self.update(jdata[key], jdata_o[key])
                else:
                    jdata_o[key] = jdata[key]
        return jdata_o
class FioNpyDic:
    r"""Load/save a dict stored as a one-element list inside a .npy file."""

    def __init__(self):
        pass

    def load(self, file_name='', default_value={}):
        r"""Load the dict stored in `file_name`; return `default_value` when missing."""
        if not Fio().exits(file_name):
            log.warning(f"can not find {file_name}")
            return default_value
        log.info(f"load {file_name}")
        # the dict was saved wrapped in a list, so take element 0
        return np.load(file_name, allow_pickle=True)[0]

    def save(self, file_name='', dic={}):
        r"""Save `dic` into `file_name` (wrapped in a list for np.save)."""
        Fio().create_file_path(file_name)
        np.save(file_name, [dic])
class FioJsonDic:
    r"""Load/save a dict from/to a .json file."""

    def __init__(self):
        pass

    def load(self, file_name='', default_value={}):
        r"""Load `file_name` as JSON into a dict; return `default_value` when missing."""
        if not Fio().exits(file_name):
            log.warning(f"can not find {file_name}")
            return default_value
        log.info(f"load {file_name}")
        with open(file_name, 'r') as fr:
            return json.loads(fr.read())

    def save(self, file_name='', dic={}):
        r"""Write `dic` to `file_name` as pretty-printed JSON."""
        log.info(f"write jdata to {file_name}")
        Fio().create_file_path(file_name)
        with open(file_name, 'w') as fw:
            json.dump(dic, fw, indent=4)
class FioBin():
    r"""Load raw bytes from, and save hex-string data to, a binary file."""

    def __init__(self):
        pass

    def load(self, file_name='', default_value=''):
        r"""Read `file_name` as bytes; return `default_value` when missing."""
        if not Fio().exits(file_name):
            log.warning(f"can not find {file_name}")
            return default_value
        log.info(f"load {file_name}")
        with open(file_name, 'rb') as fr:
            return fr.read()

    def save(self, file_name: str = '', data: str = ''):
        r"""Pack the hex strings in `data` into bytes and write them to `file_name`."""
        log.info(f"write binary to {file_name}")
        Fio().create_file_path(file_name)
        with open(file_name, 'wb') as fp:
            for si in data:
                # one byte consists of two hex chars
                fp.write(bytes(int(si[2 * ii: 2 * (ii + 1)], 16)
                               for ii in range(len(si) // 2)))
class FioTxt():
    r"""Load/save a list of strings from/to a text file."""

    def __init__(self):
        pass

    def load(self, file_name='', default_value=[]):
        r"""Read `file_name` into a list of lines without trailing newlines."""
        if not Fio().exits(file_name):
            log.info(f"can not find {file_name}")
            return default_value
        log.info(f"load {file_name}")
        with open(file_name, 'r', encoding='utf-8') as fr:
            return [line.replace('\n', '') for line in fr.readlines()]

    def save(self, file_name: str = '', data: list = []):
        r"""Write `data` (a string or list of strings) to `file_name`, one per line."""
        log.info(f"write string to txt file {file_name}")
        Fio().create_file_path(file_name)
        if isinstance(data, str):
            data = [data]
        with open(file_name, 'w') as fw:
            fw.writelines(d + '\n' for d in data)
import numpy as np
from deepmd.env import tf
from deepmd.env import GLOBAL_TF_FLOAT_PRECISION
from deepmd.env import op_module
from deepmd.nvnmd.utils.config import nvnmd_cfg
from deepmd.nvnmd.utils.weight import get_constant_initializer
from deepmd.utils.network import variable_summaries
def get_sess():
    r"""Create a TF session with all global variables initialized."""
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    return sess
def matmul2_qq(a, b, nbit):
    r"""Quantized matmul for 2d tensors; each elementwise product is quantized
    to `nbit` bits before being summed.
    """
    na = a.get_shape().as_list()[1]
    nb0, nb1 = b.get_shape().as_list()
    lhs = tf.reshape(a, [-1, 1, na])
    rhs = tf.reshape(tf.transpose(b), [1, nb1, nb0])
    prod = qf(lhs * rhs, nbit)
    return tf.reduce_sum(prod, axis=2)
def matmul3_qq(a, b, nbit):
    r"""Quantized matmul for 3d tensors; `nbit == -1` skips the quantization
    of the elementwise products.
    """
    sh_a = a.get_shape().as_list()
    sh_b = b.get_shape().as_list()
    lhs = tf.reshape(a, [-1, sh_a[1], 1, sh_a[2]])
    rhs = tf.reshape(tf.transpose(b, [0, 2, 1]), [-1, 1, sh_b[2], sh_b[1]])
    prod = lhs * rhs
    if nbit != -1:
        prod = qf(prod, nbit)
    return tf.reduce_sum(prod, axis=3)
def qf(x, nbit):
    r"""Quantize-floor `x` to `nbit` fractional bits with a straight-through gradient."""
    scale = 2 ** nbit
    quantized = tf.floor(x * scale) / scale
    # straight-through estimator: quantized forward value, identity gradient
    return x + tf.stop_gradient(quantized - x)
def qr(x, nbit):
    r"""Quantize-round `x` to `nbit` fractional bits with a straight-through gradient."""
    scale = 2 ** nbit
    quantized = tf.round(x * scale) / scale
    # straight-through estimator: quantized forward value, identity gradient
    return x + tf.stop_gradient(quantized - x)
# fitting_net
def tanh2(x, nbit=-1, nbit2=-1):
    r"""User-defined activation function tanh2 (custom NVNMD op).

    Parameters
    ----------
    x
        input tensor
    nbit
        quantification precision for forward calculation
    nbit2
        quantification precision for backward calculation
    """
    return op_module.tanh2_nvnmd(x, 0, nbit, nbit2, -1)
def tanh4(x, nbit=-1, nbit2=-1):
    r"""User-defined activation function tanh4 (custom NVNMD op).

    Parameters
    ----------
    x
        input tensor
    nbit
        quantification precision for forward calculation
    nbit2
        quantification precision for backward calculation
    """
    return op_module.tanh4_nvnmd(x, 0, nbit, nbit2, -1)
def one_layer_wb(
    shape,
    outputs_size,
    bavg,
    stddev,
    precision,
    trainable,
    initial_variables,
    seed,
    uniform_seed,
    name
):
    r"""Create (or restore) the weight and bias variables for one layer.

    When `nvnmd_cfg.restore_fitting_net` is set, values come from the stored
    NVNMD weight dict; otherwise they are randomly initialized, with an
    optional override from `initial_variables`.

    Returns the (matrix, bias) variable pair.
    """
    # NOTE(review): indentation reconstructed from a flattened paste; the
    # `initial_variables` override is assumed to apply only to the
    # randomly-initialized branch — confirm against upstream deepmd-kit.
    if nvnmd_cfg.restore_fitting_net:
        # initializer from the stored NVNMD weight dict (keys scope.matrix / scope.bias)
        w_initializer = get_constant_initializer(nvnmd_cfg.weight, 'matrix')
        b_initializer = get_constant_initializer(nvnmd_cfg.weight, 'bias')
    else:
        # random initialization; the weight stddev is scaled by the layer fan-in/out
        w_initializer = tf.random_normal_initializer(
            stddev=stddev / np.sqrt(shape[1] + outputs_size),
            seed=seed if (seed is None or uniform_seed) else seed + 0)
        b_initializer = tf.random_normal_initializer(
            stddev=stddev,
            mean=bavg,
            seed=seed if (seed is None or uniform_seed) else seed + 1)
        if initial_variables is not None:
            # explicit initial values (e.g. from a restored checkpoint) win
            w_initializer = tf.constant_initializer(initial_variables[name + '/matrix'])
            b_initializer = tf.constant_initializer(initial_variables[name + '/bias'])
    # variable
    w = tf.get_variable('matrix',
                        [shape[1], outputs_size],
                        precision,
                        w_initializer,
                        trainable=trainable)
    variable_summaries(w, 'matrix')
    b = tf.get_variable('bias',
                        [outputs_size],
                        precision,
                        b_initializer,
                        trainable=trainable)
    variable_summaries(b, 'bias')
    return w, b
def one_layer(inputs,
              outputs_size,
              activation_fn=tf.nn.tanh,
              precision=GLOBAL_TF_FLOAT_PRECISION,
              stddev=1.0,
              bavg=0.0,
              name='linear',
              reuse=None,
              seed=None,
              use_timestep=False,
              trainable=True,
              useBN=False,
              uniform_seed=False,
              initial_variables=None,
              mixed_prec=None,
              final_layer=False):
    r"""Build one layer with continuous or quantized value.
    Its weight and bias can be initialed with random or constant value.

    The signature mirrors deepmd's `one_layer`; several parameters
    (use_timestep, useBN, mixed_prec, final_layer) are accepted for
    compatibility but not used in this NVNMD variant.
    """
    # NVNMD replaces any requested activation with its custom tanh4 op
    if activation_fn is not None:
        activation_fn = tanh4
    with tf.variable_scope(name, reuse=reuse):
        shape = inputs.get_shape().as_list()
        w, b = one_layer_wb(shape, outputs_size, bavg, stddev, precision, trainable, initial_variables, seed, uniform_seed, name)
        if nvnmd_cfg.quantize_fitting_net:
            NBIT_DATA_FL = nvnmd_cfg.nbit['NBIT_DATA_FL']
            NBIT_WEIGHT_FL = nvnmd_cfg.nbit['NBIT_WEIGHT_FL']
            # quantize activations and weights to their configured precisions
            inputs = qf(inputs, NBIT_DATA_FL)
            w = qr(w, NBIT_WEIGHT_FL)
            with tf.variable_scope('wx', reuse=reuse):
                wx = op_module.matmul_nvnmd(inputs, w, 0, NBIT_DATA_FL, NBIT_DATA_FL, -1)
            # bias is quantized at the data precision before the add
            b = qr(b, NBIT_DATA_FL)
            with tf.variable_scope('wxb', reuse=reuse):
                hidden = wx + b
            # quantized activation (or pass-through when activation_fn is None)
            with tf.variable_scope('actfun', reuse=reuse):
                if activation_fn is not None:
                    y = activation_fn(hidden, NBIT_DATA_FL, NBIT_DATA_FL)
                else:
                    y = hidden
        else:
            # continuous (non-quantized) path
            hidden = tf.matmul(inputs, w) + b
            y = activation_fn(hidden, -1, -1) if (activation_fn is not None) else hidden
        # 'reshape' is necessary
        # the next layer needs shape of input tensor to build weight
        y = tf.reshape(y, [-1, outputs_size])
    return y
import numpy as np
def map_nvnmd(x, map_y, map_dy, prec, nbit=None):
    r"""Piecewise-linear table lookup implemented with numpy:
    y = map_y[k] + map_dy[k] * (x - k*prec), where k = floor(x / prec).
    `nbit` is accepted for signature compatibility and unused.
    """
    idx = int(np.floor(x / prec))
    offset = x - idx * prec
    return map_y[idx] + map_dy[idx] * offset
import numpy as np
import logging
from deepmd.env import tf
log = logging.getLogger(__name__)
def get_weight(weights, key):
    r"""Return `weights[key]`, or None (with a warning) when the key is absent."""
    if key not in weights:
        log.warning(f"There is not {key} in weights.")
        return None
    return weights[key]
def get_normalize(weights: dict):
    r"""Return the normalization parameters (avg, std) of :math:`s_{ji}`
    stored under descrpt_attr in the weight dict.
    """
    avg = get_weight(weights, "descrpt_attr.t_avg")
    std = get_weight(weights, "descrpt_attr.t_std")
    return avg, std
def get_rng_s(weights: dict):
    r"""Estimate the range (smin, smax) of :math:`s_{ji}` from the
    normalization parameters.
    """
    avg, std = get_normalize(weights)
    lower = np.min(-avg[:, 0] / std[:, 0])
    upper = np.max(2.0 / std[:, 0])
    return lower, upper
def get_filter_weight(weights: dict, spe_i: int, spe_j: int, layer_l: int):
    r"""Get weight and bias of the embedding network.

    Parameters
    ----------
    spe_i(int)
        special order of central atom i (0 ~ ntype-1)
    spe_j(int)
        special order of neighbor atom j (0 ~ ntype-1)
    layer_l
        layer order in embedding network (1 ~ nlayer)
    """
    # with type_one_side = false the keys would be
    # f"filter_type_{spe_i}.matrix_{layer_l}_{spe_j}" / f"filter_type_{spe_i}.bias_{layer_l}_{spe_j}"
    weight = get_weight(weights, f"filter_type_all.matrix_{layer_l}_{spe_j}")
    bias = get_weight(weights, f"filter_type_all.bias_{layer_l}_{spe_j}")
    return weight, bias
def get_fitnet_weight(weights: dict, spe_i: int, layer_l: int, nlayer: int = 10):
    r"""Get weight and bias of the fitting network.

    Parameters
    ----------
    spe_i(int)
        special order of central atom i (0 ~ ntype-1)
    layer_l(int)
        layer order in fitting network (0 ~ nlayer-1)
    """
    # the last layer uses the "final_layer" key prefix
    if layer_l == nlayer - 1:
        prefix = f"final_layer_type_{spe_i}"
    else:
        prefix = f"layer_{layer_l}_type_{spe_i}"
    weight = get_weight(weights, f"{prefix}.matrix")
    bias = get_weight(weights, f"{prefix}.bias")
    return weight, bias
def get_constant_initializer(weights, name):
    r"""Look up `<current scope>.name` in `weights` and wrap the value in a
    tf constant initializer.
    """
    full_name = tf.get_variable_scope().name + '.' + name
    return tf.constant_initializer(get_weight(weights, full_name))
"""This module will house cust Tf OPs after CMake installation."""
from pathlib import Path
import importlib
import logging
NOT_LOADABLE = ("__init__.py",)
PACKAGE_BASE = "deepmd.op"
log = logging.getLogger(__name__)
def import_ops():
    """Import all custom TF ops that are present in this submodule.

    Notes
    -----
    Initially this subdir is unpopulated. CMake will install all the op module
    python files and shared libs here.
    """
    for module_file in Path(__file__).parent.glob("*.py"):
        if module_file.name in NOT_LOADABLE:
            continue
        module_name = f".{module_file.stem}"
        log.debug(f"importing op module: {module_name}")
        importlib.import_module(module_name, PACKAGE_BASE)


import_ops()
#!/usr/bin/env python3
"""
First-order derivatives and second-order derivatives for gelu function.
"""
import tensorflow
from tensorflow.python.framework import ops
from deepmd.env import op_module
# NOTE(review): indentation reconstructed from a flattened paste — the four
# gradient registrations are assumed to live inside the except branch (i.e.
# they are only registered when TF lacks a native gelu); confirm upstream.
try:
    # prefer the native implementation when this TF version provides it
    gelu = tensorflow.nn.gelu
except AttributeError:
    # fall back to the custom op: register first derivatives ("Gelu"/"GeluCustom")
    # and second derivatives ("GeluGrad"/"GeluGradCustom") for it
    @ops.RegisterGradient("Gelu")
    def _gelu_cc (op, dy) :
        return op_module.gelu_grad_custom(dy, op.inputs[0])

    @ops.RegisterGradient("GeluGrad")
    def _gelu_grad_cc (op, dy) :
        return [op_module.gelu_grad_custom(dy, op.inputs[1]), op_module.gelu_grad_grad_custom(dy, op.inputs[0], op.inputs[1])]

    @ops.RegisterGradient("GeluCustom")
    def _gelu_custom_cc (op, dy):
        return op_module.gelu_grad_custom(dy, op.inputs[0])

    @ops.RegisterGradient("GeluGradCustom")
    def _gelu_grad_custom_cc (op, dy) :
        return [op_module.gelu_grad_custom(dy, op.inputs[1]), op_module.gelu_grad_grad_custom(dy, op.inputs[0], op.inputs[1])]
\ No newline at end of file
#!/usr/bin/env python3
from tensorflow.python.framework import ops
from deepmd.env import op_module
from deepmd.env import tf
@ops.RegisterGradient("MapNvnmd")
def _MapNvnmdGrad(op, grad):
    r"""Gradient of the MapNvnmd op: backprop flows only to the input x;
    the lookup-table inputs receive no gradient.
    """
    x, v, dv, grad_v, grad_dv = (op.inputs[i] for i in range(5))
    prec = op.get_attr("prec")
    nbit = op.get_attr("nbit")
    # dy/dx is evaluated from the derivative tables via the same mapping op
    dydx = op_module.map_nvnmd(x, grad_v, grad_dv, tf.zeros_like(v), tf.zeros_like(dv), prec, nbit)
    dydx = op_module.quantize_nvnmd(dydx, 0, nbit, -1, -1)
    dx = tf.reshape(tf.reduce_sum(dydx * grad, axis=1), [-1, 1])
    # no gradients for v, dv, grad_v, grad_dv
    return [dx, None, None, None, None]
#!/usr/bin/env python3
from tensorflow.python.framework import ops
from deepmd.env import op_module
from deepmd.env import tf
@ops.RegisterGradient("MatmulNvnmd")
def _MatmulNvnmdGrad(op, grad):
    r"""Gradient of the quantized matmul: dx = grad @ w^T and dw = x^T @ grad,
    both computed with the same quantized matmul op.
    """
    x, w = op.inputs[0], op.inputs[1]
    isround, nbit1, nbit2, nbit3 = (
        op.get_attr(k) for k in ("isround", "nbit1", "nbit2", "nbit3"))
    dx = op_module.matmul_nvnmd(grad, tf.transpose(w), isround, nbit2, nbit3, nbit1)
    dw = op_module.matmul_nvnmd(tf.transpose(x), grad, isround, nbit2, nbit3, nbit1)
    return [dx, dw]
#!/usr/bin/env python3
"""
Gradients for prod force.
"""
from tensorflow.python.framework import ops
from deepmd.env import op_grads_module
@ops.RegisterGradient("ProdForce")
def _prod_force_grad_cc(op, grad):
    r"""Gradient of ProdForce: backprop goes to the net deriv (input 0) only;
    the remaining inputs receive no gradient.
    """
    net_grad = op_grads_module.prod_force_grad(
        grad,
        op.inputs[0],
        op.inputs[1],
        op.inputs[2],
        op.inputs[3],
        op.inputs[4],
        n_a_sel=op.get_attr("n_a_sel"),
        n_r_sel=op.get_attr("n_r_sel"),
    )
    return [net_grad, None, None, None, None]
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment