# Commit f30dad25 authored by zhangwenbo
# Initial commit
"""
@author: Maziar Raissi
"""
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
import scipy.io
import time
import sys
from utilities import neural_net, Navier_Stokes_2D, Gradient_Velocity_2D, \
tf_session, mean_squared_error, relative_error
class HFM(object):
    # notational conventions
    # _tf: placeholders for input/output data and points used to regress the equations
    # _pred: output of neural network
    # _eqns: points used to regress the equations
    # _data: input-output data
    # _inlet: input-output data at the inlet
    # _star: predictions

    def __init__(self, t_data, x_data, y_data, c_data,
                 t_eqns, x_eqns, y_eqns,
                 t_inlet, x_inlet, y_inlet, u_inlet, v_inlet,
                 layers, batch_size):
        """Build the TF1 computation graph for the HFM model.

        All array arguments are column vectors of shape [M, 1].

        Args:
            t_data, x_data, y_data, c_data: space-time points with observed
                passive-scalar concentration.
            t_eqns, x_eqns, y_eqns: collocation points where the Navier-Stokes
                residuals are penalized.
            t_inlet, x_inlet, y_inlet, u_inlet, v_inlet: inlet points with
                observed velocity components.
            layers: list of layer widths for the fully-connected network.
            batch_size: mini-batch size used by train().
        """
        # specs
        self.layers = layers
        self.batch_size = batch_size

        # flow properties: the Peclet and Reynolds numbers are unknown and are
        # inferred jointly with the network weights (trainable variables,
        # both initialized at 100).
        self.Pec = tf.Variable(100.0, dtype=tf.float32, trainable = True)
        self.Rey = tf.Variable(100.0, dtype=tf.float32, trainable = True)

        # data
        [self.t_data, self.x_data, self.y_data, self.c_data] = [t_data, x_data, y_data, c_data]
        [self.t_eqns, self.x_eqns, self.y_eqns] = [t_eqns, x_eqns, y_eqns]
        [self.t_inlet, self.x_inlet, self.y_inlet, self.u_inlet, self.v_inlet] = [t_inlet, x_inlet, y_inlet, u_inlet, v_inlet]

        # placeholders (one scalar column per quantity, fed in mini-batches)
        [self.t_data_tf, self.x_data_tf, self.y_data_tf, self.c_data_tf] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(4)]
        [self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(3)]
        [self.t_inlet_tf, self.x_inlet_tf, self.y_inlet_tf, self.u_inlet_tf, self.v_inlet_tf] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(5)]

        # physics "uninformed" neural network: maps (t, x, y) -> (c, u, v, p)
        self.net_cuvp = neural_net(self.t_data, self.x_data, self.y_data, layers = self.layers)

        [self.c_data_pred,
         self.u_data_pred,
         self.v_data_pred,
         self.p_data_pred] = self.net_cuvp(self.t_data_tf,
                                           self.x_data_tf,
                                           self.y_data_tf)

        # physics "uninformed" neural networks (data at the inlet);
        # only the velocity outputs are used there
        [_,
         self.u_inlet_pred,
         self.v_inlet_pred,
         _] = self.net_cuvp(self.t_inlet_tf,
                            self.x_inlet_tf,
                            self.y_inlet_tf)

        # physics "informed" neural networks: evaluate the same network on
        # the collocation points and form the PDE residuals e1..e4
        [self.c_eqns_pred,
         self.u_eqns_pred,
         self.v_eqns_pred,
         self.p_eqns_pred] = self.net_cuvp(self.t_eqns_tf,
                                           self.x_eqns_tf,
                                           self.y_eqns_tf)

        [self.e1_eqns_pred,
         self.e2_eqns_pred,
         self.e3_eqns_pred,
         self.e4_eqns_pred] = Navier_Stokes_2D(self.c_eqns_pred,
                                               self.u_eqns_pred,
                                               self.v_eqns_pred,
                                               self.p_eqns_pred,
                                               self.t_eqns_tf,
                                               self.x_eqns_tf,
                                               self.y_eqns_tf,
                                               self.Pec,
                                               self.Rey)

        # velocity gradients required for the lift and drag forces
        [self.u_x_eqns_pred,
         self.v_x_eqns_pred,
         self.u_y_eqns_pred,
         self.v_y_eqns_pred] = Gradient_Velocity_2D(self.u_eqns_pred,
                                                    self.v_eqns_pred,
                                                    self.x_eqns_tf,
                                                    self.y_eqns_tf)

        # loss: concentration data misfit + inlet velocity misfit
        #       + the four Navier-Stokes residuals driven to zero
        self.loss = mean_squared_error(self.c_data_pred, self.c_data_tf) + \
                    mean_squared_error(self.u_inlet_pred, self.u_inlet_tf) + \
                    mean_squared_error(self.v_inlet_pred, self.v_inlet_tf) + \
                    mean_squared_error(self.e1_eqns_pred, 0.0) + \
                    mean_squared_error(self.e2_eqns_pred, 0.0) + \
                    mean_squared_error(self.e3_eqns_pred, 0.0) + \
                    mean_squared_error(self.e4_eqns_pred, 0.0)

        # optimizers: learning rate is a placeholder so it can be changed
        # between train() calls without rebuilding the graph
        self.learning_rate = tf.placeholder(tf.float32, shape=[])
        self.optimizer = tf.train.AdamOptimizer(learning_rate = self.learning_rate)
        self.train_op = self.optimizer.minimize(self.loss)

        self.sess = tf_session()

    def train(self, total_time, learning_rate):
        """Run Adam updates until total_time hours of wall-clock time elapse."""
        N_data = self.t_data.shape[0]
        N_eqns = self.t_eqns.shape[0]

        start_time = time.time()
        running_time = 0
        it = 0
        while running_time < total_time:
            # mini-batches are drawn with replacement
            idx_data = np.random.choice(N_data, min(self.batch_size, N_data))
            idx_eqns = np.random.choice(N_eqns, self.batch_size)

            (t_data_batch,
             x_data_batch,
             y_data_batch,
             c_data_batch) = (self.t_data[idx_data,:],
                              self.x_data[idx_data,:],
                              self.y_data[idx_data,:],
                              self.c_data[idx_data,:])

            (t_eqns_batch,
             x_eqns_batch,
             y_eqns_batch) = (self.t_eqns[idx_eqns,:],
                              self.x_eqns[idx_eqns,:],
                              self.y_eqns[idx_eqns,:])

            # the inlet data set is fed in full every iteration
            tf_dict = {self.t_data_tf: t_data_batch,
                       self.x_data_tf: x_data_batch,
                       self.y_data_tf: y_data_batch,
                       self.c_data_tf: c_data_batch,
                       self.t_eqns_tf: t_eqns_batch,
                       self.x_eqns_tf: x_eqns_batch,
                       self.y_eqns_tf: y_eqns_batch,
                       self.t_inlet_tf: self.t_inlet,
                       self.x_inlet_tf: self.x_inlet,
                       self.y_inlet_tf: self.y_inlet,
                       self.u_inlet_tf: self.u_inlet,
                       self.v_inlet_tf: self.v_inlet,
                       self.learning_rate: learning_rate}

            self.sess.run([self.train_op], tf_dict)

            # Print progress (and accumulate running time) every 10 iterations
            if it % 10 == 0:
                elapsed = time.time() - start_time
                running_time += elapsed/3600.0
                [loss_value,
                 Pec_value,
                 Rey_value,
                 learning_rate_value] = self.sess.run([self.loss,
                                                       self.Pec,
                                                       self.Rey,
                                                       self.learning_rate], tf_dict)
                print('It: %d, Loss: %.3e, Pec: %.3f, Rey: %.3f, Time: %.2fs, Running Time: %.2fh, Learning Rate: %.1e'
                      %(it, loss_value, Pec_value, Rey_value, elapsed, running_time, learning_rate_value))
                sys.stdout.flush()
                start_time = time.time()
            it += 1

    def predict(self, t_star, x_star, y_star):
        """Evaluate the learned (c, u, v, p) fields at the given points."""
        tf_dict = {self.t_data_tf: t_star, self.x_data_tf: x_star, self.y_data_tf: y_star}

        c_star = self.sess.run(self.c_data_pred, tf_dict)
        u_star = self.sess.run(self.u_data_pred, tf_dict)
        v_star = self.sess.run(self.v_data_pred, tf_dict)
        p_star = self.sess.run(self.p_data_pred, tf_dict)

        return c_star, u_star, v_star, p_star

    def predict_drag_lift(self, t_cyl):
        """Compute drag (F_D) and lift (F_L) time histories on the cylinder.

        The cylinder of radius 0.5 centered at the origin is discretized with
        200 angles; the surface stress is integrated over theta with the
        trapezoidal rule at each of the T times in t_cyl.
        """
        theta = np.linspace(0.0,2*np.pi,200)[:,None] # N x 1
        d_theta = theta[1,0] - theta[0,0]
        x_cyl = 0.5*np.cos(theta) # N x 1
        y_cyl = 0.5*np.sin(theta) # N x 1

        N = x_cyl.shape[0]
        T = t_cyl.shape[0]

        # replicate the surface points across all times
        T_star = np.tile(t_cyl, (1,N)).T # N x T
        X_star = np.tile(x_cyl, (1,T)) # N x T
        Y_star = np.tile(y_cyl, (1,T)) # N x T

        t_star = np.reshape(T_star,[-1,1]) # NT x 1
        x_star = np.reshape(X_star,[-1,1]) # NT x 1
        y_star = np.reshape(Y_star,[-1,1]) # NT x 1

        tf_dict = {self.t_eqns_tf: t_star, self.x_eqns_tf: x_star, self.y_eqns_tf: y_star}

        [p_star,
         u_x_star,
         u_y_star,
         v_x_star,
         v_y_star,
         Rey_value] = self.sess.run([self.p_eqns_pred,
                                     self.u_x_eqns_pred,
                                     self.u_y_eqns_pred,
                                     self.v_x_eqns_pred,
                                     self.v_y_eqns_pred,
                                     self.Rey], tf_dict)

        # non-dimensional viscosity from the learned Reynolds number
        viscosity = (1.0/Rey_value)

        P_star = np.reshape(p_star, [N,T]) # N x T
        # subtract the per-time mean: pressure enters only up to a constant
        P_star = P_star - np.mean(P_star, axis=0)
        U_x_star = np.reshape(u_x_star, [N,T]) # N x T
        U_y_star = np.reshape(u_y_star, [N,T]) # N x T
        V_x_star = np.reshape(v_x_star, [N,T]) # N x T
        V_y_star = np.reshape(v_y_star, [N,T]) # N x T

        # trapezoidal rule in theta: INT0/INT1 are the integrand at
        # consecutive angles; the leading 0.5 averages them
        INT0 = (-P_star[0:-1,:] + 2*viscosity*U_x_star[0:-1,:])*X_star[0:-1,:] + viscosity*(U_y_star[0:-1,:] + V_x_star[0:-1,:])*Y_star[0:-1,:]
        INT1 = (-P_star[1: , :] + 2*viscosity*U_x_star[1: , :])*X_star[1: , :] + viscosity*(U_y_star[1: , :] + V_x_star[1: , :])*Y_star[1: , :]

        F_D = 0.5*np.sum(INT0.T+INT1.T, axis = 1)*d_theta # length T

        INT0 = (-P_star[0:-1,:] + 2*viscosity*V_y_star[0:-1,:])*Y_star[0:-1,:] + viscosity*(U_y_star[0:-1,:] + V_x_star[0:-1,:])*X_star[0:-1,:]
        INT1 = (-P_star[1: , :] + 2*viscosity*V_y_star[1: , :])*Y_star[1: , :] + viscosity*(U_y_star[1: , :] + V_x_star[1: , :])*X_star[1: , :]

        F_L = 0.5*np.sum(INT0.T+INT1.T, axis = 1)*d_theta # length T

        return F_D, F_L
if __name__ == "__main__":

    batch_size = 10000

    # network: 3 inputs (t, x, y), 10 hidden layers of width 200, 4 outputs (c, u, v, p)
    layers = [3] + 10*[4*50] + [4]

    # Load Data
    data = scipy.io.loadmat('../Data/Cylinder2D.mat')

    t_star = data['t_star'] # T x 1
    x_star = data['x_star'] # N x 1
    y_star = data['y_star'] # N x 1

    T = t_star.shape[0]
    N = x_star.shape[0]

    U_star = data['U_star'] # N x T
    V_star = data['V_star'] # N x T
    P_star = data['P_star'] # N x T
    C_star = data['C_star'] # N x T

    # Rearrange Data
    T_star = np.tile(t_star, (1,N)).T # N x T
    X_star = np.tile(x_star, (1,T)) # N x T
    Y_star = np.tile(y_star, (1,T)) # N x T

    # flattened column vectors over all space-time points
    t = T_star.flatten()[:,None] # NT x 1
    x = X_star.flatten()[:,None] # NT x 1
    y = Y_star.flatten()[:,None] # NT x 1
    u = U_star.flatten()[:,None] # NT x 1
    v = V_star.flatten()[:,None] # NT x 1
    p = P_star.flatten()[:,None] # NT x 1
    c = C_star.flatten()[:,None] # NT x 1

    ######################################################################
    ######################## Training Data ###############################
    ######################################################################

    # here all T snapshots and all N points are used; the commented-out
    # sys.argv alternatives allow subsampling from the command line
    T_data = T # int(sys.argv[1])
    N_data = N # int(sys.argv[2])
    # always keep the first and last snapshots; sample the interior ones
    idx_t = np.concatenate([np.array([0]), np.random.choice(T-2, T_data-2, replace=False)+1, np.array([T-1])] )
    idx_x = np.random.choice(N, N_data, replace=False)
    t_data = T_star[:, idx_t][idx_x,:].flatten()[:,None]
    x_data = X_star[:, idx_t][idx_x,:].flatten()[:,None]
    y_data = Y_star[:, idx_t][idx_x,:].flatten()[:,None]
    c_data = C_star[:, idx_t][idx_x,:].flatten()[:,None]

    # collocation points for the PDE residuals (sampled the same way)
    T_eqns = T
    N_eqns = N
    idx_t = np.concatenate([np.array([0]), np.random.choice(T-2, T_eqns-2, replace=False)+1, np.array([T-1])] )
    idx_x = np.random.choice(N, N_eqns, replace=False)
    t_eqns = T_star[:, idx_t][idx_x,:].flatten()[:,None]
    x_eqns = X_star[:, idx_t][idx_x,:].flatten()[:,None]
    y_eqns = Y_star[:, idx_t][idx_x,:].flatten()[:,None]

    # Training Data on velocity (inlet): all points at the minimum x coordinate
    t_inlet = t[x == x.min()][:,None]
    x_inlet = x[x == x.min()][:,None]
    y_inlet = y[x == x.min()][:,None]
    u_inlet = u[x == x.min()][:,None]
    v_inlet = v[x == x.min()][:,None]

    # Training
    model = HFM(t_data, x_data, y_data, c_data,
                t_eqns, x_eqns, y_eqns,
                t_inlet, x_inlet, y_inlet, u_inlet, v_inlet,
                layers, batch_size)

    # total_time is in hours of wall-clock time
    model.train(total_time = 40, learning_rate=1e-3)

    F_D, F_L = model.predict_drag_lift(t_star)

    # Test Data: a single snapshot
    snap = np.array([100])
    t_test = T_star[:,snap]
    x_test = X_star[:,snap]
    y_test = Y_star[:,snap]

    c_test = C_star[:,snap]
    u_test = U_star[:,snap]
    v_test = V_star[:,snap]
    p_test = P_star[:,snap]

    # Prediction
    c_pred, u_pred, v_pred, p_pred = model.predict(t_test, x_test, y_test)

    # Error (pressure is compared up to an additive constant, hence the
    # mean subtraction)
    error_c = relative_error(c_pred, c_test)
    error_u = relative_error(u_pred, u_test)
    error_v = relative_error(v_pred, v_test)
    error_p = relative_error(p_pred - np.mean(p_pred), p_test - np.mean(p_test))

    print('Error c: %e' % (error_c))
    print('Error u: %e' % (error_u))
    print('Error v: %e' % (error_v))
    print('Error p: %e' % (error_p))

    ################# Save Data ###########################

    # predict every snapshot and store the full reconstructed fields
    C_pred = 0*C_star
    U_pred = 0*U_star
    V_pred = 0*V_star
    P_pred = 0*P_star
    for snap in range(0,t_star.shape[0]):
        t_test = T_star[:,snap:snap+1]
        x_test = X_star[:,snap:snap+1]
        y_test = Y_star[:,snap:snap+1]

        c_test = C_star[:,snap:snap+1]
        u_test = U_star[:,snap:snap+1]
        v_test = V_star[:,snap:snap+1]
        p_test = P_star[:,snap:snap+1]

        # Prediction
        c_pred, u_pred, v_pred, p_pred = model.predict(t_test, x_test, y_test)

        C_pred[:,snap:snap+1] = c_pred
        U_pred[:,snap:snap+1] = u_pred
        V_pred[:,snap:snap+1] = v_pred
        P_pred[:,snap:snap+1] = p_pred

        # Error
        error_c = relative_error(c_pred, c_test)
        error_u = relative_error(u_pred, u_test)
        error_v = relative_error(v_pred, v_test)
        error_p = relative_error(p_pred - np.mean(p_pred), p_test - np.mean(p_test))

        print('Error c: %e' % (error_c))
        print('Error u: %e' % (error_u))
        print('Error v: %e' % (error_v))
        print('Error p: %e' % (error_p))

    # save reconstructed fields, force histories, and the learned Pec/Rey
    scipy.io.savemat('../Results/Cylinder2D_Pec_Re_results_%s.mat' %(time.strftime('%d_%m_%Y')),
                     {'C_pred':C_pred, 'U_pred':U_pred, 'V_pred':V_pred, 'P_pred':P_pred, 'F_L':F_L, 'F_D':F_D, 'Pec': model.sess.run(model.Pec), 'Rey': model.sess.run(model.Rey)})
"""
@author: Maziar Raissi
"""
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
import scipy.io
import time
import sys
from utilities import neural_net, Navier_Stokes_2D, \
tf_session, mean_squared_error, relative_error
class HFM(object):
    # notational conventions
    # _tf: placeholders for input/output data and points used to regress the equations
    # _pred: output of neural network
    # _eqns: points used to regress the equations
    # _data: input-output data
    # _star: predictions

    def __init__(self, t_data, x_data, y_data, c_data,
                 t_eqns, x_eqns, y_eqns,
                 layers, batch_size,
                 Pec, Rey):
        """Build the TF1 computation graph for the HFM model.

        Unlike the Pec/Rey-inference variant, the Peclet and Reynolds numbers
        are supplied as fixed constants here.

        Args (array arguments are column vectors of shape [M, 1]):
            t_data, x_data, y_data, c_data: space-time points with observed
                passive-scalar concentration.
            t_eqns, x_eqns, y_eqns: collocation points where the Navier-Stokes
                residuals are penalized.
            layers: list of layer widths for the fully-connected network.
            batch_size: mini-batch size used by train().
            Pec, Rey: known Peclet and Reynolds numbers.
        """
        # specs
        self.layers = layers
        self.batch_size = batch_size

        # flow properties (fixed, not trained)
        self.Pec = Pec
        self.Rey = Rey

        # data
        [self.t_data, self.x_data, self.y_data, self.c_data] = [t_data, x_data, y_data, c_data]
        [self.t_eqns, self.x_eqns, self.y_eqns] = [t_eqns, x_eqns, y_eqns]

        # placeholders (one scalar column per quantity, fed in mini-batches)
        [self.t_data_tf, self.x_data_tf, self.y_data_tf, self.c_data_tf] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(4)]
        [self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(3)]

        # physics "uninformed" neural network: maps (t, x, y) -> (c, u, v, p)
        self.net_cuvp = neural_net(self.t_data, self.x_data, self.y_data, layers = self.layers)

        [self.c_data_pred,
         self.u_data_pred,
         self.v_data_pred,
         self.p_data_pred] = self.net_cuvp(self.t_data_tf,
                                           self.x_data_tf,
                                           self.y_data_tf)

        # physics "informed" neural networks: evaluate the same network on
        # the collocation points and form the PDE residuals e1..e4
        [self.c_eqns_pred,
         self.u_eqns_pred,
         self.v_eqns_pred,
         self.p_eqns_pred] = self.net_cuvp(self.t_eqns_tf,
                                           self.x_eqns_tf,
                                           self.y_eqns_tf)

        [self.e1_eqns_pred,
         self.e2_eqns_pred,
         self.e3_eqns_pred,
         self.e4_eqns_pred] = Navier_Stokes_2D(self.c_eqns_pred,
                                               self.u_eqns_pred,
                                               self.v_eqns_pred,
                                               self.p_eqns_pred,
                                               self.t_eqns_tf,
                                               self.x_eqns_tf,
                                               self.y_eqns_tf,
                                               self.Pec,
                                               self.Rey)

        # loss: concentration data misfit + the four residuals driven to zero
        self.loss = mean_squared_error(self.c_data_pred, self.c_data_tf) + \
                    mean_squared_error(self.e1_eqns_pred, 0.0) + \
                    mean_squared_error(self.e2_eqns_pred, 0.0) + \
                    mean_squared_error(self.e3_eqns_pred, 0.0) + \
                    mean_squared_error(self.e4_eqns_pred, 0.0)

        # optimizers: learning rate is a placeholder so it can be changed
        # between train() calls without rebuilding the graph
        self.learning_rate = tf.placeholder(tf.float32, shape=[])
        self.optimizer = tf.train.AdamOptimizer(learning_rate = self.learning_rate)
        self.train_op = self.optimizer.minimize(self.loss)

        self.sess = tf_session()

    def train(self, total_time, learning_rate):
        """Run Adam updates until total_time hours of wall-clock time elapse."""
        N_data = self.t_data.shape[0]
        N_eqns = self.t_eqns.shape[0]

        start_time = time.time()
        running_time = 0
        it = 0
        while running_time < total_time:
            # mini-batches are drawn with replacement
            idx_data = np.random.choice(N_data, min(self.batch_size, N_data))
            idx_eqns = np.random.choice(N_eqns, self.batch_size)

            (t_data_batch,
             x_data_batch,
             y_data_batch,
             c_data_batch) = (self.t_data[idx_data,:],
                              self.x_data[idx_data,:],
                              self.y_data[idx_data,:],
                              self.c_data[idx_data,:])

            (t_eqns_batch,
             x_eqns_batch,
             y_eqns_batch) = (self.t_eqns[idx_eqns,:],
                              self.x_eqns[idx_eqns,:],
                              self.y_eqns[idx_eqns,:])

            tf_dict = {self.t_data_tf: t_data_batch,
                       self.x_data_tf: x_data_batch,
                       self.y_data_tf: y_data_batch,
                       self.c_data_tf: c_data_batch,
                       self.t_eqns_tf: t_eqns_batch,
                       self.x_eqns_tf: x_eqns_batch,
                       self.y_eqns_tf: y_eqns_batch,
                       self.learning_rate: learning_rate}

            self.sess.run([self.train_op], tf_dict)

            # Print progress (and accumulate running time) every 10 iterations
            if it % 10 == 0:
                elapsed = time.time() - start_time
                running_time += elapsed/3600.0
                [loss_value,
                 learning_rate_value] = self.sess.run([self.loss,
                                                       self.learning_rate], tf_dict)
                print('It: %d, Loss: %.3e, Time: %.2fs, Running Time: %.2fh, Learning Rate: %.1e'
                      %(it, loss_value, elapsed, running_time, learning_rate_value))
                sys.stdout.flush()
                start_time = time.time()
            it += 1

    def predict(self, t_star, x_star, y_star):
        """Evaluate the learned (c, u, v, p) fields at the given points."""
        tf_dict = {self.t_data_tf: t_star, self.x_data_tf: x_star, self.y_data_tf: y_star}

        c_star = self.sess.run(self.c_data_pred, tf_dict)
        u_star = self.sess.run(self.u_data_pred, tf_dict)
        v_star = self.sess.run(self.v_data_pred, tf_dict)
        p_star = self.sess.run(self.p_data_pred, tf_dict)

        return c_star, u_star, v_star, p_star
if __name__ == "__main__":

    batch_size = 10000

    # network: 3 inputs (t, x, y), 10 hidden layers of width 200, 4 outputs (c, u, v, p)
    layers = [3] + 10*[4*50] + [4]

    # Load Data
    data = scipy.io.loadmat('../Data/Cylinder2D_flower.mat')

    t_star = data['t_star'] # T x 1
    x_star = data['x_star'] # N x 1
    y_star = data['y_star'] # N x 1

    T = t_star.shape[0]
    N = x_star.shape[0]

    U_star = data['U_star'] # N x T
    V_star = data['V_star'] # N x T
    P_star = data['P_star'] # N x T
    C_star = data['C_star'] # N x T

    # Rearrange Data
    T_star = np.tile(t_star, (1,N)).T # N x T
    X_star = np.tile(x_star, (1,T)) # N x T
    Y_star = np.tile(y_star, (1,T)) # N x T

    ######################################################################
    ######################## Training Data ###############################
    ######################################################################

    # here all T snapshots and all N points are used; the commented-out
    # sys.argv alternatives allow subsampling from the command line
    T_data = T # int(sys.argv[1])
    N_data = N # int(sys.argv[2])
    # always keep the first and last snapshots; sample the interior ones
    idx_t = np.concatenate([np.array([0]), np.random.choice(T-2, T_data-2, replace=False)+1, np.array([T-1])] )
    idx_x = np.random.choice(N, N_data, replace=False)
    t_data = T_star[:, idx_t][idx_x,:].flatten()[:,None]
    x_data = X_star[:, idx_t][idx_x,:].flatten()[:,None]
    y_data = Y_star[:, idx_t][idx_x,:].flatten()[:,None]
    c_data = C_star[:, idx_t][idx_x,:].flatten()[:,None]

    # collocation points for the PDE residuals (sampled the same way)
    T_eqns = T
    N_eqns = N
    idx_t = np.concatenate([np.array([0]), np.random.choice(T-2, T_eqns-2, replace=False)+1, np.array([T-1])] )
    idx_x = np.random.choice(N, N_eqns, replace=False)
    t_eqns = T_star[:, idx_t][idx_x,:].flatten()[:,None]
    x_eqns = X_star[:, idx_t][idx_x,:].flatten()[:,None]
    y_eqns = Y_star[:, idx_t][idx_x,:].flatten()[:,None]

    # Training (Pec and Rey are treated as known here)
    model = HFM(t_data, x_data, y_data, c_data,
                t_eqns, x_eqns, y_eqns,
                layers, batch_size,
                Pec = 100, Rey = 100)

    # total_time is in hours of wall-clock time
    model.train(total_time = 40, learning_rate=1e-3)

    # Test Data: a single snapshot
    snap = np.array([100])
    t_test = T_star[:,snap]
    x_test = X_star[:,snap]
    y_test = Y_star[:,snap]

    c_test = C_star[:,snap]
    u_test = U_star[:,snap]
    v_test = V_star[:,snap]
    p_test = P_star[:,snap]

    # Prediction
    c_pred, u_pred, v_pred, p_pred = model.predict(t_test, x_test, y_test)

    # Error (pressure is compared up to an additive constant)
    error_c = relative_error(c_pred, c_test)
    error_u = relative_error(u_pred, u_test)
    error_v = relative_error(v_pred, v_test)
    error_p = relative_error(p_pred - np.mean(p_pred), p_test - np.mean(p_test))

    print('Error c: %e' % (error_c))
    print('Error u: %e' % (error_u))
    print('Error v: %e' % (error_v))
    print('Error p: %e' % (error_p))

    ################# Save Data ###########################

    # predict every snapshot and store the full reconstructed fields
    C_pred = 0*C_star
    U_pred = 0*U_star
    V_pred = 0*V_star
    P_pred = 0*P_star
    for snap in range(0,t_star.shape[0]):
        t_test = T_star[:,snap:snap+1]
        x_test = X_star[:,snap:snap+1]
        y_test = Y_star[:,snap:snap+1]

        c_test = C_star[:,snap:snap+1]
        u_test = U_star[:,snap:snap+1]
        v_test = V_star[:,snap:snap+1]
        p_test = P_star[:,snap:snap+1]

        # Prediction
        c_pred, u_pred, v_pred, p_pred = model.predict(t_test, x_test, y_test)

        C_pred[:,snap:snap+1] = c_pred
        U_pred[:,snap:snap+1] = u_pred
        V_pred[:,snap:snap+1] = v_pred
        P_pred[:,snap:snap+1] = p_pred

        # Error
        error_c = relative_error(c_pred, c_test)
        error_u = relative_error(u_pred, u_test)
        error_v = relative_error(v_pred, v_test)
        error_p = relative_error(p_pred - np.mean(p_pred), p_test - np.mean(p_test))

        print('Error c: %e' % (error_c))
        print('Error u: %e' % (error_u))
        print('Error v: %e' % (error_v))
        print('Error p: %e' % (error_p))

    scipy.io.savemat('../Results/Cylinder2D_flower_results_%s.mat' %(time.strftime('%d_%m_%Y')),
                     {'C_pred':C_pred, 'U_pred':U_pred, 'V_pred':V_pred, 'P_pred':P_pred})
"""
@author: Maziar Raissi
"""
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
import scipy.io
import time
import sys
from utilities import neural_net, Navier_Stokes_2D, \
tf_session, mean_squared_error, relative_error
class HFM(object):
    # notational conventions
    # _tf: placeholders for input/output data and points used to regress the equations
    # _pred: output of neural network
    # _eqns: points used to regress the equations
    # _data: input-output data
    # _star: predictions

    def __init__(self, t_data, x_data, y_data, c_data,
                 u_data, v_data, p_data,
                 x_ref, y_ref,
                 t_eqns, x_eqns, y_eqns,
                 layers, batch_size,
                 Pec, Rey):
        """Build the TF1 graph, including per-iteration error monitoring.

        This variant additionally takes reference velocity/pressure data and
        a fixed reference point (x_ref, y_ref). The u/v/p data are NOT part
        of the training loss; they are only used to compute relative errors
        for convergence plots.

        Args (array arguments are column vectors of shape [M, 1]):
            t_data, x_data, y_data, c_data: points with observed concentration.
            u_data, v_data, p_data: reference fields at the same points,
                used for monitoring only.
            x_ref, y_ref: coordinates of the pressure reference point.
            t_eqns, x_eqns, y_eqns: collocation points for the PDE residuals.
            layers: list of layer widths for the network.
            batch_size: mini-batch size used by train().
            Pec, Rey: known Peclet and Reynolds numbers (fixed, not trained).
        """
        # specs
        self.layers = layers
        self.batch_size = batch_size

        # flow properties (fixed, not trained)
        self.Pec = Pec
        self.Rey = Rey

        # data
        [self.t_data, self.x_data, self.y_data, self.c_data] = [t_data, x_data, y_data, c_data]
        [self.u_data, self.v_data, self.p_data] = [u_data, v_data, p_data]
        [self.x_ref, self.y_ref] = [x_ref, y_ref]
        [self.t_eqns, self.x_eqns, self.y_eqns] = [t_eqns, x_eqns, y_eqns]

        # placeholders (one scalar column per quantity, fed in mini-batches)
        [self.t_data_tf, self.x_data_tf, self.y_data_tf, self.c_data_tf] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(4)]
        [self.u_data_tf, self.v_data_tf, self.p_data_tf] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(3)]
        [self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(3)]

        # physics "uninformed" neural network: maps (t, x, y) -> (c, u, v, p)
        self.net_cuvp = neural_net(self.t_data, self.x_data, self.y_data, layers = self.layers)

        [self.c_data_pred,
         self.u_data_pred,
         self.v_data_pred,
         self.p_data_pred] = self.net_cuvp(self.t_data_tf,
                                           self.x_data_tf,
                                           self.y_data_tf)

        # predicted pressure at the fixed reference point (same times as the
        # data batch); used to remove the pressure gauge in error_p
        [_, _, _,
         self.p_ref_pred] = self.net_cuvp(self.t_data_tf,
                                          self.x_data_tf*0.0 + self.x_ref,
                                          self.y_data_tf*0.0 + self.y_ref)

        # physics "informed" neural networks: evaluate the same network on
        # the collocation points and form the PDE residuals e1..e4
        [self.c_eqns_pred,
         self.u_eqns_pred,
         self.v_eqns_pred,
         self.p_eqns_pred] = self.net_cuvp(self.t_eqns_tf,
                                           self.x_eqns_tf,
                                           self.y_eqns_tf)

        [self.e1_eqns_pred,
         self.e2_eqns_pred,
         self.e3_eqns_pred,
         self.e4_eqns_pred] = Navier_Stokes_2D(self.c_eqns_pred,
                                               self.u_eqns_pred,
                                               self.v_eqns_pred,
                                               self.p_eqns_pred,
                                               self.t_eqns_tf,
                                               self.x_eqns_tf,
                                               self.y_eqns_tf,
                                               self.Pec,
                                               self.Rey)

        # loss components are kept separate so each can be logged
        self.loss_c = mean_squared_error(self.c_data_pred, self.c_data_tf)
        self.loss_e1 = mean_squared_error(self.e1_eqns_pred, 0.0)
        self.loss_e2 = mean_squared_error(self.e2_eqns_pred, 0.0)
        self.loss_e3 = mean_squared_error(self.e3_eqns_pred, 0.0)
        self.loss_e4 = mean_squared_error(self.e4_eqns_pred, 0.0)

        # training loss: concentration misfit + PDE residuals only
        # (u/v/p data do NOT enter the loss)
        self.loss = self.loss_c + \
                    self.loss_e1 + self.loss_e2 + \
                    self.loss_e3 + self.loss_e4

        # relative L2 errors (monitoring only); pressure is compared after
        # subtracting the reference-point prediction
        self.error_c = relative_error(self.c_data_pred, self.c_data_tf)
        self.error_u = relative_error(self.u_data_pred, self.u_data_tf)
        self.error_v = relative_error(self.v_data_pred, self.v_data_tf)
        self.error_p = relative_error(self.p_data_pred - self.p_ref_pred, self.p_data_tf)

        # histories for convergence plots, appended once per logged iteration
        self.loss_history = []
        self.loss_c_history = []
        self.loss_e1_history = []
        self.loss_e2_history = []
        self.loss_e3_history = []
        self.loss_e4_history = []
        self.error_c_history = []
        self.error_u_history = []
        self.error_v_history = []
        self.error_p_history = []

        # optimizers: learning rate is a placeholder so it can be changed
        # between train() calls without rebuilding the graph
        self.learning_rate = tf.placeholder(tf.float32, shape=[])
        self.optimizer = tf.train.AdamOptimizer(learning_rate = self.learning_rate)
        self.train_op = self.optimizer.minimize(self.loss)

        self.sess = tf_session()

    def train(self, total_time, learning_rate):
        """Run Adam updates until total_time hours of wall-clock time elapse,
        logging per-component losses and relative errors every iteration."""
        N_data = self.t_data.shape[0]
        N_eqns = self.t_eqns.shape[0]

        start_time = time.time()
        running_time = 0
        it = 0
        while running_time < total_time:
            # mini-batches are drawn with replacement
            idx_data = np.random.choice(N_data, min(self.batch_size, N_data))
            idx_eqns = np.random.choice(N_eqns, self.batch_size)

            (t_data_batch,
             x_data_batch,
             y_data_batch,
             c_data_batch,
             u_data_batch,
             v_data_batch,
             p_data_batch) = (self.t_data[idx_data,:],
                              self.x_data[idx_data,:],
                              self.y_data[idx_data,:],
                              self.c_data[idx_data,:],
                              self.u_data[idx_data,:],
                              self.v_data[idx_data,:],
                              self.p_data[idx_data,:])

            (t_eqns_batch,
             x_eqns_batch,
             y_eqns_batch) = (self.t_eqns[idx_eqns,:],
                              self.x_eqns[idx_eqns,:],
                              self.y_eqns[idx_eqns,:])

            tf_dict = {self.t_data_tf: t_data_batch,
                       self.x_data_tf: x_data_batch,
                       self.y_data_tf: y_data_batch,
                       self.c_data_tf: c_data_batch,
                       self.u_data_tf: u_data_batch,
                       self.v_data_tf: v_data_batch,
                       self.p_data_tf: p_data_batch,
                       self.t_eqns_tf: t_eqns_batch,
                       self.x_eqns_tf: x_eqns_batch,
                       self.y_eqns_tf: y_eqns_batch,
                       self.learning_rate: learning_rate}

            self.sess.run([self.train_op], tf_dict)

            # Print and record every iteration (it % 1): this variant
            # produces the data for convergence plots
            if it % 1 == 0:
                elapsed = time.time() - start_time
                running_time += elapsed/3600.0
                [loss_value,
                 loss_c_value,
                 loss_e1_value,
                 loss_e2_value,
                 loss_e3_value,
                 loss_e4_value,
                 error_c_value,
                 error_u_value,
                 error_v_value,
                 error_p_value,
                 learning_rate_value] = self.sess.run([self.loss,
                                                       self.loss_c,
                                                       self.loss_e1,
                                                       self.loss_e2,
                                                       self.loss_e3,
                                                       self.loss_e4,
                                                       self.error_c,
                                                       self.error_u,
                                                       self.error_v,
                                                       self.error_p,
                                                       self.learning_rate], tf_dict)
                print('It: %d, Loss: %.3e, Time: %.2fs, Running Time: %.2fh, Learning Rate: %.1e'
                      %(it, loss_value, elapsed, running_time, learning_rate_value))
                print('Loss c: %.3e, Loss e1: %.3e, Loss e2: %.3e, Loss e3: %.3e, Loss e4: %.3e'
                      %(loss_c_value, loss_e1_value, loss_e2_value, loss_e3_value, loss_e4_value))
                print('Error c: %.3e, Error u: %.3e, Error v: %.3e, Error p: %.3e'
                      %(error_c_value, error_u_value, error_v_value, error_p_value))
                print(' ')
                sys.stdout.flush()

                self.loss_history += [loss_value]
                self.loss_c_history += [loss_c_value]
                self.loss_e1_history += [loss_e1_value]
                self.loss_e2_history += [loss_e2_value]
                self.loss_e3_history += [loss_e3_value]
                self.loss_e4_history += [loss_e4_value]
                self.error_c_history += [error_c_value]
                self.error_u_history += [error_u_value]
                self.error_v_history += [error_v_value]
                self.error_p_history += [error_p_value]

                start_time = time.time()
            it += 1

    def predict(self, t_star, x_star, y_star):
        """Evaluate the learned (c, u, v, p) fields at the given points."""
        tf_dict = {self.t_data_tf: t_star, self.x_data_tf: x_star, self.y_data_tf: y_star}

        c_star = self.sess.run(self.c_data_pred, tf_dict)
        u_star = self.sess.run(self.u_data_pred, tf_dict)
        v_star = self.sess.run(self.v_data_pred, tf_dict)
        p_star = self.sess.run(self.p_data_pred, tf_dict)

        return c_star, u_star, v_star, p_star
if __name__ == "__main__":

    batch_size = 10000

    # network: 3 inputs (t, x, y), 10 hidden layers of width 200, 4 outputs (c, u, v, p)
    layers = [3] + 10*[4*50] + [4]

    # Load Data
    data = scipy.io.loadmat('../Data/Cylinder2D_flower.mat')

    t_star = data['t_star'] # T x 1
    x_star = data['x_star'] # N x 1
    y_star = data['y_star'] # N x 1

    T = t_star.shape[0]
    N = x_star.shape[0]

    U_star = data['U_star'] # N x T
    V_star = data['V_star'] # N x T
    P_star = data['P_star'] # N x T
    C_star = data['C_star'] # N x T

    # Rearrange Data
    T_star = np.tile(t_star, (1,N)).T # N x T
    X_star = np.tile(x_star, (1,T)) # N x T
    Y_star = np.tile(y_star, (1,T)) # N x T

    ######################################################################
    ######################## Training Data ###############################
    ######################################################################

    # here all T snapshots and all N points are used; the commented-out
    # sys.argv alternatives allow subsampling from the command line
    T_data = T # int(sys.argv[1])
    N_data = N # int(sys.argv[2])
    # always keep the first and last snapshots; sample the interior ones
    idx_t = np.concatenate([np.array([0]), np.random.choice(T-2, T_data-2, replace=False)+1, np.array([T-1])] )
    idx_x = np.random.choice(N, N_data, replace=False)
    t_data = T_star[:, idx_t][idx_x,:].flatten()[:,None]
    x_data = X_star[:, idx_t][idx_x,:].flatten()[:,None]
    y_data = Y_star[:, idx_t][idx_x,:].flatten()[:,None]
    c_data = C_star[:, idx_t][idx_x,:].flatten()[:,None]
    # reference u/v/p are for monitoring only (not in the training loss)
    u_data = U_star[:, idx_t][idx_x,:].flatten()[:,None]
    v_data = V_star[:, idx_t][idx_x,:].flatten()[:,None]
    # pressure is shifted by its value at the first sampled spatial point,
    # which serves as the reference point for the gauge
    p_data = (P_star[:, idx_t][idx_x,:] - P_star[:, idx_t][idx_x[0:1],:]).flatten()[:,None]
    x_ref = X_star[:, idx_t[0:1]][idx_x[0:1],:].flatten()[:,None]
    y_ref = Y_star[:, idx_t[0:1]][idx_x[0:1],:].flatten()[:,None]

    # collocation points for the PDE residuals (sampled the same way)
    T_eqns = T
    N_eqns = N
    idx_t = np.concatenate([np.array([0]), np.random.choice(T-2, T_eqns-2, replace=False)+1, np.array([T-1])] )
    idx_x = np.random.choice(N, N_eqns, replace=False)
    t_eqns = T_star[:, idx_t][idx_x,:].flatten()[:,None]
    x_eqns = X_star[:, idx_t][idx_x,:].flatten()[:,None]
    y_eqns = Y_star[:, idx_t][idx_x,:].flatten()[:,None]

    # Training (Pec and Rey are treated as known here)
    model = HFM(t_data, x_data, y_data, c_data,
                u_data, v_data, p_data,
                x_ref, y_ref,
                t_eqns, x_eqns, y_eqns,
                layers, batch_size,
                Pec = 100, Rey = 100)

    # total_time is in hours of wall-clock time
    model.train(total_time = 40, learning_rate=1e-3)

    # Test Data: a single snapshot
    snap = np.array([100])
    t_test = T_star[:,snap]
    x_test = X_star[:,snap]
    y_test = Y_star[:,snap]

    c_test = C_star[:,snap]
    u_test = U_star[:,snap]
    v_test = V_star[:,snap]
    p_test = P_star[:,snap]

    # Prediction
    c_pred, u_pred, v_pred, p_pred = model.predict(t_test, x_test, y_test)

    # Error (pressure is compared up to an additive constant per snapshot)
    error_c = relative_error(c_pred, c_test)
    error_u = relative_error(u_pred, u_test)
    error_v = relative_error(v_pred, v_test)
    error_p = relative_error(p_pred - np.mean(p_pred, axis=0, keepdims=True), p_test - np.mean(p_test, axis=0, keepdims=True))

    print('Error c: %e' % (error_c))
    print('Error u: %e' % (error_u))
    print('Error v: %e' % (error_v))
    print('Error p: %e' % (error_p))

    ################# Save Data ###########################

    # predict every snapshot and store the full reconstructed fields
    C_pred = 0*C_star
    U_pred = 0*U_star
    V_pred = 0*V_star
    P_pred = 0*P_star
    for snap in range(0,t_star.shape[0]):
        t_test = T_star[:,snap:snap+1]
        x_test = X_star[:,snap:snap+1]
        y_test = Y_star[:,snap:snap+1]

        c_test = C_star[:,snap:snap+1]
        u_test = U_star[:,snap:snap+1]
        v_test = V_star[:,snap:snap+1]
        p_test = P_star[:,snap:snap+1]

        # Prediction
        c_pred, u_pred, v_pred, p_pred = model.predict(t_test, x_test, y_test)

        C_pred[:,snap:snap+1] = c_pred
        U_pred[:,snap:snap+1] = u_pred
        V_pred[:,snap:snap+1] = v_pred
        P_pred[:,snap:snap+1] = p_pred

        # Error
        error_c = relative_error(c_pred, c_test)
        error_u = relative_error(u_pred, u_test)
        error_v = relative_error(v_pred, v_test)
        error_p = relative_error(p_pred - np.mean(p_pred, axis=0, keepdims=True), p_test - np.mean(p_test, axis=0, keepdims=True))

        print('Error c: %e' % (error_c))
        print('Error u: %e' % (error_u))
        print('Error v: %e' % (error_v))
        print('Error p: %e' % (error_p))

    # save reconstructed fields plus the per-iteration loss/error histories
    scipy.io.savemat('../Results/Cylinder2D_flower_convergence_plot_results_%s.mat' %(time.strftime('%d_%m_%Y')),
                     {'C_pred': C_pred, 'U_pred': U_pred, 'V_pred': V_pred, 'P_pred': P_pred,
                      'error_c': np.asarray(model.error_c_history),
                      'error_u': np.asarray(model.error_u_history),
                      'error_v': np.asarray(model.error_v_history),
                      'error_p': np.asarray(model.error_p_history),
                      'loss': np.asarray(model.loss_history),
                      'loss_c': np.asarray(model.loss_c_history),
                      'loss_e1': np.asarray(model.loss_e1_history),
                      'loss_e2': np.asarray(model.loss_e2_history),
                      'loss_e3': np.asarray(model.loss_e3_history),
                      'loss_e4': np.asarray(model.loss_e4_history)})
"""
@author: Maziar Raissi
"""
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
import scipy.io
import time
import sys
from utilities import neural_net, Navier_Stokes_2D, \
tf_session, mean_squared_error, relative_error
class HFM(object):
    # notational conventions
    # _tf: placeholders for input/output data and points used to regress the equations
    # _pred: output of neural network
    # _eqns: points used to regress the equations
    # _data: input-output data
    # _star: predictions

    def __init__(self, t_data, x_data, y_data, c_data,
                 t_eqns, x_eqns, y_eqns,
                 layers, batch_size,
                 Pec, Rey):
        """Build the TF1 computation graph for the HFM model.

        The Peclet and Reynolds numbers are supplied as fixed constants.

        Args (array arguments are column vectors of shape [M, 1]):
            t_data, x_data, y_data, c_data: space-time points with observed
                passive-scalar concentration.
            t_eqns, x_eqns, y_eqns: collocation points where the Navier-Stokes
                residuals are penalized.
            layers: list of layer widths for the fully-connected network.
            batch_size: mini-batch size used by train().
            Pec, Rey: known Peclet and Reynolds numbers.
        """
        # specs
        self.layers = layers
        self.batch_size = batch_size

        # flow properties (fixed, not trained)
        self.Pec = Pec
        self.Rey = Rey

        # data
        [self.t_data, self.x_data, self.y_data, self.c_data] = [t_data, x_data, y_data, c_data]
        [self.t_eqns, self.x_eqns, self.y_eqns] = [t_eqns, x_eqns, y_eqns]

        # placeholders (one scalar column per quantity, fed in mini-batches)
        [self.t_data_tf, self.x_data_tf, self.y_data_tf, self.c_data_tf] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(4)]
        [self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf] = [tf.placeholder(tf.float32, shape=[None, 1]) for _ in range(3)]

        # physics "uninformed" neural network: maps (t, x, y) -> (c, u, v, p)
        self.net_cuvp = neural_net(self.t_data, self.x_data, self.y_data, layers = self.layers)

        [self.c_data_pred,
         self.u_data_pred,
         self.v_data_pred,
         self.p_data_pred] = self.net_cuvp(self.t_data_tf,
                                           self.x_data_tf,
                                           self.y_data_tf)

        # physics "informed" neural networks: evaluate the same network on
        # the collocation points and form the PDE residuals e1..e4
        [self.c_eqns_pred,
         self.u_eqns_pred,
         self.v_eqns_pred,
         self.p_eqns_pred] = self.net_cuvp(self.t_eqns_tf,
                                           self.x_eqns_tf,
                                           self.y_eqns_tf)

        [self.e1_eqns_pred,
         self.e2_eqns_pred,
         self.e3_eqns_pred,
         self.e4_eqns_pred] = Navier_Stokes_2D(self.c_eqns_pred,
                                               self.u_eqns_pred,
                                               self.v_eqns_pred,
                                               self.p_eqns_pred,
                                               self.t_eqns_tf,
                                               self.x_eqns_tf,
                                               self.y_eqns_tf,
                                               self.Pec,
                                               self.Rey)

        # loss: concentration data misfit + the four residuals driven to zero
        self.loss = mean_squared_error(self.c_data_pred, self.c_data_tf) + \
                    mean_squared_error(self.e1_eqns_pred, 0.0) + \
                    mean_squared_error(self.e2_eqns_pred, 0.0) + \
                    mean_squared_error(self.e3_eqns_pred, 0.0) + \
                    mean_squared_error(self.e4_eqns_pred, 0.0)

        # optimizers: learning rate is a placeholder so it can be changed
        # between train() calls without rebuilding the graph
        self.learning_rate = tf.placeholder(tf.float32, shape=[])
        self.optimizer = tf.train.AdamOptimizer(learning_rate = self.learning_rate)
        self.train_op = self.optimizer.minimize(self.loss)

        self.sess = tf_session()

    def train(self, total_time, learning_rate):
        """Run Adam updates until total_time hours of wall-clock time elapse."""
        N_data = self.t_data.shape[0]
        N_eqns = self.t_eqns.shape[0]

        start_time = time.time()
        running_time = 0
        it = 0
        while running_time < total_time:
            # mini-batches are drawn with replacement
            idx_data = np.random.choice(N_data, min(self.batch_size, N_data))
            idx_eqns = np.random.choice(N_eqns, self.batch_size)

            (t_data_batch,
             x_data_batch,
             y_data_batch,
             c_data_batch) = (self.t_data[idx_data,:],
                              self.x_data[idx_data,:],
                              self.y_data[idx_data,:],
                              self.c_data[idx_data,:])

            (t_eqns_batch,
             x_eqns_batch,
             y_eqns_batch) = (self.t_eqns[idx_eqns,:],
                              self.x_eqns[idx_eqns,:],
                              self.y_eqns[idx_eqns,:])

            tf_dict = {self.t_data_tf: t_data_batch,
                       self.x_data_tf: x_data_batch,
                       self.y_data_tf: y_data_batch,
                       self.c_data_tf: c_data_batch,
                       self.t_eqns_tf: t_eqns_batch,
                       self.x_eqns_tf: x_eqns_batch,
                       self.y_eqns_tf: y_eqns_batch,
                       self.learning_rate: learning_rate}

            self.sess.run([self.train_op], tf_dict)

            # Print progress (and accumulate running time) every 10 iterations
            if it % 10 == 0:
                elapsed = time.time() - start_time
                running_time += elapsed/3600.0
                [loss_value,
                 learning_rate_value] = self.sess.run([self.loss,
                                                       self.learning_rate], tf_dict)
                print('It: %d, Loss: %.3e, Time: %.2fs, Running Time: %.2fh, Learning Rate: %.1e'
                      %(it, loss_value, elapsed, running_time, learning_rate_value))
                sys.stdout.flush()
                start_time = time.time()
            it += 1

    def predict(self, t_star, x_star, y_star):
        """Evaluate the learned (c, u, v, p) fields at the given points."""
        tf_dict = {self.t_data_tf: t_star, self.x_data_tf: x_star, self.y_data_tf: y_star}

        c_star = self.sess.run(self.c_data_pred, tf_dict)
        u_star = self.sess.run(self.u_data_pred, tf_dict)
        v_star = self.sess.run(self.v_data_pred, tf_dict)
        p_star = self.sess.run(self.p_data_pred, tf_dict)

        return c_star, u_star, v_star, p_star
def main():
    """Train an HFM model on the 2D cylinder "flower" dataset.

    Command line:
        argv[1] -> T_data, number of time snapshots with concentration data
        argv[2] -> N_data, number of spatial points with concentration data

    After training, prints relative errors on one snapshot, then predicts
    every snapshot and saves all fields under ../Results/.
    """
    batch_size = 10000

    # 3 inputs (t, x, y) -> 10 hidden layers of 200 units -> 4 outputs (c, u, v, p)
    layers = [3] + 10 * [4 * 50] + [4]

    # ------------------------------------------------------------------
    # Load data
    # ------------------------------------------------------------------
    data = scipy.io.loadmat('../Data/Cylinder2D_flower.mat')

    t_star = data['t_star']  # T x 1
    x_star = data['x_star']  # N x 1
    y_star = data['y_star']  # N x 1

    T = t_star.shape[0]
    N = x_star.shape[0]

    U_star = data['U_star']  # N x T
    V_star = data['V_star']  # N x T
    P_star = data['P_star']  # N x T
    C_star = data['C_star']  # N x T

    # Replicate time over points and coordinates over snapshots -> N x T grids.
    T_star = np.tile(t_star, (1, N)).T
    X_star = np.tile(x_star, (1, T))
    Y_star = np.tile(y_star, (1, T))

    # ------------------------------------------------------------------
    # Training data
    # ------------------------------------------------------------------
    T_data = int(sys.argv[1])
    N_data = int(sys.argv[2])

    # Always keep the first and last snapshots; sample the interior and the
    # spatial points without replacement.
    idx_t = np.concatenate([np.array([0]),
                            np.random.choice(T - 2, T_data - 2, replace=False) + 1,
                            np.array([T - 1])])
    idx_x = np.random.choice(N, N_data, replace=False)

    t_data = T_star[:, idx_t][idx_x, :].flatten()[:, None]
    x_data = X_star[:, idx_t][idx_x, :].flatten()[:, None]
    y_data = Y_star[:, idx_t][idx_x, :].flatten()[:, None]
    c_data = C_star[:, idx_t][idx_x, :].flatten()[:, None]

    # Collocation (equation) points: every snapshot, every spatial location.
    T_eqns = T
    N_eqns = N
    idx_t = np.concatenate([np.array([0]),
                            np.random.choice(T - 2, T_eqns - 2, replace=False) + 1,
                            np.array([T - 1])])
    idx_x = np.random.choice(N, N_eqns, replace=False)

    t_eqns = T_star[:, idx_t][idx_x, :].flatten()[:, None]
    x_eqns = X_star[:, idx_t][idx_x, :].flatten()[:, None]
    y_eqns = Y_star[:, idx_t][idx_x, :].flatten()[:, None]

    # ------------------------------------------------------------------
    # Training (total_time is a budget in hours)
    # ------------------------------------------------------------------
    model = HFM(t_data, x_data, y_data, c_data,
                t_eqns, x_eqns, y_eqns,
                layers, batch_size,
                Pec = 100, Rey = 100)

    model.train(total_time = 40, learning_rate=1e-3)

    # ------------------------------------------------------------------
    # Evaluate on a single snapshot
    # ------------------------------------------------------------------
    snap = np.array([100])  # NOTE(review): assumes T > 100 — confirm for this dataset
    c_pred, u_pred, v_pred, p_pred = model.predict(T_star[:, snap],
                                                   X_star[:, snap],
                                                   Y_star[:, snap])

    print('Error c: %e' % (relative_error(c_pred, C_star[:, snap])))
    print('Error u: %e' % (relative_error(u_pred, U_star[:, snap])))
    print('Error v: %e' % (relative_error(v_pred, V_star[:, snap])))
    # Pressure is determined only up to a constant: compare mean-centred fields.
    print('Error p: %e' % (relative_error(p_pred - np.mean(p_pred),
                                          P_star[:, snap] - np.mean(P_star[:, snap]))))

    # ------------------------------------------------------------------
    # Predict every snapshot and save the results
    # ------------------------------------------------------------------
    C_pred = np.zeros_like(C_star)
    U_pred = np.zeros_like(U_star)
    V_pred = np.zeros_like(V_star)
    P_pred = np.zeros_like(P_star)

    for s in range(t_star.shape[0]):
        col = slice(s, s + 1)
        c_pred, u_pred, v_pred, p_pred = model.predict(T_star[:, col],
                                                       X_star[:, col],
                                                       Y_star[:, col])
        C_pred[:, col] = c_pred
        U_pred[:, col] = u_pred
        V_pred[:, col] = v_pred
        P_pred[:, col] = p_pred

        # Per-snapshot relative errors.
        print('Error c: %e' % (relative_error(c_pred, C_star[:, col])))
        print('Error u: %e' % (relative_error(u_pred, U_star[:, col])))
        print('Error v: %e' % (relative_error(v_pred, V_star[:, col])))
        print('Error p: %e' % (relative_error(p_pred - np.mean(p_pred),
                                              P_star[:, col] - np.mean(P_star[:, col]))))

    scipy.io.savemat('../Results/Cylinder2D_flower_results_%d_%d_%s.mat' %(T_data, N_data, time.strftime('%d_%m_%Y')),
                     {'C_pred': C_pred, 'U_pred': U_pred, 'V_pred': V_pred, 'P_pred': P_pred})
# Entry point: run the full train/evaluate/save pipeline only when executed
# as a script (not on import).
if __name__ == "__main__":
    main()
"""
@author: Maziar Raissi
"""
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
import scipy.io
import time
import sys
from utilities import neural_net, Navier_Stokes_2D, \
tf_session, mean_squared_error, relative_error
class HFM(object):
    """Hidden Fluid Mechanics model.

    A physics-informed neural network that fits a passive-scalar
    concentration c(t, x, y) to data while penalizing the residuals of the
    2D transport / Navier-Stokes equations at a set of collocation points.

    Naming conventions (kept from the reference implementation):
      _tf:   placeholders for input/output data and collocation points
      _pred: tensors produced by the neural network
      _eqns: points used to regress the equations
      _data: input-output data
      _star: predictions
    """

    def __init__(self, t_data, x_data, y_data, c_data,
                 t_eqns, x_eqns, y_eqns,
                 layers, batch_size,
                 Pec, Rey):
        # Network architecture and mini-batch size.
        self.layers = layers
        self.batch_size = batch_size

        # Flow properties (fixed scalars here, not trainable variables).
        self.Pec = Pec  # Peclet number
        self.Rey = Rey  # Reynolds number

        # Raw training arrays (each an M x 1 column vector).
        self.t_data, self.x_data, self.y_data, self.c_data = t_data, x_data, y_data, c_data
        self.t_eqns, self.x_eqns, self.y_eqns = t_eqns, x_eqns, y_eqns

        # Placeholders: float32 column vectors of unknown length.
        def _col():
            return tf.placeholder(tf.float32, shape=[None, 1])

        self.t_data_tf, self.x_data_tf, self.y_data_tf, self.c_data_tf = _col(), _col(), _col(), _col()
        self.t_eqns_tf, self.x_eqns_tf, self.y_eqns_tf = _col(), _col(), _col()

        # Physics "uninformed" network: (t, x, y) -> (c, u, v, p).
        # The raw data arrays are passed to the constructor (presumably for
        # input normalization inside neural_net — defined in utilities).
        self.net_cuvp = neural_net(self.t_data, self.x_data, self.y_data,
                                   layers = self.layers)

        (self.c_data_pred,
         self.u_data_pred,
         self.v_data_pred,
         self.p_data_pred) = self.net_cuvp(self.t_data_tf,
                                           self.x_data_tf,
                                           self.y_data_tf)

        # Same network evaluated at the collocation points.
        (self.c_eqns_pred,
         self.u_eqns_pred,
         self.v_eqns_pred,
         self.p_eqns_pred) = self.net_cuvp(self.t_eqns_tf,
                                           self.x_eqns_tf,
                                           self.y_eqns_tf)

        # Physics "informed" residuals of the governing equations.
        (self.e1_eqns_pred,
         self.e2_eqns_pred,
         self.e3_eqns_pred,
         self.e4_eqns_pred) = Navier_Stokes_2D(self.c_eqns_pred,
                                               self.u_eqns_pred,
                                               self.v_eqns_pred,
                                               self.p_eqns_pred,
                                               self.t_eqns_tf,
                                               self.x_eqns_tf,
                                               self.y_eqns_tf,
                                               self.Pec,
                                               self.Rey)

        # Total loss: data misfit on c plus the four PDE residuals.
        self.loss = mean_squared_error(self.c_data_pred, self.c_data_tf) + \
                    mean_squared_error(self.e1_eqns_pred, 0.0) + \
                    mean_squared_error(self.e2_eqns_pred, 0.0) + \
                    mean_squared_error(self.e3_eqns_pred, 0.0) + \
                    mean_squared_error(self.e4_eqns_pred, 0.0)

        # Adam optimizer with a feedable learning rate.
        self.learning_rate = tf.placeholder(tf.float32, shape=[])
        self.optimizer = tf.train.AdamOptimizer(learning_rate = self.learning_rate)
        self.train_op = self.optimizer.minimize(self.loss)

        self.sess = tf_session()

    def train(self, total_time, learning_rate):
        """Run Adam updates until the wall-clock budget (in hours) runs out."""
        n_data = self.t_data.shape[0]
        n_eqns = self.t_eqns.shape[0]

        tic = time.time()
        hours_elapsed = 0
        it = 0
        while hours_elapsed < total_time:
            # Random mini-batches (with replacement) for both loss terms.
            data_idx = np.random.choice(n_data, min(self.batch_size, n_data))
            eqns_idx = np.random.choice(n_eqns, self.batch_size)

            feed = {self.t_data_tf: self.t_data[data_idx, :],
                    self.x_data_tf: self.x_data[data_idx, :],
                    self.y_data_tf: self.y_data[data_idx, :],
                    self.c_data_tf: self.c_data[data_idx, :],
                    self.t_eqns_tf: self.t_eqns[eqns_idx, :],
                    self.x_eqns_tf: self.x_eqns[eqns_idx, :],
                    self.y_eqns_tf: self.y_eqns[eqns_idx, :],
                    self.learning_rate: learning_rate}

            self.sess.run([self.train_op], feed)

            # Periodic progress report; the timer restarts after each print.
            if it % 10 == 0:
                seconds = time.time() - tic
                hours_elapsed += seconds / 3600.0
                loss_value, lr_value = self.sess.run(
                    [self.loss, self.learning_rate], feed)
                print('It: %d, Loss: %.3e, Time: %.2fs, Running Time: %.2fh, Learning Rate: %.1e'
                      %(it, loss_value, seconds, hours_elapsed, lr_value))
                sys.stdout.flush()
                tic = time.time()
            it += 1

    def predict(self, t_star, x_star, y_star):
        """Evaluate the trained network at arbitrary (t, x, y) points.

        Returns the four predicted fields (c, u, v, p), one fetch per field.
        """
        feed = {self.t_data_tf: t_star,
                self.x_data_tf: x_star,
                self.y_data_tf: y_star}
        c_star, u_star, v_star, p_star = (
            self.sess.run(tensor, feed)
            for tensor in (self.c_data_pred, self.u_data_pred,
                           self.v_data_pred, self.p_data_pred))
        return c_star, u_star, v_star, p_star
def main():
    """Train an HFM model on noisy concentration data from the cylinder dataset.

    Command line:
        argv[1] -> noise, Gaussian noise level as a fraction of std(c_data)

    Uses fixed T_data = 101 snapshots and N_data = 15000 points, prints
    relative errors, then predicts every snapshot and saves the fields.
    """
    batch_size = 10000

    # 3 inputs (t, x, y) -> 10 hidden layers of 200 units -> 4 outputs (c, u, v, p)
    layers = [3] + 10 * [4 * 50] + [4]

    # ------------------------------------------------------------------
    # Load data
    # ------------------------------------------------------------------
    data = scipy.io.loadmat('../Data/Cylinder2D_flower.mat')

    t_star = data['t_star']  # T x 1
    x_star = data['x_star']  # N x 1
    y_star = data['y_star']  # N x 1

    T = t_star.shape[0]
    N = x_star.shape[0]

    U_star = data['U_star']  # N x T
    V_star = data['V_star']  # N x T
    P_star = data['P_star']  # N x T
    C_star = data['C_star']  # N x T

    # Replicate time over points and coordinates over snapshots -> N x T grids.
    T_star = np.tile(t_star, (1, N)).T
    X_star = np.tile(x_star, (1, T))
    Y_star = np.tile(y_star, (1, T))

    # ------------------------------------------------------------------
    # Training data (fixed sizes in this noise-study script)
    # ------------------------------------------------------------------
    T_data = 101
    N_data = 15000

    # Always keep the first and last snapshots; sample the interior and the
    # spatial points without replacement.
    idx_t = np.concatenate([np.array([0]),
                            np.random.choice(T - 2, T_data - 2, replace=False) + 1,
                            np.array([T - 1])])
    idx_x = np.random.choice(N, N_data, replace=False)

    t_data = T_star[:, idx_t][idx_x, :].flatten()[:, None]
    x_data = X_star[:, idx_t][idx_x, :].flatten()[:, None]
    y_data = Y_star[:, idx_t][idx_x, :].flatten()[:, None]
    c_data = C_star[:, idx_t][idx_x, :].flatten()[:, None]

    # Collocation (equation) points: every snapshot, every spatial location.
    T_eqns = T
    N_eqns = N
    idx_t = np.concatenate([np.array([0]),
                            np.random.choice(T - 2, T_eqns - 2, replace=False) + 1,
                            np.array([T - 1])])
    idx_x = np.random.choice(N, N_eqns, replace=False)

    t_eqns = T_star[:, idx_t][idx_x, :].flatten()[:, None]
    x_eqns = X_star[:, idx_t][idx_x, :].flatten()[:, None]
    y_eqns = Y_star[:, idx_t][idx_x, :].flatten()[:, None]

    # ------------------------------------------------------------------
    # Corrupt the concentration observations with Gaussian noise
    # ------------------------------------------------------------------
    noise = float(sys.argv[1])
    c_data = c_data + noise*np.std(c_data)*np.random.randn(c_data.shape[0], c_data.shape[1])

    # ------------------------------------------------------------------
    # Training (total_time is a budget in hours)
    # ------------------------------------------------------------------
    model = HFM(t_data, x_data, y_data, c_data,
                t_eqns, x_eqns, y_eqns,
                layers, batch_size,
                Pec = 100, Rey = 100)

    model.train(total_time = 40, learning_rate=1e-3)

    # ------------------------------------------------------------------
    # Evaluate on a single snapshot (against the clean reference fields)
    # ------------------------------------------------------------------
    snap = np.array([100])  # NOTE(review): assumes T > 100 — confirm for this dataset
    c_pred, u_pred, v_pred, p_pred = model.predict(T_star[:, snap],
                                                   X_star[:, snap],
                                                   Y_star[:, snap])

    print('Error c: %e' % (relative_error(c_pred, C_star[:, snap])))
    print('Error u: %e' % (relative_error(u_pred, U_star[:, snap])))
    print('Error v: %e' % (relative_error(v_pred, V_star[:, snap])))
    # Pressure is determined only up to a constant: compare mean-centred fields.
    print('Error p: %e' % (relative_error(p_pred - np.mean(p_pred),
                                          P_star[:, snap] - np.mean(P_star[:, snap]))))

    # ------------------------------------------------------------------
    # Predict every snapshot and save the results
    # ------------------------------------------------------------------
    C_pred = np.zeros_like(C_star)
    U_pred = np.zeros_like(U_star)
    V_pred = np.zeros_like(V_star)
    P_pred = np.zeros_like(P_star)

    for s in range(t_star.shape[0]):
        col = slice(s, s + 1)
        c_pred, u_pred, v_pred, p_pred = model.predict(T_star[:, col],
                                                       X_star[:, col],
                                                       Y_star[:, col])
        C_pred[:, col] = c_pred
        U_pred[:, col] = u_pred
        V_pred[:, col] = v_pred
        P_pred[:, col] = p_pred

        # Per-snapshot relative errors.
        print('Error c: %e' % (relative_error(c_pred, C_star[:, col])))
        print('Error u: %e' % (relative_error(u_pred, U_star[:, col])))
        print('Error v: %e' % (relative_error(v_pred, V_star[:, col])))
        print('Error p: %e' % (relative_error(p_pred - np.mean(p_pred),
                                              P_star[:, col] - np.mean(P_star[:, col]))))

    scipy.io.savemat('../Results/Cylinder2D_flower_results_%.2f_noise_%s.mat' %(noise, time.strftime('%d_%m_%Y')),
                     {'C_pred': C_pred, 'U_pred': U_pred, 'V_pred': V_pred, 'P_pred': P_pred})
# Entry point: run the full train/evaluate/save pipeline only when executed
# as a script (not on import).
if __name__ == "__main__":
    main()
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
FROM image.sourcefind.cn:5000/dcu/admin/base/tensorflow:2.13.1-py3.10-dtk24.04.3-ubuntu20.04
theme: jekyll-theme-cayman
title: Hidden Fluid Mechanics
description: A Navier-Stokes Informed Deep Learning Framework for Assimilating Flow Visualization Data
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment