Commit b1354256 authored by Dan O'Shea

Formatting fixes

parent 32afad9c
@@ -366,9 +366,11 @@ class LFADS(object):
       if datasets and 'alignment_matrix_cxf' in datasets[name].keys():
         dataset = datasets[name]
         if hps.do_train_readin:
-          print("Initializing trainable readin matrix with alignment matrix provided for dataset:", name)
+          print("Initializing trainable readin matrix with alignment matrix \
+provided for dataset:", name)
         else:
-          print("Setting non-trainable readin matrix to alignment matrix provided for dataset:", name)
+          print("Setting non-trainable readin matrix to alignment matrix \
+provided for dataset:", name)
         in_mat_cxf = dataset['alignment_matrix_cxf'].astype(np.float32)
         if in_mat_cxf.shape != (data_dim, factors_dim):
           raise ValueError("""Alignment matrix must have dimensions %d x %d
@@ -378,9 +380,11 @@ class LFADS(object):
       if datasets and 'alignment_bias_c' in datasets[name].keys():
         dataset = datasets[name]
         if hps.do_train_readin:
-          print("Initializing trainable readin bias with alignment bias provided for dataset:", name)
+          print("Initializing trainable readin bias with alignment bias \
+provided for dataset:", name)
         else:
-          print("Setting non-trainable readin bias to alignment bias provided for dataset:", name)
+          print("Setting non-trainable readin bias to alignment bias \
+provided for dataset:", name)
         align_bias_c = dataset['alignment_bias_c'].astype(np.float32)
         align_bias_1xc = np.expand_dims(align_bias_c, axis=0)
         if align_bias_1xc.shape[1] != data_dim:
...
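For reference, the shape checks in these two hunks expect 'alignment_matrix_cxf' to be data_dim x factors_dim and 'alignment_bias_c' to have length data_dim. A minimal sketch of preparing those entries with h5py; the file name and sizes are hypothetical, only the key names and shapes come from the code above:

import h5py
import numpy as np

data_dim, factors_dim = 50, 20  # hypothetical per-session sizes

# channels-by-factors readin initialization and per-channel bias
in_mat_cxf = np.random.randn(data_dim, factors_dim).astype(np.float32)
align_bias_c = np.zeros(data_dim, dtype=np.float32)

with h5py.File("my_session.h5", "a") as f:  # hypothetical file name
  f.create_dataset("alignment_matrix_cxf", data=in_mat_cxf)
  f.create_dataset("alignment_bias_c", data=align_bias_c)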
@@ -212,9 +212,9 @@ flags.DEFINE_float("co_prior_var_scale", CO_PRIOR_VAR_SCALE,
                    "Variance of control input prior distribution.")
 flags.DEFINE_float("prior_ar_atau", PRIOR_AR_AUTOCORRELATION,
                    "Initial autocorrelation of AR(1) priors.")
 flags.DEFINE_float("prior_ar_nvar", PRIOR_AR_PROCESS_VAR,
                    "Initial noise variance for AR(1) priors.")
 flags.DEFINE_boolean("do_train_prior_ar_atau", DO_TRAIN_PRIOR_AR_ATAU,
                      "Is the value for atau an init, or the constant value?")
@@ -257,13 +257,13 @@ flags.DEFINE_boolean("do_causal_controller",
 # Strictly speaking, feeding either the factors or the rates to the controller
 # violates causality, since the g0 gets to see all the data. This may or may not
 # be only a theoretical concern.
 flags.DEFINE_boolean("do_feed_factors_to_controller",
                      DO_FEED_FACTORS_TO_CONTROLLER,
                      "Should factors[t-1] be input to controller at time t?")
 flags.DEFINE_string("feedback_factors_or_rates", FEEDBACK_FACTORS_OR_RATES,
                     "Feedback the factors or the rates to the controller? \
 Acceptable values: 'factors' or 'rates'.")
 flags.DEFINE_integer("controller_input_lag", CONTROLLER_INPUT_LAG,
                      "Time lag on the encoding to controller t-lag for \
 forward, t+lag for reverse.")
@@ -320,10 +320,13 @@ flags.DEFINE_boolean("do_reset_learning_rate", DO_RESET_LEARNING_RATE,
 # for multi-session "stitching" models, the per-session readin matrices map from
-# neurons to input factors which are fed into the shared encoder. These are initialized
-# by alignment_matrix_cxf and alignment_bias_c in the input .h5 files. They can be fixed or
-# made trainable.
-flags.DEFINE_boolean("do_train_readin", DO_TRAIN_READIN, "Whether to train the readin matrices and bias vectors. False leaves them fixed at their initial values specified by the alignment matrices / vectors.")
+# neurons to input factors which are fed into the shared encoder. These are
+# initialized by alignment_matrix_cxf and alignment_bias_c in the input .h5
+# files. They can be fixed or made trainable.
+flags.DEFINE_boolean("do_train_readin", DO_TRAIN_READIN, "Whether to train the \
+readin matrices and bias vectors. False leaves them fixed \
+at their initial values specified by the alignment \
+matrices and vectors.")

 # OVERFITTING
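These hyperparameters are set on the command line. A hypothetical invocation; the run_lfads.py entry point and the values shown are assumptions, only the flag names come from the definitions above:

python run_lfads.py \
  --do_train_readin=false \
  --do_feed_factors_to_controller=true \
  --feedback_factors_or_rates=factors \
  --prior_ar_atau=10.0 \
  --do_train_prior_ar_atau=true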
@@ -439,8 +442,9 @@ def build_model(hps, kind="train", datasets=None):
                "write_model_params"]:
     print("Possible error!!! You are running ", kind, " on a newly \
 initialized model!")
     # cant print ckpt.model_check_point path if no ckpt
-    print("Are you sure you sure a checkpoint in ", hps.lfads_save_dir, " exists?")
+    print("Are you sure you sure a checkpoint in ", hps.lfads_save_dir,
+          " exists?")
     tf.global_variables_initializer().run()
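The "no ckpt" comment presumably refers to tf.train.get_checkpoint_state returning None when the save directory has no checkpoint. A sketch of that check; this is an assumption about the surrounding code, which is not shown in this diff:

ckpt = tf.train.get_checkpoint_state(hps.lfads_save_dir)
if ckpt and ckpt.model_checkpoint_path:
  print("Restoring from checkpoint:", ckpt.model_checkpoint_path)
else:
  print("No checkpoint found in", hps.lfads_save_dir)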
@@ -462,7 +466,7 @@ def jsonify_dict(d):
   Creates a shallow-copied dictionary first, then accomplishes string
   conversion.
   Args:
     d: hyperparameter dictionary
   Returns: hyperparameter dictionary with bool's as strings
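A minimal sketch of the behavior this docstring describes; the actual implementation in the file may differ in details:

def jsonify_dict_sketch(d):
  """Shallow-copy d and convert bool values to strings for JSON output."""
  d2 = d.copy()
  for k, v in d2.items():
    if isinstance(v, bool):
      d2[k] = "true" if v else "false"
  return d2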
@@ -787,4 +791,3 @@ def main(_):
 if __name__ == "__main__":
   tf.app.run()
@@ -91,7 +91,7 @@ def init_linear(in_size, out_size, do_bias=True, mat_init_value=None,
   Args:
     in_size: The integer size of the non-batc input dimension. [(x),y]
     out_size: The integer size of non-batch output dimension. [x,(y)]
     do_bias (optional): Add a (learnable) bias vector to the operation,
       if false, b will be an appropriately sized, non-trainable vector
     mat_init_value (optional): numpy constant for matrix initialization, if None
       , do random, with additional parameters.
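A hypothetical call, based only on the documented arguments; the exact return value is an assumption (the weight variable, plus a bias when do_bias=True):

import numpy as np

# random init (mat_init_value=None), with a trainable bias
w, b = init_linear(100, 20, do_bias=True, mat_init_value=None)

# fixed init from a numpy constant, e.g. a precomputed readin matrix
fixed = np.zeros((100, 20), dtype=np.float32)
w2, b2 = init_linear(100, 20, do_bias=True, mat_init_value=fixed)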
@@ -132,7 +132,8 @@ def init_linear(in_size, out_size, do_bias=True, mat_init_value=None,
   if collections:
     w_collections += collections
   if mat_init_value is not None:
-    w = tf.Variable(mat_init_value, name=wname, collections=w_collections, trainable=trainable)
+    w = tf.Variable(mat_init_value, name=wname, collections=w_collections,
+                    trainable=trainable)
   else:
     w = tf.get_variable(wname, [in_size, out_size], initializer=mat_init,
                         collections=w_collections, trainable=trainable)
@@ -142,7 +143,8 @@ def init_linear(in_size, out_size, do_bias=True, mat_init_value=None,
   if collections:
     w_collections += collections
   if mat_init_value is not None:
-    w = tf.Variable(mat_init_value, name=wname, collections=w_collections, trainable=trainable)
+    w = tf.Variable(mat_init_value, name=wname, collections=w_collections,
+                    trainable=trainable)
   else:
     w = tf.get_variable(wname, [in_size, out_size], initializer=mat_init,
                         collections=w_collections, trainable=trainable)
...
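Both hunks above follow the same pattern: a supplied numpy constant goes through tf.Variable, otherwise tf.get_variable draws from the random initializer. A standalone sketch of that branch (TF1-style; the function name is illustrative):

import tensorflow as tf

def make_matrix(wname, in_size, out_size, mat_init, mat_init_value=None,
                w_collections=None, trainable=True):
  # A concrete initial value always creates a fresh tf.Variable, while
  # tf.get_variable is used otherwise so variable-scope reuse still applies.
  if mat_init_value is not None:
    return tf.Variable(mat_init_value, name=wname,
                       collections=w_collections, trainable=trainable)
  return tf.get_variable(wname, [in_size, out_size], initializer=mat_init,
                         collections=w_collections, trainable=trainable)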