Unverified Commit 52e40cb8 authored by Harsh Bardhan Mishra's avatar Harsh Bardhan Mishra Committed by GitHub
Browse files

Fix Lazy Logging (#3108)

parent 07dd4c54
...@@ -218,8 +218,7 @@ def run_epoch(batches, answer_net, is_training): ...@@ -218,8 +218,7 @@ def run_epoch(batches, answer_net, is_training):
loss, _, = sess.run( loss, _, = sess.run(
[answer_net.loss, answer_net.train_op], feed_dict=feed_dict) [answer_net.loss, answer_net.train_op], feed_dict=feed_dict)
if count % 100 == 0: if count % 100 == 0:
logger.debug('%d %g except:%g, loss:%g' % logger.debug('%d %g except:%g, loss:%g', count, used, used / count * len(batches), loss)
(count, used, used / count * len(batches), loss))
loss_sum += loss loss_sum += loss
else: else:
feed_dict = {answer_net.query_word: query, feed_dict = {answer_net.query_word: query,
...@@ -239,8 +238,7 @@ def run_epoch(batches, answer_net, is_training): ...@@ -239,8 +238,7 @@ def run_epoch(batches, answer_net, is_training):
contexts += context contexts += context
ids = np.concatenate((ids, sample_id)) ids = np.concatenate((ids, sample_id))
if count % 100 == 0: if count % 100 == 0:
logger.debug('%d %g except:%g' % logger.debug('%d %g except:%g', count, used, used / count * len(batches))
(count, used, used / count * len(batches)))
loss = loss_sum / len(batches) loss = loss_sum / len(batches)
if is_training: if is_training:
return loss return loss
...@@ -327,7 +325,7 @@ def train_with_graph(graph, qp_pairs, dev_qp_pairs): ...@@ -327,7 +325,7 @@ def train_with_graph(graph, qp_pairs, dev_qp_pairs):
train_batches = data.get_batches(qp_pairs, cfg.batch_size) train_batches = data.get_batches(qp_pairs, cfg.batch_size)
train_loss = run_epoch(train_batches, train_model, True) train_loss = run_epoch(train_batches, train_model, True)
logger.debug('epoch ' + str(epoch) + logger.debug('epoch ' + str(epoch) +
' loss: ' + str(train_loss)) ' loss: %s', train_loss)
dev_batches = list(data.get_batches( dev_batches = list(data.get_batches(
dev_qp_pairs, cfg.batch_size)) dev_qp_pairs, cfg.batch_size))
_, position1, position2, ids, contexts = run_epoch( _, position1, position2, ids, contexts = run_epoch(
...@@ -361,8 +359,7 @@ def train_with_graph(graph, qp_pairs, dev_qp_pairs): ...@@ -361,8 +359,7 @@ def train_with_graph(graph, qp_pairs, dev_qp_pairs):
with open(os.path.join(save_path, 'epoch%d.score' % epoch), 'wb') as file: with open(os.path.join(save_path, 'epoch%d.score' % epoch), 'wb') as file:
pickle.dump( pickle.dump(
(position1, position2, ids, contexts), file) (position1, position2, ids, contexts), file)
logger.debug('epoch %d acc %g bestacc %g' % logger.debug('epoch %d acc %g bestacc %g', epoch, acc, bestacc)
(epoch, acc, bestacc))
if patience <= iter: if patience <= iter:
break break
logger.debug('save done.') logger.debug('save done.')
......
...@@ -112,7 +112,7 @@ def main(args): ...@@ -112,7 +112,7 @@ def main(args):
if os.path.isfile(load_checkpoint_path): if os.path.isfile(load_checkpoint_path):
model_state_dict = load_checkpoint(load_checkpoint_path) model_state_dict = load_checkpoint(load_checkpoint_path)
logger.info("test : " + load_checkpoint_path) logger.info("test : %s", load_checkpoint_path)
logger.info(type(model_state_dict)) logger.info(type(model_state_dict))
model.load_state_dict(model_state_dict) model.load_state_dict(model_state_dict)
......
...@@ -63,7 +63,7 @@ def run(X_train, X_test, y_train, y_test, model): ...@@ -63,7 +63,7 @@ def run(X_train, X_test, y_train, y_test, model):
'''Train model and predict result''' '''Train model and predict result'''
model.fit(X_train, y_train) model.fit(X_train, y_train)
score = model.score(X_test, y_test) score = model.score(X_test, y_test)
LOG.debug('score: %s' % score) LOG.debug('score: %s', score)
nni.report_final_result(score) nni.report_final_result(score)
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -74,7 +74,7 @@ def run(X_train, X_test, y_train, y_test, model): ...@@ -74,7 +74,7 @@ def run(X_train, X_test, y_train, y_test, model):
model.fit(X_train, y_train) model.fit(X_train, y_train)
predict_y = model.predict(X_test) predict_y = model.predict(X_test)
score = r2_score(y_test, predict_y) score = r2_score(y_test, predict_y)
LOG.debug('r2 score: %s' % score) LOG.debug('r2 score: %s', score)
nni.report_final_result(score) nni.report_final_result(score)
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -387,8 +387,7 @@ class OpEvo(Tuner): ...@@ -387,8 +387,7 @@ class OpEvo(Tuner):
self.population = Population(search_space, self.population = Population(search_space,
self.mutate_rate, self.mutate_rate,
self.optimize_mode) self.optimize_mode)
self.logger.debug('Total search space volume: ' self.logger.debug('Total search space volume: %s', self.population.volume)
+ str(self.population.volume))
if not self.serve_list: if not self.serve_list:
self.serve_list = self.population.get_offspring( self.serve_list = self.population.get_offspring(
......
...@@ -219,8 +219,7 @@ def run_epoch(batches, answer_net, is_training): ...@@ -219,8 +219,7 @@ def run_epoch(batches, answer_net, is_training):
loss, _, = sess.run( loss, _, = sess.run(
[answer_net.loss, answer_net.train_op], feed_dict=feed_dict) [answer_net.loss, answer_net.train_op], feed_dict=feed_dict)
if count % 100 == 0: if count % 100 == 0:
logger.debug('%d %g except:%g, loss:%g' % logger.debug('%d %g except:%g, loss:%g', count, used, used / count * len(batches), loss)
(count, used, used / count * len(batches), loss))
loss_sum += loss loss_sum += loss
else: else:
feed_dict = {answer_net.query_word: query, feed_dict = {answer_net.query_word: query,
...@@ -240,8 +239,7 @@ def run_epoch(batches, answer_net, is_training): ...@@ -240,8 +239,7 @@ def run_epoch(batches, answer_net, is_training):
contexts += context contexts += context
ids = np.concatenate((ids, sample_id)) ids = np.concatenate((ids, sample_id))
if count % 100 == 0: if count % 100 == 0:
logger.debug('%d %g except:%g' % logger.debug('%d %g except:%g', count, used, used / count * len(batches))
(count, used, used / count * len(batches)))
loss = loss_sum / len(batches) loss = loss_sum / len(batches)
if is_training: if is_training:
return loss return loss
...@@ -333,7 +331,7 @@ def train_with_graph(p_graph, qp_pairs, dev_qp_pairs): ...@@ -333,7 +331,7 @@ def train_with_graph(p_graph, qp_pairs, dev_qp_pairs):
train_batches = data.get_batches(qp_pairs, cfg.batch_size) train_batches = data.get_batches(qp_pairs, cfg.batch_size)
train_loss = run_epoch(train_batches, train_model, True) train_loss = run_epoch(train_batches, train_model, True)
logger.debug('epoch ' + str(epoch) + logger.debug('epoch ' + str(epoch) +
' loss: ' + str(train_loss)) ' loss: %s', train_loss)
dev_batches = list(data.get_batches( dev_batches = list(data.get_batches(
dev_qp_pairs, cfg.batch_size)) dev_qp_pairs, cfg.batch_size))
_, position1, position2, ids, contexts = run_epoch( _, position1, position2, ids, contexts = run_epoch(
...@@ -369,8 +367,7 @@ def train_with_graph(p_graph, qp_pairs, dev_qp_pairs): ...@@ -369,8 +367,7 @@ def train_with_graph(p_graph, qp_pairs, dev_qp_pairs):
with open(os.path.join(save_path, 'epoch%d.score' % epoch), 'wb') as file: with open(os.path.join(save_path, 'epoch%d.score' % epoch), 'wb') as file:
pickle.dump( pickle.dump(
(position1, position2, ids, contexts), file) (position1, position2, ids, contexts), file)
logger.debug('epoch %d acc %g bestacc %g' % logger.debug('epoch %d acc %g bestacc %g', epoch, acc, bestacc)
(epoch, acc, bestacc))
if patience <= iter: if patience <= iter:
break break
logger.debug('save done.') logger.debug('save done.')
......
...@@ -35,8 +35,7 @@ class SimpleTuner(Tuner): ...@@ -35,8 +35,7 @@ class SimpleTuner(Tuner):
'checksum': None, 'checksum': None,
'path': '', 'path': '',
} }
_logger.info('generate parameter for father trial %s' % _logger.info('generate parameter for father trial %s', parameter_id)
parameter_id)
self.thread_lock.release() self.thread_lock.release()
return { return {
'prev_id': 0, 'prev_id': 0,
......
...@@ -18,7 +18,7 @@ class NaiveAssessor(Assessor): ...@@ -18,7 +18,7 @@ class NaiveAssessor(Assessor):
_logger.info('init') _logger.info('init')
def assess_trial(self, trial_job_id, trial_history): def assess_trial(self, trial_job_id, trial_history):
_logger.info('assess trial %s %s' % (trial_job_id, trial_history)) _logger.info('assess trial %s %s', trial_job_id, trial_history)
id_ = trial_history[0] id_ = trial_history[0]
if id_ in self._killed: if id_ in self._killed:
......
...@@ -21,17 +21,17 @@ class NaiveTuner(Tuner): ...@@ -21,17 +21,17 @@ class NaiveTuner(Tuner):
def generate_parameters(self, parameter_id, **kwargs): def generate_parameters(self, parameter_id, **kwargs):
self.cur += 1 self.cur += 1
_logger.info('generate parameters: %s' % self.cur) _logger.info('generate parameters: %s', self.cur)
return { 'x': self.cur } return { 'x': self.cur }
def receive_trial_result(self, parameter_id, parameters, value, **kwargs): def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
reward = extract_scalar_reward(value) reward = extract_scalar_reward(value)
_logger.info('receive trial result: %s, %s, %s' % (parameter_id, parameters, reward)) _logger.info('receive trial result: %s, %s, %s', parameter_id, parameters, reward)
_result.write('%d %d\n' % (parameters['x'], reward)) _result.write('%d %d\n' % (parameters['x'], reward))
_result.flush() _result.flush()
def update_search_space(self, search_space): def update_search_space(self, search_space):
_logger.info('update_search_space: %s' % search_space) _logger.info('update_search_space: %s', search_space)
with open(os.path.join(_pwd, 'tuner_search_space.json'), 'w') as file_: with open(os.path.join(_pwd, 'tuner_search_space.json'), 'w') as file_:
json.dump(search_space, file_) json.dump(search_space, file_)
......
...@@ -38,8 +38,7 @@ class MnistNetwork(object): ...@@ -38,8 +38,7 @@ class MnistNetwork(object):
input_dim = int(math.sqrt(self.x_dim)) input_dim = int(math.sqrt(self.x_dim))
except: except:
logger.debug( logger.debug(
'input dim cannot be sqrt and reshape. input dim: ' + 'input dim cannot be sqrt and reshape. input dim: %s', self.x_dim)
str(self.x_dim))
raise raise
x_image = tf.reshape(self.x, [-1, input_dim, input_dim, 1]) x_image = tf.reshape(self.x, [-1, input_dim, input_dim, 1])
with tf.name_scope('conv1'): with tf.name_scope('conv1'):
...@@ -132,7 +131,7 @@ def main(): ...@@ -132,7 +131,7 @@ def main():
mnist_network.build_network() mnist_network.build_network()
logger.debug('Mnist build network done.') logger.debug('Mnist build network done.')
graph_location = tempfile.mkdtemp() graph_location = tempfile.mkdtemp()
logger.debug('Saving graph to: %s' % graph_location) logger.debug('Saving graph to: %s', graph_location)
train_writer = tf.summary.FileWriter(graph_location) train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph()) train_writer.add_graph(tf.get_default_graph())
test_acc = 0.0 test_acc = 0.0
......
...@@ -53,7 +53,7 @@ class MnistNetwork(object): ...@@ -53,7 +53,7 @@ class MnistNetwork(object):
input_dim = int(math.sqrt(self.x_dim)) input_dim = int(math.sqrt(self.x_dim))
except: except:
#print('input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim)) #print('input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim))
logger.debug('input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim)) logger.debug('input dim cannot be sqrt and reshape. input dim: %s', self.x_dim)
raise raise
x_image = tf.reshape(self.x, [-1, input_dim, input_dim, 1]) x_image = tf.reshape(self.x, [-1, input_dim, input_dim, 1])
...@@ -147,7 +147,7 @@ def main(): ...@@ -147,7 +147,7 @@ def main():
# Write log # Write log
graph_location = tempfile.mkdtemp() graph_location = tempfile.mkdtemp()
logger.debug('Saving graph to: %s' % graph_location) logger.debug('Saving graph to: %s', graph_location)
# print('Saving graph to: %s' % graph_location) # print('Saving graph to: %s' % graph_location)
train_writer = tf.summary.FileWriter(graph_location) train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph()) train_writer.add_graph(tf.get_default_graph())
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment