Unverified Commit 52e40cb8 authored by Harsh Bardhan Mishra, committed by GitHub

Fix Lazy Logging (#3108)

parent 07dd4c54
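This commit converts eager logging calls, which build the full message with '%' or string concatenation before the logger can check its level, into lazy calls that pass the arguments separately; the logging module then interpolates them only if the record is actually emitted. This is the pattern pylint flags as logging-not-lazy (W1201). A minimal sketch of the conversion, with 'value' as an illustrative placeholder name:

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
value = 42  # illustrative placeholder

# Eager: the message string is built even when DEBUG is disabled.
logger.debug('value: %s' % value)

# Lazy: the arguments are interpolated only when the record is emitted.
logger.debug('value: %s', value)

Note that a concatenation such as 'msg: ' + str(x) becomes a placeholder plus argument ('msg: %s', x); passing an argument without a placeholder, as in logger.debug('msg: ', x), leaves the argument with nothing to bind to and produces a formatting error when the record is emitted.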
@@ -218,8 +218,7 @@ def run_epoch(batches, answer_net, is_training):
loss, _, = sess.run(
[answer_net.loss, answer_net.train_op], feed_dict=feed_dict)
if count % 100 == 0:
logger.debug('%d %g except:%g, loss:%g' %
(count, used, used / count * len(batches), loss))
logger.debug('%d %g except:%g, loss:%g', count, used, used / count * len(batches), loss)
loss_sum += loss
else:
feed_dict = {answer_net.query_word: query,
@@ -239,8 +238,7 @@ def run_epoch(batches, answer_net, is_training):
contexts += context
ids = np.concatenate((ids, sample_id))
if count % 100 == 0:
logger.debug('%d %g except:%g' %
(count, used, used / count * len(batches)))
logger.debug('%d %g except:%g', count, used, used / count * len(batches))
loss = loss_sum / len(batches)
if is_training:
return loss
@@ -327,7 +325,7 @@ def train_with_graph(graph, qp_pairs, dev_qp_pairs):
train_batches = data.get_batches(qp_pairs, cfg.batch_size)
train_loss = run_epoch(train_batches, train_model, True)
logger.debug('epoch ' + str(epoch) +
' loss: ' + str(train_loss))
logger.debug('epoch %d loss: %s', epoch, train_loss)
dev_batches = list(data.get_batches(
dev_qp_pairs, cfg.batch_size))
_, position1, position2, ids, contexts = run_epoch(
@@ -361,8 +359,7 @@ def train_with_graph(graph, qp_pairs, dev_qp_pairs):
with open(os.path.join(save_path, 'epoch%d.score' % epoch), 'wb') as file:
pickle.dump(
(position1, position2, ids, contexts), file)
logger.debug('epoch %d acc %g bestacc %g' %
(epoch, acc, bestacc))
logger.debug('epoch %d acc %g bestacc %g', epoch, acc, bestacc)
if patience <= iter:
break
logger.debug('save done.')
@@ -112,7 +112,7 @@ def main(args):
if os.path.isfile(load_checkpoint_path):
model_state_dict = load_checkpoint(load_checkpoint_path)
logger.info("test : " + load_checkpoint_path)
logger.info("test : ", load_checkpoint_path)
logger.info(type(model_state_dict))
model.load_state_dict(model_state_dict)
@@ -63,7 +63,7 @@ def run(X_train, X_test, y_train, y_test, model):
'''Train model and predict result'''
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
LOG.debug('score: %s' % score)
LOG.debug('score: %s', score)
nni.report_final_result(score)
if __name__ == '__main__':
@@ -74,7 +74,7 @@ def run(X_train, X_test, y_train, y_test, model):
model.fit(X_train, y_train)
predict_y = model.predict(X_test)
score = r2_score(y_test, predict_y)
LOG.debug('r2 score: %s' % score)
LOG.debug('r2 score: %s', score)
nni.report_final_result(score)
if __name__ == '__main__':
@@ -387,8 +387,7 @@ class OpEvo(Tuner):
self.population = Population(search_space,
self.mutate_rate,
self.optimize_mode)
self.logger.debug('Total search space volume: '
+ str(self.population.volume))
self.logger.debug('Total search space volume: %s', self.population.volume)
if not self.serve_list:
self.serve_list = self.population.get_offspring(
@@ -219,8 +219,7 @@ def run_epoch(batches, answer_net, is_training):
loss, _, = sess.run(
[answer_net.loss, answer_net.train_op], feed_dict=feed_dict)
if count % 100 == 0:
logger.debug('%d %g except:%g, loss:%g' %
(count, used, used / count * len(batches), loss))
logger.debug('%d %g except:%g, loss:%g', count, used, used / count * len(batches), loss)
loss_sum += loss
else:
feed_dict = {answer_net.query_word: query,
@@ -240,8 +239,7 @@ def run_epoch(batches, answer_net, is_training):
contexts += context
ids = np.concatenate((ids, sample_id))
if count % 100 == 0:
logger.debug('%d %g except:%g' %
(count, used, used / count * len(batches)))
logger.debug('%d %g except:%g', count, used, used / count * len(batches))
loss = loss_sum / len(batches)
if is_training:
return loss
@@ -333,7 +331,7 @@ def train_with_graph(p_graph, qp_pairs, dev_qp_pairs):
train_batches = data.get_batches(qp_pairs, cfg.batch_size)
train_loss = run_epoch(train_batches, train_model, True)
logger.debug('epoch ' + str(epoch) +
' loss: ' + str(train_loss))
logger.debug('epoch %d loss: %s', epoch, train_loss)
dev_batches = list(data.get_batches(
dev_qp_pairs, cfg.batch_size))
_, position1, position2, ids, contexts = run_epoch(
@@ -369,8 +367,7 @@ def train_with_graph(p_graph, qp_pairs, dev_qp_pairs):
with open(os.path.join(save_path, 'epoch%d.score' % epoch), 'wb') as file:
pickle.dump(
(position1, position2, ids, contexts), file)
logger.debug('epoch %d acc %g bestacc %g' %
(epoch, acc, bestacc))
logger.debug('epoch %d acc %g bestacc %g', epoch, acc, bestacc)
if patience <= iter:
break
logger.debug('save done.')
@@ -35,8 +35,7 @@ class SimpleTuner(Tuner):
'checksum': None,
'path': '',
}
_logger.info('generate parameter for father trial %s' %
parameter_id)
_logger.info('generate parameter for father trial %s', parameter_id)
self.thread_lock.release()
return {
'prev_id': 0,
@@ -18,7 +18,7 @@ class NaiveAssessor(Assessor):
_logger.info('init')
def assess_trial(self, trial_job_id, trial_history):
_logger.info('assess trial %s %s' % (trial_job_id, trial_history))
_logger.info('assess trial %s %s', trial_job_id, trial_history)
id_ = trial_history[0]
if id_ in self._killed:
@@ -21,17 +21,17 @@ class NaiveTuner(Tuner):
def generate_parameters(self, parameter_id, **kwargs):
self.cur += 1
_logger.info('generate parameters: %s' % self.cur)
_logger.info('generate parameters: %s', self.cur)
return { 'x': self.cur }
def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
reward = extract_scalar_reward(value)
_logger.info('receive trial result: %s, %s, %s' % (parameter_id, parameters, reward))
_logger.info('receive trial result: %s, %s, %s', parameter_id, parameters, reward)
_result.write('%d %d\n' % (parameters['x'], reward))
_result.flush()
def update_search_space(self, search_space):
_logger.info('update_search_space: %s' % search_space)
_logger.info('update_search_space: %s', search_space)
with open(os.path.join(_pwd, 'tuner_search_space.json'), 'w') as file_:
json.dump(search_space, file_)
@@ -38,8 +38,7 @@ class MnistNetwork(object):
input_dim = int(math.sqrt(self.x_dim))
except:
logger.debug(
'input dim cannot be sqrt and reshape. input dim: ' +
str(self.x_dim))
'input dim cannot be sqrt and reshape. input dim: %s', self.x_dim)
raise
x_image = tf.reshape(self.x, [-1, input_dim, input_dim, 1])
with tf.name_scope('conv1'):
@@ -132,7 +131,7 @@ def main():
mnist_network.build_network()
logger.debug('Mnist build network done.')
graph_location = tempfile.mkdtemp()
logger.debug('Saving graph to: %s' % graph_location)
logger.debug('Saving graph to: %s', graph_location)
train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph())
test_acc = 0.0
@@ -53,7 +53,7 @@ class MnistNetwork(object):
input_dim = int(math.sqrt(self.x_dim))
except:
#print('input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim))
logger.debug('input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim))
logger.debug('input dim cannot be sqrt and reshape. input dim: %s', self.x_dim)
raise
x_image = tf.reshape(self.x, [-1, input_dim, input_dim, 1])
@@ -147,7 +147,7 @@ def main():
# Write log
graph_location = tempfile.mkdtemp()
logger.debug('Saving graph to: %s' % graph_location)
logger.debug('Saving graph to: %s', graph_location)
# print('Saving graph to: %s' % graph_location)
train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph())