"tests/python/common/function/test_basics.py" did not exist on "3e43d7b8d203df1f2e2e2f0b5c029dfebeec549b"
Commit fdcaeba0 authored by Deepak Narayanan

Remove timing labels that don't make sense

parent 7d367b1d
@@ -363,7 +363,6 @@ def forward_and_backward_steps_with_communication(forward_step_func, data_iterat
                                                    input_tensors, output_tensors,
                                                    losses_reduced, timers):
     # Forward model for one step.
-    timers('forward').start()
     timers('forward-compute').start()
     output_tensor = forward_step_func(data_iterator, model, input_tensor)
     timers('forward-compute').stop()
@@ -381,7 +380,6 @@ def forward_and_backward_steps_with_communication(forward_step_func, data_iterat
                     recv_forward=False,
                     recv_backward=True)
         timers('forward-send-backward-recv').stop()
-    timers('forward').stop()
     input_tensors.append(input_tensor)
     output_tensors.append(output_tensor)
@@ -390,7 +388,6 @@ def forward_and_backward_steps_with_communication(forward_step_func, data_iterat
     output_tensor = output_tensors.pop(0)
     # Backward pass for one step.
-    timers('backward').start()
     timers('backward-compute').start()
     input_grad_tensor = \
         backward_step(optimizer, model, input_tensor, output_tensor, output_tensor_grad)
@@ -406,7 +403,6 @@ def forward_and_backward_steps_with_communication(forward_step_func, data_iterat
         timers('backward-send-forward-recv').stop()
     else:
         input_tensor = None
-    timers('backward').stop()
     return input_tensor
@@ -437,7 +433,6 @@ def train_step(forward_step_func, data_iterator,
     losses_reduced = []
     # Run warmup forward passes.
-    timers('forward').start()
     for i in range(num_warmup_microbatches):
         if args.pipeline_model_parallel_size > 1:
             forward_step_with_communication(
@@ -453,7 +448,6 @@ def train_step(forward_step_func, data_iterator,
             input_tensors.append(input_tensor)
             output_tensors.append(output_tensor)
             timers('forward-compute').stop()
-    timers('forward').stop()
     # Before running 1F1B, need to receive first forward tensor.
     if (num_microbatches_in_minibatch - num_warmup_microbatches) > 0:
@@ -478,7 +472,6 @@ def train_step(forward_step_func, data_iterator,
             losses_reduced, timers)
     # Run cooldown backward passes.
-    timers('backward').start()
     for i in range(num_warmup_microbatches):
         if args.pipeline_model_parallel_size > 1:
             backward_step_with_communication(
@@ -535,7 +528,6 @@ def train_step(forward_step_func, data_iterator,
         else:
             optimizer.clip_master_grads(args.clip_grad)
         timers('backward-clip-grad').stop()
-    timers('backward').stop()
     # Update parameters.
     timers('optimizer').start()
@@ -593,12 +585,10 @@ def training_log(loss_dict, total_loss_dict, learning_rate, iteration,
     def add_to_logging(name):
         if name in timers.timers:
             timers_to_log.append(name)
-    add_to_logging('forward')
     add_to_logging('forward-compute')
     add_to_logging('forward-recv')
     add_to_logging('forward-send')
     add_to_logging('forward-send-backward-recv')
-    add_to_logging('backward')
     add_to_logging('backward-compute')
     add_to_logging('backward-recv')
     add_to_logging('backward-send')
...
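For context, the timers(...) calls in this diff assume a small callable registry of named, restartable timers. Below is a minimal sketch of that interface, for illustration only: the class names and internals here are assumptions, and the actual Megatron-LM implementation additionally synchronizes CUDA around the clock reads and knows how to format the accumulated times for logging.

```python
# Minimal sketch (an assumption, not the Megatron-LM implementation) of the
# timers interface used above: timers('name') returns a named timer that
# accumulates wall-clock time across start()/stop() pairs.
import time


class _Timer:
    """Accumulates wall-clock time across repeated start()/stop() pairs."""

    def __init__(self, name):
        self.name = name
        self.elapsed_ = 0.0
        self.started_ = False
        self.start_time = None

    def start(self):
        assert not self.started_, 'timer has already been started'
        self.start_time = time.time()
        self.started_ = True

    def stop(self):
        assert self.started_, 'timer is not started'
        self.elapsed_ += time.time() - self.start_time
        self.started_ = False

    def elapsed(self, reset=True):
        """Return accumulated seconds, optionally resetting the accumulator."""
        value = self.elapsed_
        if reset:
            self.elapsed_ = 0.0
        return value


class Timers:
    """Callable registry: timers('name') creates the timer on first use."""

    def __init__(self):
        self.timers = {}

    def __call__(self, name):
        if name not in self.timers:
            self.timers[name] = _Timer(name)
        return self.timers[name]


# Usage mirroring the diff above:
timers = Timers()
timers('forward-compute').start()
# ... run the forward pass here ...
timers('forward-compute').stop()
print(timers('forward-compute').elapsed())
```

Under this interface, the deleted coarse labels add no information: 'forward' only ever wrapped the finer-grained 'forward-compute' and 'forward-send-backward-recv' spans, and 'backward' likewise wrapped its sub-spans, so the fine-grained timers already account for the same wall-clock time, which is presumably why this commit drops the coarse labels.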