Commit c1e4526b authored by Lawrence McAfee

renamed 'free' -> 'deallocate'

parent d10f81c5
@@ -76,8 +76,8 @@ def get_forward_backward_func():
 # )
 # # <<<
 # <<<
-def free_output_tensor(out):
-    '''Pseudo-free (i.e., set to scalar) the output tensor's '.data' field.
+def deallocate_output_tensor(out):
+    '''Pseudo-deallocate (i.e., set to scalar) the output tensor's '.data' field.
 
     This method should be called right after the output tensor has been
     sent to the next pipeline stage. At this point, the output tensor is
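
For context, a minimal sketch of what the pseudo-deallocation described above amounts to: keep the tensor object alive (its '.grad_fn' is still needed for the backward pass) but replace its '.data' buffer with a one-element placeholder so the activation memory can be reclaimed. The exact assertions below are assumptions, not necessarily the committed implementation.

```python
import torch

def deallocate_output_tensor(out):
    '''Pseudo-deallocate the output tensor's '.data' field, keeping the tensor
    object (and its .grad_fn) alive for the later backward pass.'''
    if out is None:
        return
    assert isinstance(out, torch.Tensor), \
        "expected Tensor, found %s." % type(out).__name__
    # Deallocating a view would not release the underlying storage anyway.
    assert out._base is None, \
        "counter-productive to deallocate a view of another tensor."
    # Replace the full activation buffer with a single-element placeholder;
    # the autograd graph reachable through out.grad_fn is unaffected.
    out.data = torch.empty((1,), device=out.device, dtype=out.dtype)
```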
@@ -96,7 +96,7 @@ def free_output_tensor(out):
 def custom_backward(output, grad_output):
     '''Directly call C++ autograd engine.
 
-    To make the 'free_output_tensor' (above) optimization work, the C++
+    To make the 'deallocate_output_tensor' (above) optimization work, the C++
     autograd engine must be called directly, bypassing Pytorch's
     torch.autograd.backward. Pytorch's 'backward' checks that the output and
     grad have the same shape, while C++'s 'backward' does not.
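
A hedged sketch of the direct C++ engine call described in this docstring. `Variable._execution_engine.run_backward` is an internal PyTorch entry point and its keyword arguments have varied across releases, so treat the exact signature here as an assumption; the relevant property is that it does not re-validate that `output` and `grad_output` have the same shape, which `torch.autograd.backward` would do.

```python
import torch
from torch.autograd.variable import Variable

def custom_backward(output, grad_output):
    '''Backward pass that tolerates 'output.data' having been pseudo-deallocated.'''
    assert output.numel() == 1, \
        "output should be pseudo-deallocated in the schedule, to optimize memory"
    if grad_output is None:
        # An implicit gradient only makes sense for a scalar output.
        grad_output = torch.ones_like(output, memory_format=torch.preserve_format)
    # Internal API (see torch/csrc/autograd/python_engine.cpp); kwargs may
    # differ by PyTorch version. Bypasses the Python-level shape check.
    Variable._execution_engine.run_backward(
        tensors=(output,),
        grad_tensors=(grad_output,),
        keep_graph=False,
        create_graph=False,
        inputs=tuple(),
        allow_unreachable=True,
        accumulate_grad=True,
    )
```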
@@ -428,7 +428,7 @@ def forward_backward_pipelining_with_interleaving(forward_step_func, data_iterat
         # >>>
         pax({"output_tensor": output_tensor})
         # <<<
-        free_output_tensor(output_tensor)
+        deallocate_output_tensor(output_tensor)
 
     # Run 1F1B in steady state.
     for k in range(num_microbatches_remaining):
@@ -492,7 +492,7 @@ def forward_backward_pipelining_with_interleaving(forward_step_func, data_iterat
                 output_tensor, input_tensor_grad,
                 recv_prev=recv_prev, recv_next=recv_next,
                 tensor_shape=tensor_shape, timers=timers)
-        free_output_tensor(output_tensor)
+        deallocate_output_tensor(output_tensor)
 
         # Put input_tensor and output_tensor_grad in data structures in the
         # right location.
@@ -668,7 +668,7 @@ def forward_backward_pipelining_without_interleaving(forward_step_func, data_ite
         if not forward_only:
             input_tensors.append(input_tensor)
             output_tensors.append(output_tensor)
-            free_output_tensor(output_tensor[0])
+            deallocate_output_tensor(output_tensor[0])
 
     # Before running 1F1B, need to receive first forward tensor.
     # If all microbatches are run in warmup / cooldown phase, then no need to
@@ -697,7 +697,7 @@ def forward_backward_pipelining_without_interleaving(forward_step_func, data_ite
             # Add input_tensor and output_tensor to end of list.
             input_tensors.append(input_tensor)
             output_tensors.append(output_tensor)
-            free_output_tensor(output_tensor[0])
+            deallocate_output_tensor(output_tensor[0])
 
             # Pop input_tensor and output_tensor from the start of the list for
             # the backward pass.
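
Tying the hunks together, the pattern in both pipeline schedules is: run the forward step, send the output to the next stage, keep the tensor object in `output_tensors` so its autograd graph survives, pseudo-deallocate its '.data', and later backprop through it with the gradient received from downstream. Below is a toy, single-process illustration using the two sketches above (no pipeline communication; `grad_from_next_stage` is an invented stand-in for the received gradient).

```python
weight = torch.randn(1024, 1024, requires_grad=True)
x = torch.randn(32, 1024)

# Forward step: the activation that would be sent to the next stage.
output_tensor = x @ weight
output_tensors = [output_tensor]            # keep the object so .grad_fn survives
deallocate_output_tensor(output_tensor)     # '.data' shrinks to a single element

# Backward step: the pseudo-deallocated output plus the gradient "received"
# from the next stage is enough, because custom_backward skips the
# output/grad shape comparison.
grad_from_next_stage = torch.ones(32, 1024)
out = output_tensors.pop(0)
custom_backward(out, grad_from_next_stage)
print(weight.grad.shape)                    # torch.Size([1024, 1024])
```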