Commit a0ae9e91 authored by Michael Carilli's avatar Michael Carilli
Browse files

Removing some print statements

parent c7dcb0e1
...@@ -390,8 +390,6 @@ class DistributedDataParallel(Module): ...@@ -390,8 +390,6 @@ class DistributedDataParallel(Module):
def allreduce_fallback(self): def allreduce_fallback(self):
grads = [param.grad.data for param in self.module.parameters() if param.grad is not None] grads = [param.grad.data for param in self.module.parameters() if param.grad is not None]
# print("In allreduce_fallback: {}".format(len(grads)))
split_buckets = split_half_float_double(grads) split_buckets = split_half_float_double(grads)
# If retain_allreduce_buffers is True and delay_allreduce is False, # If retain_allreduce_buffers is True and delay_allreduce is False,
...@@ -416,7 +414,6 @@ class DistributedDataParallel(Module): ...@@ -416,7 +414,6 @@ class DistributedDataParallel(Module):
self.buckets[bucket_idx][bucket_loc] = param.grad.data self.buckets[bucket_idx][bucket_loc] = param.grad.data
self.buckets_ready_size[bucket_idx] += 1 self.buckets_ready_size[bucket_idx] += 1
# print(self.buckets_ready_size)
if self.buckets_ready_size[bucket_idx] == self.bucket_sizes[bucket_idx]: if self.buckets_ready_size[bucket_idx] == self.bucket_sizes[bucket_idx]:
if bucket_idx == self.next_bucket: if bucket_idx == self.next_bucket:
...@@ -477,8 +474,6 @@ class DistributedDataParallel(Module): ...@@ -477,8 +474,6 @@ class DistributedDataParallel(Module):
self.next_bucket = 0 self.next_bucket = 0
self.ready_buckets_not_reduced = set() self.ready_buckets_not_reduced = set()
# print(len(param_list), len(self.active_params), [len(b) for b in self.buckets],
# self.needs_refresh)
self.active_params = param_list self.active_params = param_list
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment