Commit 250bac89 authored by LXYTSOS's avatar LXYTSOS Committed by Soumith Chintala
Browse files

utils.py in references can't work with pytorch-cpu (#1023)

* Fixed: utils.py can't work with pytorch-cpu

utils.py can't work with pytorch-cpu because of this line of code `memory=torch.cuda.max_memory_allocated()`

* Fixed: utils.py can't work with pytorch-cpu

utils.py can't work with pytorch-cpu because of this line of code: `memory=torch.cuda.max_memory_allocated()`
parent 0fb41c28
...@@ -115,15 +115,25 @@ class MetricLogger(object): ...@@ -115,15 +115,25 @@ class MetricLogger(object):
iter_time = SmoothedValue(fmt='{avg:.4f}') iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}') data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd' space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
log_msg = self.delimiter.join([ if torch.cuda.is_available():
header, log_msg = self.delimiter.join([
'[{0' + space_fmt + '}/{1}]', header,
'eta: {eta}', '[{0' + space_fmt + '}/{1}]',
'{meters}', 'eta: {eta}',
'time: {time}', '{meters}',
'data: {data}', 'time: {time}',
'max mem: {memory:.0f}' 'data: {data}',
]) 'max mem: {memory:.0f}'
])
else:
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
])
MB = 1024.0 * 1024.0 MB = 1024.0 * 1024.0
for obj in iterable: for obj in iterable:
data_time.update(time.time() - end) data_time.update(time.time() - end)
...@@ -132,11 +142,17 @@ class MetricLogger(object): ...@@ -132,11 +142,17 @@ class MetricLogger(object):
if i % print_freq == 0: if i % print_freq == 0:
eta_seconds = iter_time.global_avg * (len(iterable) - i) eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
print(log_msg.format( if torch.cuda.is_available():
i, len(iterable), eta=eta_string, print(log_msg.format(
meters=str(self), i, len(iterable), eta=eta_string,
time=str(iter_time), data=str(data_time), meters=str(self),
memory=torch.cuda.max_memory_allocated() / MB)) time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1 i += 1
end = time.time() end = time.time()
total_time = time.time() - start_time total_time = time.time() - start_time
......
...@@ -188,15 +188,25 @@ class MetricLogger(object): ...@@ -188,15 +188,25 @@ class MetricLogger(object):
iter_time = SmoothedValue(fmt='{avg:.4f}') iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}') data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd' space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
log_msg = self.delimiter.join([ if torch.cuda.is_available():
header, log_msg = self.delimiter.join([
'[{0' + space_fmt + '}/{1}]', header,
'eta: {eta}', '[{0' + space_fmt + '}/{1}]',
'{meters}', 'eta: {eta}',
'time: {time}', '{meters}',
'data: {data}', 'time: {time}',
'max mem: {memory:.0f}' 'data: {data}',
]) 'max mem: {memory:.0f}'
])
else:
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
])
MB = 1024.0 * 1024.0 MB = 1024.0 * 1024.0
for obj in iterable: for obj in iterable:
data_time.update(time.time() - end) data_time.update(time.time() - end)
...@@ -205,11 +215,17 @@ class MetricLogger(object): ...@@ -205,11 +215,17 @@ class MetricLogger(object):
if i % print_freq == 0 or i == len(iterable) - 1: if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i) eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
print(log_msg.format( if torch.cuda.is_available():
i, len(iterable), eta=eta_string, print(log_msg.format(
meters=str(self), i, len(iterable), eta=eta_string,
time=str(iter_time), data=str(data_time), meters=str(self),
memory=torch.cuda.max_memory_allocated() / MB)) time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1 i += 1
end = time.time() end = time.time()
total_time = time.time() - start_time total_time = time.time() - start_time
......
...@@ -161,15 +161,25 @@ class MetricLogger(object): ...@@ -161,15 +161,25 @@ class MetricLogger(object):
iter_time = SmoothedValue(fmt='{avg:.4f}') iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}') data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd' space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
log_msg = self.delimiter.join([ if torch.cuda.is_available():
header, log_msg = self.delimiter.join([
'[{0' + space_fmt + '}/{1}]', header,
'eta: {eta}', '[{0' + space_fmt + '}/{1}]',
'{meters}', 'eta: {eta}',
'time: {time}', '{meters}',
'data: {data}', 'time: {time}',
'max mem: {memory:.0f}' 'data: {data}',
]) 'max mem: {memory:.0f}'
])
else:
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
])
MB = 1024.0 * 1024.0 MB = 1024.0 * 1024.0
for obj in iterable: for obj in iterable:
data_time.update(time.time() - end) data_time.update(time.time() - end)
...@@ -178,11 +188,17 @@ class MetricLogger(object): ...@@ -178,11 +188,17 @@ class MetricLogger(object):
if i % print_freq == 0: if i % print_freq == 0:
eta_seconds = iter_time.global_avg * (len(iterable) - i) eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
print(log_msg.format( if torch.cuda.is_available():
i, len(iterable), eta=eta_string, print(log_msg.format(
meters=str(self), i, len(iterable), eta=eta_string,
time=str(iter_time), data=str(data_time), meters=str(self),
memory=torch.cuda.max_memory_allocated() / MB)) time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1 i += 1
end = time.time() end = time.time()
total_time = time.time() - start_time total_time = time.time() - start_time
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment