OpenDAS / apex · Commit 646fc0d0
Authored Jan 23, 2019 by Michael Carilli
commenting out print statements
Parent: 56ea6d78
Showing 2 changed files with 5 additions and 5 deletions:

  apex/amp/utils.py             +1 -1
  apex/parallel/distributed.py  +4 -4
apex/amp/utils.py

@@ -82,7 +82,7 @@ def casted_args(cast_fn, args, kwargs):
     return new_args
 
 def cached_cast(cast_fn, x, cache):
-    print("Calling cached_cast")
+    # print("Calling cached_cast")
     if is_nested(x):
         return type(x)([cached_cast(y) for y in x])
     if x in cache:
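Note on the change above: cached_cast casts a value with cast_fn and memoizes the result in cache, recursing into nested containers, so each input is cast at most once; the removed print fired on every call. The snippet below is a minimal illustrative sketch of that caching pattern only, not apex's implementation; the id()-based cache key and the container check stand in for apex's cache keying and its is_nested helper, neither of which is shown in this hunk.

# Illustrative sketch (assumption), not apex's cached_cast.
def sketch_cached_cast(cast_fn, x, cache):
    # Recurse into lists/tuples so nested containers of tensors are handled.
    if isinstance(x, (list, tuple)):
        return type(x)(sketch_cached_cast(cast_fn, y, cache) for y in x)
    # Reuse a previously cast result for the same input object.
    key = id(x)
    if key in cache:
        return cache[key]
    cache[key] = cast_fn(x)
    return cache[key]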
apex/parallel/distributed.py

@@ -390,7 +390,7 @@ class DistributedDataParallel(Module):
     def allreduce_fallback(self):
         grads = [param.grad.data for param in self.module.parameters() if param.grad is not None]
-        print("In allreduce_fallback: {}".format(len(grads)))
+        # print("In allreduce_fallback: {}".format(len(grads)))
         split_buckets = split_half_float_double(grads)
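In the hunk above, allreduce_fallback gathers every gradient that exists and hands the list to split_half_float_double; the silenced print only reported how many gradients were collected. split_half_float_double itself is not part of this diff; as an illustration only, a helper with that role would group tensors by dtype so each all-reduce runs over tensors of a single precision, roughly like the assumed sketch below.

# Illustrative sketch (assumption): group gradients by dtype (e.g. half, float,
# double) so each communication call handles a homogeneous bucket.
def sketch_split_by_dtype(grads):
    buckets = {}
    for g in grads:
        buckets.setdefault(g.dtype, []).append(g)
    return list(buckets.values())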
@@ -416,7 +416,7 @@ class DistributedDataParallel(Module):
         self.buckets[bucket_idx][bucket_loc] = param.grad.data
         self.buckets_ready_size[bucket_idx] += 1
-        print(self.buckets_ready_size)
+        # print(self.buckets_ready_size)
         if self.buckets_ready_size[bucket_idx] == self.bucket_sizes[bucket_idx]:
             if bucket_idx == self.next_bucket:
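The hunk above touches the per-bucket bookkeeping: a gradient is written into its slot, the bucket's ready counter is incremented, and once the counter matches the bucket's size the bucket can be reduced (starting with self.next_bucket). The commented-out print dumped the counters on every gradient. A compact sketch of that counting pattern, under the assumption that a full bucket immediately triggers its reduction, is:

# Illustrative sketch (assumption) of bucket-readiness counting: reduce a bucket
# as soon as all of its gradient slots have been filled.
def sketch_mark_ready(buckets, ready_size, bucket_sizes, bucket_idx, bucket_loc, grad, reduce_fn):
    buckets[bucket_idx][bucket_loc] = grad
    ready_size[bucket_idx] += 1
    if ready_size[bucket_idx] == bucket_sizes[bucket_idx]:
        reduce_fn(buckets[bucket_idx])  # e.g. launch an all-reduce over the whole bucket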
@@ -477,8 +477,8 @@ class DistributedDataParallel(Module):
             self.next_bucket = 0
             self.ready_buckets_not_reduced = set()
-            print(len(param_list), len(self.active_params), [len(b) for b in self.buckets],
-                  self.needs_refresh)
+            # print(len(param_list), len(self.active_params), [len(b) for b in self.buckets],
+            #       self.needs_refresh)
         self.active_params = param_list
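The final hunk sits where the bucket bookkeeping is re-initialized and the list of active parameters is recorded for the next iteration; the silenced two-line print compared the incoming param_list against self.active_params and the current bucket layout. Purely as an assumption about that (partly collapsed) reset step, a rough sketch:

# Illustrative sketch (assumption): rebuild bucket state when the set of
# parameters seen in backward changes, then remember the new active set.
def sketch_refresh_state(state, param_list):
    if state["needs_refresh"]:
        state["next_bucket"] = 0
        state["ready_buckets_not_reduced"] = set()
    state["active_params"] = param_list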