OpenDAS / fairscale · Commits

Commit f81a60be (unverified)
Authored Aug 08, 2022 by Crutcher Dunnavant; committed via GitHub on Aug 08, 2022
Parent: 5c60f33c

    Disable broken tests (#1055)

Showing 5 changed files, with 18 additions and 2 deletions (+18 / -2):
tests/experimental/nn/test_ssd_offload.py     +3  / -0
tests/experimental/nn/test_sync_batchnorm.py  +12 / -2
tests/nn/data_parallel/test_fsdp_memory.py    +1  / -0
tests/nn/data_parallel/test_fsdp_regnet.py    +1  / -0
tests/optim/test_oss.py                       +1  / -0
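Four of the five files apply pytest's built-in skip marker, which deselects a test at collection time while leaving its code in the tree. As a minimal illustration of the marker's behavior (the test names below are made up for the example, not taken from this commit):

import pytest


@pytest.mark.skip("broken at head")
def test_known_broken():
    # Never executed; pytest reports it as SKIPPED with the given reason.
    assert 1 == 2


def test_still_runs():
    assert 1 + 1 == 2

Running pytest with -rs prints each skip reason in the summary, so the disabled tests stay visible in CI output.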
tests/experimental/nn/test_ssd_offload.py

@@ -79,6 +79,7 @@ def test_ssd_handle_dispatch_bwd():
     assert torch.equal(ssd_handle.grad, orig_copy.grad)
 
 
+@pytest.mark.skip("broken at head")
 def test_ssd_handle_dispatch_bwd_hook():
     _init()

@@ -277,6 +278,7 @@ def test_ssd_flat_parameter_view_modify():
     assert ssd_flat_param.storage_state == so.StorageState.ON_CPU_DIRTY
 
 
+@pytest.mark.skip("broken at head")
 def test_ssd_flat_parameter_view_bwd():
     _init()

@@ -344,6 +346,7 @@ def test_ssd_flat_parameter_view_bwd():
     assert "one" in hooks_called
 
 
+@pytest.mark.skip("broken at head")
 def test_ssd_flat_parameter_view_bwd_parameterization():
     _init()
tests/experimental/nn/test_sync_batchnorm.py

@@ -97,7 +97,12 @@ def parity3d_bn():
     check_parity(torch_bn, fs_bn, x)
 
 
 @pg_test()
-def parity3d_checkpoint_syncbn():
+@pytest.mark.skip("broken at head")
+def test_parity3d_checkpoint_syncbn():
+    assert 1 == 2
+
+# @pg_test()
+def parity3d_checkpoint_syncbn():
     rank = dist.get_rank()
     torch.cuda.set_device(rank)

@@ -110,7 +115,12 @@ def parity3d_checkpoint_syncbn():
     check_parity_ddp(torch_bn, fs_bn, x)
 
 
 @pg_test()
-def parity3d_checkpoint_syncbn_twice():
+@pytest.mark.skip("broken at head")
+def test_parity3d_checkpoint_syncbn_twice():
+    assert 1 == 2
+
+# @pg_test()
+def parity3d_checkpoint_syncbn_twice():
     rank = dist.get_rank()
     torch.cuda.set_device(rank)
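The pattern in this file has two parts: a skipped stub keeps the original test_-prefixed name (with a deliberately failing assert 1 == 2 body), while the real body loses its test_ prefix and has its @pg_test() decorator commented out, so pytest's default collection rules ignore it entirely. A hedged sketch of the same idea, with hypothetical names:

import pytest


@pytest.mark.skip("broken at head")
def test_parity_example():
    # Placeholder; never runs. The failing assert guards against this
    # stub ever being collected and reported as a passing test.
    assert 1 == 2


# Original body, renamed so pytest (which collects test_* functions
# by default) no longer picks it up until the breakage is fixed.
def parity_example():
    ...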
tests/nn/data_parallel/test_fsdp_memory.py

@@ -159,6 +159,7 @@ def _distributed_worker(
 @skip_if_single_gpu
+@pytest.mark.timeout(120)
 @pytest.mark.parametrize("ckpt", ["no_ckpt", "ckpt"])
 @pytest.mark.parametrize("fsdp", ["ddp", "fsdp", "fsdp_amp_default", "fsdp_amp_compute_dtype32"])
 def test_fsdp_memory(fsdp, ckpt):
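Unlike the other files, this one gains a timeout marker rather than a skip. @pytest.mark.timeout is provided by the pytest-timeout plugin and makes a runaway test fail once the limit is reached instead of hanging the CI job. A minimal sketch, assuming pytest-timeout is installed:

import time

import pytest


@pytest.mark.timeout(120)  # fail the test if it runs longer than 120 seconds
def test_bounded_runtime():
    time.sleep(1)  # stand-in for the real distributed workload
    assert True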
tests/nn/data_parallel/test_fsdp_regnet.py

@@ -349,6 +349,7 @@ def _distributed_worker(
 # We use strings for precision and flatten params instead of bool to
 # make the pytest output more readable.
+@pytest.mark.skip("broken at head")
 @skip_if_single_gpu
 @pytest.mark.parametrize("precision", ["full", "mixed"])
 @pytest.mark.parametrize("flatten", ["flatten", "no_flatten"])
tests/optim/test_oss.py

@@ -958,6 +958,7 @@ def run_ddp_parity(rank, world_size, backend, temp_file_name, change_train_graph
     dist.destroy_process_group()
 
 
+@pytest.mark.skip("broken at head")
 @skip_if_no_cuda
 @skip_if_single_gpu
 @pytest.mark.parametrize("change_train_graph", [True, False])
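When skip is stacked above parametrize as it is here, the marker applies to every generated case: pytest still expands the parameter grid but reports each case as skipped rather than running it. A small illustration with a made-up test:

import pytest


@pytest.mark.skip("broken at head")
@pytest.mark.parametrize("change_train_graph", [True, False])
def test_ddp_parity_example(change_train_graph):
    # Both parametrized cases appear in the report as SKIPPED.
    assert isinstance(change_train_graph, bool)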