"...git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "922c5f5c3c2e887ac9832a9e460619005e0af8ae"
Commit 82ded7e7 authored by Zhaoheng Ni, committed by Facebook GitHub Bot

Fix CI tests on gpu machines (#2982)

Summary:
XLS-R tests are supposed to be skipped on GPU machines, but the [_skipIf](https://github.com/pytorch/audio/blob/main/test/torchaudio_unittest/common_utils/case_utils.py#L143-L145) decorator forces them to run. This PR skips the XLS-R tests when the machine is a CI runner and CUDA is available.
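For context, the sketch below illustrates the allow-skip-via-environment-variable pattern that the linked _skipIf helper and decorators such as skipIfCudaSmallMemory follow. It is a simplified illustration with hypothetical names (skip_if, _allow_skip), not the actual case_utils.py code; TORCHAUDIO_TEST_ALLOW_SKIP_IF_CUDA_SMALL_MEMORY is the variable this commit adds to CI.

    # Illustrative sketch only (hypothetical helpers, not the torchaudio
    # implementation): a test is skipped when its condition holds *and* the
    # environment explicitly allows skipping; otherwise it runs, so CI fails
    # loudly instead of silently skipping.
    import os
    import unittest


    def _allow_skip(key):
        # key="CUDA_SMALL_MEMORY" -> TORCHAUDIO_TEST_ALLOW_SKIP_IF_CUDA_SMALL_MEMORY
        return os.environ.get(f"TORCHAUDIO_TEST_ALLOW_SKIP_IF_{key}", "false") == "true"


    def skip_if(condition, reason, key):
        if condition and _allow_skip(key):
            return unittest.skip(reason)
        return lambda test_func: test_func  # run the test unchanged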

Pull Request resolved: https://github.com/pytorch/audio/pull/2982

Reviewed By: xiaohui-zhang

Differential Revision: D42520292

Pulled By: nateanl

fbshipit-source-id: c6ee4d4a801245226c26d9cd13e039e8d910add2
parent 55575a53
@@ -678,8 +678,9 @@ jobs:
           name: Run tests
           environment:
             TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310: true
+            TORCHAUDIO_TEST_ALLOW_SKIP_IF_CUDA_SMALL_MEMORY: true
           command: |
-            docker run -t --gpus all -v $PWD:$PWD -w $PWD -e "CI=${CI}" -e TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310 "${image_name}" .circleci/unittest/linux/scripts/run_test.sh
+            docker run -t --gpus all -v $PWD:$PWD -w $PWD -e "CI=${CI}" -e TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310 -e TORCHAUDIO_TEST_ALLOW_SKIP_IF_CUDA_SMALL_MEMORY "${image_name}" .circleci/unittest/linux/scripts/run_test.sh
       - store_test_results:
           path: test-results
       - store_artifacts:
@@ -760,6 +761,7 @@ jobs:
             TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_SOX: true
             TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310: true
             TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MOD_sentencepiece: true
+            TORCHAUDIO_TEST_ALLOW_SKIP_IF_CUDA_SMALL_MEMORY: true
       - store_test_results:
           path: test-results
       - store_artifacts:
......
@@ -678,8 +678,9 @@ jobs:
           name: Run tests
           environment:
             TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310: true
+            TORCHAUDIO_TEST_ALLOW_SKIP_IF_CUDA_SMALL_MEMORY: true
           command: |
-            docker run -t --gpus all -v $PWD:$PWD -w $PWD -e "CI=${CI}" -e TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310 "${image_name}" .circleci/unittest/linux/scripts/run_test.sh
+            docker run -t --gpus all -v $PWD:$PWD -w $PWD -e "CI=${CI}" -e TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310 -e TORCHAUDIO_TEST_ALLOW_SKIP_IF_CUDA_SMALL_MEMORY "${image_name}" .circleci/unittest/linux/scripts/run_test.sh
       - store_test_results:
           path: test-results
       - store_artifacts:
@@ -760,6 +761,7 @@ jobs:
             TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_SOX: true
             TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310: true
             TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_MOD_sentencepiece: true
+            TORCHAUDIO_TEST_ALLOW_SKIP_IF_CUDA_SMALL_MEMORY: true
       - store_test_results:
           path: test-results
       - store_artifacts:
......
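Note that the new variable has to be both defined in the environment block and forwarded into the test container: the value-less -e TORCHAUDIO_TEST_ALLOW_SKIP_IF_CUDA_SMALL_MEMORY added to docker run copies the variable from the host environment into the container, mirroring how TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310 is already passed through.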
@@ -150,8 +150,8 @@ class TestFairseqIntegration(TorchaudioTestCase):
         for i, (ref, _) in enumerate(refs["layer_results"]):
             self.assertEqual(hyp[i], ref.transpose(0, 1))

-    @skipIfCudaSmallMemory
     @XLSR_PRETRAINING_CONFIGS
+    @skipIfCudaSmallMemory
     def test_import_xlsr_pretraining_model(self, config, factory_func):
         """XLS-R pretraining models from fairseq can be imported and yields the same results"""
         batch_size, num_frames = 3, 1024
@@ -222,8 +222,8 @@ class TestFairseqIntegration(TorchaudioTestCase):
     def test_wav2vec2_recreate_pretraining_model(self, config, factory_func):
         self._test_recreate_pretraining_model(config, factory_func)

-    @skipIfCudaSmallMemory
     @XLSR_PRETRAINING_CONFIGS
+    @skipIfCudaSmallMemory
     def test_xlsr_recreate_pretraining_model(self, config, factory_func):
         self._test_recreate_pretraining_model(config, factory_func)
......
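The decorator reorder above is the test-side half of the change. Assuming XLSR_PRETRAINING_CONFIGS (and XLSR_PRETRAIN_CONFIGS below) wrap a parameterized expansion, which the (self, config, factory_func) signatures suggest but the diff does not show, a plausible reading of the reorder is that the skip decorator must sit below the expansion decorator: decorators apply bottom-up, so the skip behavior is attached to the function before the per-parameter test cases are generated. A minimal, self-contained sketch of that ordering, using the parameterized package and a hypothetical stand-in for the skip decorator:

    # Minimal sketch (hypothetical names, not the torchaudio tests): the skip
    # decorator sits below the parameter-expanding decorator, so it wraps the
    # test function before the per-parameter cases are generated.
    import os
    import unittest

    from parameterized import parameterized

    # Stand-in for skipIfCudaSmallMemory: for illustration it keys only off the
    # allow-skip variable; the real decorator's condition also involves the
    # actual CUDA device.
    skip_if_allowed = unittest.skipIf(
        os.environ.get("TORCHAUDIO_TEST_ALLOW_SKIP_IF_CUDA_SMALL_MEMORY") == "true",
        "CUDA device memory is too small for this model",
    )


    class ExampleTest(unittest.TestCase):
        @parameterized.expand([("base", 1), ("large", 2)])  # plays the role of @XLSR_PRETRAINING_CONFIGS
        @skip_if_allowed  # applied first, so every expanded case inherits the skip
        def test_example(self, name, value):
            self.assertGreater(value, 0)


    if __name__ == "__main__":
        unittest.main()

Running this with TORCHAUDIO_TEST_ALLOW_SKIP_IF_CUDA_SMALL_MEMORY=true reports both parameterized cases as skipped; without it they run normally.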
@@ -182,8 +182,8 @@ class TestHFIntegration(TorchaudioTestCase):
         imported = import_huggingface_model(original).eval()
         self._test_import_pretrain(original, imported, config)

-    @skipIfCudaSmallMemory
     @XLSR_PRETRAIN_CONFIGS
+    @skipIfCudaSmallMemory
     def test_import_xlsr_pretrain(self, config, _):
         """XLS-R models from HF transformers can be imported and yields the same results"""
         original = self._get_model(config).eval()
......