Unverified Commit 28cf8730 authored by Stas Bekman's avatar Stas Bekman Committed by GitHub
Browse files

[testing] skip decorators: docs, tests, bugs (#7334)

* skip decorators: docs, tests, bugs

* another important note

* style

* bloody style

* add @pytest.mark.parametrize

* add note

* no idea what it wants :(
parent df536438
......@@ -412,6 +412,16 @@ And we have these decorators that require the condition described by the marker.
@require_torch_and_cuda
```
Some decorators like ``@parameterized`` rewrite test names, therefore ``@require_*`` skip decorators have to be listed last for them to work correctly. Here is an example of the correct usage:
.. code-block:: python
@parameterized.expand(...)
@require_multigpu
def test_integration_foo():
There is no problem whatsoever with ``@pytest.mark.parametrize`` (but it only works with non-unittests) - can use it in any order.
This section will be expanded soon once our work in progress on those decorators is finished.
Inside tests:
......@@ -757,7 +767,7 @@ To run such tests set ``RUN_SLOW=1`` env var, e.g.:
RUN_SLOW=1 pytest tests
It's important that the decorator ``@slow`` appears last in the stack of decorators, as some decorators like ``parametrized`` may interfere with its normal functioning. Here is an example of the correct usage:
Some decorators like ``@parameterized`` rewrite test names, therefore ``@slow`` and the rest of the skip decorators ``@require_*`` have to be listed last for them to work correctly. Here is an example of the correct usage:
.. code-block:: python
......
......@@ -62,8 +62,9 @@ def slow(test_case):
"""
if not _run_slow_tests:
test_case = unittest.skip("test is slow")(test_case)
return test_case
return unittest.skip("test is slow")(test_case)
else:
return test_case
def custom_tokenizers(test_case):
......@@ -75,8 +76,9 @@ def custom_tokenizers(test_case):
to a truthy value to run them.
"""
if not _run_custom_tokenizers:
test_case = unittest.skip("test of custom tokenizers")(test_case)
return test_case
return unittest.skip("test of custom tokenizers")(test_case)
else:
return test_case
def require_torch(test_case):
......@@ -87,8 +89,9 @@ def require_torch(test_case):
"""
if not _torch_available:
test_case = unittest.skip("test requires PyTorch")(test_case)
return test_case
return unittest.skip("test requires PyTorch")(test_case)
else:
return test_case
def require_tf(test_case):
......@@ -99,8 +102,9 @@ def require_tf(test_case):
"""
if not _tf_available:
test_case = unittest.skip("test requires TensorFlow")(test_case)
return test_case
return unittest.skip("test requires TensorFlow")(test_case)
else:
return test_case
def require_multigpu(test_case):
......@@ -119,7 +123,8 @@ def require_multigpu(test_case):
if torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
return test_case
else:
return test_case
def require_non_multigpu(test_case):
......@@ -133,7 +138,8 @@ def require_non_multigpu(test_case):
if torch.cuda.device_count() > 1:
return unittest.skip("test requires 0 or 1 GPU")(test_case)
return test_case
else:
return test_case
def require_torch_tpu(test_case):
......@@ -142,8 +148,8 @@ def require_torch_tpu(test_case):
"""
if not _torch_tpu_available:
return unittest.skip("test requires PyTorch TPU")
return test_case
else:
return test_case
if _torch_available:
......@@ -154,9 +160,9 @@ else:
def require_torch_and_cuda(test_case):
"""Decorator marking a test that requires CUDA and PyTorch). """
"""Decorator marking a test that requires CUDA and PyTorch. """
if torch_device != "cuda":
return unittest.skip("test requires CUDA")
return unittest.skip("test requires CUDA")(test_case)
else:
return test_case
......@@ -165,15 +171,17 @@ def require_datasets(test_case):
"""Decorator marking a test that requires datasets."""
if not _datasets_available:
test_case = unittest.skip("test requires Datasets")(test_case)
return test_case
return unittest.skip("test requires `datasets`")(test_case)
else:
return test_case
def require_faiss(test_case):
"""Decorator marking a test that requires faiss."""
if not _faiss_available:
test_case = unittest.skip("test requires Faiss")(test_case)
return test_case
return unittest.skip("test requires `faiss`")(test_case)
else:
return test_case
def get_tests_dir():
......
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# this test validates that we can stack skip decorators in groups and whether
# they work correctly with other decorators
#
# since the decorators have already built their decision params (like checking
# env[], we can't mock the env and test each of the combinations), so ideally
# the following 4 should be run. But since we have different CI jobs running
# different configs, all combinations should get covered
#
# USE_CUDA=1 RUN_SLOW=1 pytest -rA tests/test_skip_decorators.py
# USE_CUDA=0 RUN_SLOW=1 pytest -rA tests/test_skip_decorators.py
# USE_CUDA=0 RUN_SLOW=0 pytest -rA tests/test_skip_decorators.py
# USE_CUDA=1 RUN_SLOW=0 pytest -rA tests/test_skip_decorators.py
import os
import unittest
import pytest
from parameterized import parameterized
from transformers.testing_utils import require_torch, require_torch_and_cuda, slow, torch_device
# skipping in unittest tests
# Single parameter group consumed by `parameterized.expand(params)` below;
# one group is enough to trigger the test-name rewriting that the ordering
# tests exercise.
params = [(1,)]
# test that we can stack our skip decorators with 3rd party decorators
def check_slow():
    """Shared body for the `@slow`-decorated tests.

    If RUN_SLOW is enabled, reaching this code is expected and the assert
    passes; if RUN_SLOW is disabled, reaching this code means a skip
    decorator failed to fire, so fail loudly.
    """
    # Environment variable values are always strings, so `bool(os.getenv("RUN_SLOW", 0))`
    # would treat RUN_SLOW=0 as truthy ("0" is a non-empty string). Parse the
    # common falsy spellings explicitly so RUN_SLOW=0 disables, matching the
    # strtobool-style parsing the skip decorators themselves rely on.
    run_slow = os.getenv("RUN_SLOW", "0").lower() not in ("", "0", "false", "off", "no", "n")
    if run_slow:
        assert True
    else:
        assert False, "should have been skipped"
# test that we can stack our skip decorators
def check_slow_torch_cuda():
    """Shared body for tests stacking `@slow` with `@require_torch_and_cuda`.

    Passes only when RUN_SLOW is enabled AND a CUDA device is selected;
    otherwise reaching this code means a skip decorator failed to fire.
    """
    # Environment variable values are always strings, so `bool(os.getenv("RUN_SLOW", 0))`
    # would treat RUN_SLOW=0 as truthy ("0" is a non-empty string). Parse the
    # common falsy spellings explicitly so RUN_SLOW=0 disables the slow tests.
    run_slow = os.getenv("RUN_SLOW", "0").lower() not in ("", "0", "false", "off", "no", "n")
    if run_slow and torch_device == "cuda":
        assert True
    else:
        assert False, "should have been skipped"
@require_torch
class SkipTester(unittest.TestCase):
    """unittest-based checks that skip decorators stack correctly.

    Each test body asserts False when it runs in a configuration where it
    should have been skipped, so a decorator-ordering bug surfaces as a
    test failure rather than a silent pass.
    """

    @slow
    @require_torch_and_cuda
    def test_2_skips_slow_first(self):
        # Two of our own skip decorators stacked, @slow outermost.
        check_slow_torch_cuda()

    @require_torch_and_cuda
    @slow
    def test_2_skips_slow_last(self):
        # Same pair in the opposite order — both orders must skip correctly.
        check_slow_torch_cuda()

    # The combination of any skip decorator, followed by parameterized fails to skip the tests
    # 1. @slow manages to correctly skip `test_param_slow_first`
    # 2. but then `parameterized` creates new tests, with a unique name for each parameter group.
    #    It has no idea that they are to be skipped and so they all run, ignoring @slow
    # Therefore skip decorators must come after `parameterized`
    #
    # @slow
    # @parameterized.expand(params)
    # def test_param_slow_first(self, param=None):
    #     check_slow()

    # This works as expected:
    # 1. `parameterized` creates new tests with unique names
    # 2. each of them gets an opportunity to be skipped
    @parameterized.expand(params)
    @slow
    def test_param_slow_last(self, param=None):
        check_slow()
# skipping in non-unittest tests
# no problem at all here
@slow
@require_torch_and_cuda
def test_pytest_2_skips_slow_first():
    # Stacked skip decorators on a plain pytest function, @slow outermost.
    check_slow_torch_cuda()
@require_torch_and_cuda
@slow
def test_pytest_2_skips_slow_last():
    # Same decorator pair in the opposite order — must also skip correctly.
    check_slow_torch_cuda()
@slow
@pytest.mark.parametrize("param", [1])
def test_pytest_param_slow_first(param):
    # Unlike `parameterized.expand`, `pytest.mark.parametrize` cooperates with
    # a skip decorator placed first (non-unittest tests only).
    check_slow()
@pytest.mark.parametrize("param", [1])
@slow
def test_pytest_param_slow_last(param):
    # ... and also with the skip decorator placed last — either order works.
    check_slow()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment