"...git@developer.sourcefind.cn:chenpangpang/transformers.git" did not exist on "927904bc911225c1ea6087c5e24aa77d265bfb9e"
Unverified Commit 4eb61f8e authored by Stas Bekman, committed by GitHub

remove USE_CUDA (#7861)

parent ea1507fb
@@ -59,7 +59,6 @@ jobs:
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         # TF_GPU_MEMORY_LIMIT: 4096
         OMP_NUM_THREADS: 1
-        USE_CUDA: yes
       run: |
         source .env/bin/activate
         python -m pytest -n 2 --dist=loadfile -s ./tests/
@@ -110,7 +109,6 @@ jobs:
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         # TF_GPU_MEMORY_LIMIT: 4096
         OMP_NUM_THREADS: 1
-        USE_CUDA: yes
       run: |
         source .env/bin/activate
         python -m pytest -n 2 --dist=loadfile -s ./tests/
@@ -57,7 +57,6 @@ jobs:
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         OMP_NUM_THREADS: 1
         RUN_SLOW: yes
-        USE_CUDA: yes
       run: |
         source .env/bin/activate
         python -m pytest -n 1 --dist=loadfile -s ./tests/
@@ -67,7 +66,6 @@ jobs:
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         OMP_NUM_THREADS: 1
         RUN_SLOW: yes
-        USE_CUDA: yes
       run: |
         source .env/bin/activate
         pip install -r examples/requirements.txt
@@ -120,7 +118,6 @@ jobs:
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         OMP_NUM_THREADS: 1
         RUN_SLOW: yes
-        USE_CUDA: yes
       run: |
         source .env/bin/activate
         python -m pytest -n 1 --dist=loadfile -s ./tests/
@@ -130,7 +127,6 @@ jobs:
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         OMP_NUM_THREADS: 1
         RUN_SLOW: yes
-        USE_CUDA: yes
       run: |
         source .env/bin/activate
         pip install -r examples/requirements.txt
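These CI jobs simply stop exporting `USE_CUDA: yes`; nothing replaces it in the workflow files because device selection moves into the test suite itself (see the `testing_utils` hunk further down). A minimal sketch of the before/after behavior, assuming PyTorch is installed on the runner; the `old_style` flag parsing below is illustrative, not the suite's exact parser:

```python
import os

import torch

# old: the device was chosen by an explicit opt-in flag in the job env
old_style = "cuda" if os.environ.get("USE_CUDA", "").lower() in ("1", "yes", "true") else "cpu"

# new: the device is probed from the hardware at runtime
new_style = "cuda" if torch.cuda.is_available() else "cpu"

print(f"old={old_style} new={new_style}")
```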
@@ -22,12 +22,12 @@ How transformers are tested
 * `self-hosted (push) <https://github.com/huggingface/transformers/blob/master/.github/workflows/self-push.yml>`__: runs fast tests on GPU only on commits on ``master``. It only runs if a commit on ``master`` has updated the code in one of the following folders: ``src``, ``tests``, ``.github`` (to prevent running on added model cards, notebooks, etc.)
-* `self-hosted runner <https://github.com/huggingface/transformers/blob/master/.github/workflows/self-scheduled.yml>`__: runs slow tests on ``tests`` and ``examples``:
+* `self-hosted runner <https://github.com/huggingface/transformers/blob/master/.github/workflows/self-scheduled.yml>`__: runs normal and slow tests on GPU in ``tests`` and ``examples``:

 .. code-block:: bash

-   RUN_SLOW=1 USE_CUDA=1 pytest tests/
-   RUN_SLOW=1 USE_CUDA=1 pytest examples/
+   RUN_SLOW=1 pytest tests/
+   RUN_SLOW=1 pytest examples/

 The results can be observed `here <https://github.com/huggingface/transformers/actions>`__.
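As a hedged sketch of how a `RUN_SLOW`-style gate is typically wired up (the helper names below are modeled on the suite's `parse_flag_from_env`, but the bodies are illustrative, not the exact implementation):

```python
import os
import unittest

def parse_flag_from_env(key, default=False):
    """Illustrative: treat "1"/"yes"/"true" (any case) as truthy."""
    return os.environ.get(key, str(default)).lower() in ("1", "yes", "true", "t")

_run_slow = parse_flag_from_env("RUN_SLOW", default=False)

def slow(test_case):
    # Skip unless RUN_SLOW=1 was exported, mirroring the commands above.
    return unittest.skipUnless(_run_slow, "test is slow")(test_case)
```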
@@ -393,7 +393,7 @@ On a GPU-enabled setup, to test in CPU-only mode add ``CUDA_VISIBLE_DEVICES=""``
    CUDA_VISIBLE_DEVICES="" pytest tests/test_logging.py

-or if you have multiple gpus, you can tell which one to use in this test session, e.g. to use only the second gpu if you have gpus ``0`` and ``1``, you can run:
+or if you have multiple gpus, you can specify which one is to be used by ``pytest``. For example, to use only the second gpu if you have gpus ``0`` and ``1``, you can run:

 .. code-block:: bash
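The remaining lines of this hunk are collapsed in the view above. As a hedged illustration of the same masking done from Python instead of the shell (standard CUDA behavior, not code from this commit): the mask must be set before CUDA is initialized, and the surviving device is renumbered.

```python
import os

# Expose only the second physical gpu; must happen before torch touches CUDA.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

import torch

if torch.cuda.is_available():
    # Inside this process the masked gpu shows up as cuda:0.
    print(torch.cuda.device_count())  # -> 1
```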
@@ -2,5 +2,5 @@
 # these scripts need to be run before any changes to FSMT-related code - it should cover all bases
-USE_CUDA=0 RUN_SLOW=1 pytest --disable-warnings tests/test_tokenization_fsmt.py tests/test_configuration_auto.py tests/test_modeling_fsmt.py examples/seq2seq/test_fsmt_bleu_score.py
-USE_CUDA=1 RUN_SLOW=1 pytest --disable-warnings tests/test_tokenization_fsmt.py tests/test_configuration_auto.py tests/test_modeling_fsmt.py examples/seq2seq/test_fsmt_bleu_score.py
+CUDA_VISIBLE_DEVICES="" RUN_SLOW=1 pytest --disable-warnings tests/test_tokenization_fsmt.py tests/test_configuration_auto.py tests/test_modeling_fsmt.py examples/seq2seq/test_fsmt_bleu_score.py
+RUN_SLOW=1 pytest --disable-warnings tests/test_tokenization_fsmt.py tests/test_configuration_auto.py tests/test_modeling_fsmt.py examples/seq2seq/test_fsmt_bleu_score.py
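A hedged Python equivalent of the two script lines above, for driving both modes from one place; the test paths and flags come from the script, the wrapper itself is illustrative:

```python
import os
import subprocess

TESTS = [
    "tests/test_tokenization_fsmt.py",
    "tests/test_configuration_auto.py",
    "tests/test_modeling_fsmt.py",
    "examples/seq2seq/test_fsmt_bleu_score.py",
]

# First pass with the gpu masked out (cpu-mode), second with defaults.
for extra in ({"CUDA_VISIBLE_DEVICES": ""}, {}):
    env = {**os.environ, "RUN_SLOW": "1", **extra}
    subprocess.run(["pytest", "--disable-warnings", *TESTS], env=env, check=True)
```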
@@ -187,8 +187,10 @@ def require_torch_tpu(test_case):
 if _torch_available:
-    # Set the USE_CUDA environment variable to select a GPU.
-    torch_device = "cuda" if parse_flag_from_env("USE_CUDA") else "cpu"
+    # Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
+    import torch
+
+    torch_device = "cuda" if torch.cuda.is_available() else "cpu"
 else:
     torch_device = None
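The new comment says to set `CUDA_VISIBLE_DEVICES=""` to force cpu-mode. A small sketch of why ordering matters here: the variable has to be in the environment before torch initializes CUDA, otherwise it is ignored.

```python
import os

# Hide all gpus from this process; must precede CUDA initialization.
os.environ["CUDA_VISIBLE_DEVICES"] = ""

import torch

print(torch.cuda.is_available())  # False, even on a machine with gpus
```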
@@ -485,9 +487,9 @@ class TestCasePlus(unittest.TestCase):
 def mockenv(**kwargs):
     """this is a convenience wrapper, that allows this:

-    @mockenv(USE_CUDA=True, USE_TF=False)
+    @mockenv(RUN_SLOW=True, USE_TF=False)
     def test_something():
-        use_cuda = os.getenv("USE_CUDA", False)
+        run_slow = os.getenv("RUN_SLOW", False)
         use_tf = os.getenv("USE_TF", False)
     """
     return unittest.mock.patch.dict(os.environ, kwargs)
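The docstring shows the intent; here is a self-contained, runnable version of the same pattern. Note that `os.environ` only accepts string values, so the example passes `"1"`/`"0"` where the docstring writes booleans:

```python
import os
import unittest
from unittest import mock

def mockenv(**kwargs):
    # Temporarily overlay os.environ; restored when the test returns.
    return mock.patch.dict(os.environ, kwargs)

class EnvTest(unittest.TestCase):
    @mockenv(RUN_SLOW="1", USE_TF="0")
    def test_something(self):
        self.assertEqual(os.getenv("RUN_SLOW"), "1")
        self.assertEqual(os.getenv("USE_TF"), "0")

if __name__ == "__main__":
    unittest.main()
```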
@@ -23,10 +23,10 @@
 # the following 4 should be run. But since we have different CI jobs running
 # different configs, all combinations should get covered
 #
-# USE_CUDA=1 RUN_SLOW=1 pytest -rA tests/test_skip_decorators.py
-# USE_CUDA=0 RUN_SLOW=1 pytest -rA tests/test_skip_decorators.py
-# USE_CUDA=0 RUN_SLOW=0 pytest -rA tests/test_skip_decorators.py
-# USE_CUDA=1 RUN_SLOW=0 pytest -rA tests/test_skip_decorators.py
+# RUN_SLOW=1 pytest -rA tests/test_skip_decorators.py
+# RUN_SLOW=1 CUDA_VISIBLE_DEVICES="" pytest -rA tests/test_skip_decorators.py
+# RUN_SLOW=0 pytest -rA tests/test_skip_decorators.py
+# RUN_SLOW=0 CUDA_VISIBLE_DEVICES="" pytest -rA tests/test_skip_decorators.py

 import os
 import unittest
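A hedged sketch of the kind of decorator these four invocations exercise: one axis is the `RUN_SLOW` flag, the other is whether a CUDA device is visible. The decorator below is illustrative, not the suite's exact code:

```python
import unittest

import torch

def require_torch_gpu(test_case):
    # Skips when no gpu is visible, e.g. under CUDA_VISIBLE_DEVICES="".
    return unittest.skipUnless(torch.cuda.is_available(), "test requires CUDA")(test_case)

class GpuTests(unittest.TestCase):
    @require_torch_gpu
    def test_tensor_on_gpu(self):
        self.assertTrue(torch.ones(1, device="cuda").is_cuda)

if __name__ == "__main__":
    unittest.main()
```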