"tests/vscode:/vscode.git/clone" did not exist on "6e4bc670993e498e0709dfdce7fa54e5ec94fdba"
Unverified Commit 831f3144 authored by Younes Belkada, committed by GitHub

[`tests`] add `accelerate` marker (#21743)

* add `accelerate` marker

* add to docs

* Update docs/source/en/testing.mdx
parent c51dc4f9
@@ -176,6 +176,15 @@ If you want to include only tests that include both patterns, `and` is to be used:
```bash
pytest -k "test and ada" tests/test_optimization.py
```
### Run `accelerate` tests
Sometimes you need to run `accelerate` tests on your models. To do so, add `-m accelerate_tests` to your command. For example, to run these tests on `OPT`, run:
```bash
RUN_SLOW=1 pytest -m accelerate_tests tests/models/opt/test_modeling_opt.py
```
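Note that `-m` only works when pytest knows about the custom marker. A minimal sketch of one way to register it, assuming it is not already declared elsewhere (for example in `setup.cfg`), is a `conftest.py` hook:

```python
# conftest.py -- hypothetical sketch: register the custom marker so that
# `pytest -m accelerate_tests` selects tests and no unknown-marker warning is raised
def pytest_configure(config):
    config.addinivalue_line(
        "markers",
        "accelerate_tests: marks tests that exercise the accelerate integration",
    )
```

The marker expression can also be negated, so `pytest -m "not accelerate_tests"` deselects these tests.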
### Run documentation tests
In order to test whether the documentation examples are correct, you should check that the `doctests` are passing.
...
@@ -32,6 +32,7 @@ from typing import Dict, List, Tuple
import numpy as np
from huggingface_hub import HfFolder, delete_repo, set_access_token
from huggingface_hub.file_download import http_get
from pytest import mark
from requests.exceptions import HTTPError
import transformers
@@ -2463,6 +2464,7 @@ class ModelTesterMixin:
        self.assertEqual(param.device, torch.device(param_device))

    @require_accelerate
    @mark.accelerate_tests
    @require_torch_gpu
    def test_disk_offload(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
@@ -2498,6 +2500,7 @@ class ModelTesterMixin:
        self.assertTrue(torch.allclose(base_output[0], new_output[0]))

    @require_accelerate
    @mark.accelerate_tests
    @require_torch_gpu
    def test_cpu_offload(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
@@ -2533,6 +2536,7 @@ class ModelTesterMixin:
        self.assertTrue(torch.allclose(base_output[0], new_output[0]))

    @require_accelerate
    @mark.accelerate_tests
    @require_torch_multi_gpu
    def test_model_parallelism(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
@@ -3172,6 +3176,7 @@ class ModelUtilsTest(TestCasePlus):
        self.assertIsNotNone(model)

    @require_accelerate
    @mark.accelerate_tests
    def test_from_pretrained_low_cpu_mem_usage_functional(self):
        # test that we can use `from_pretrained(..., low_cpu_mem_usage=True)` with normal and
        # sharded models
@@ -3185,6 +3190,7 @@ class ModelUtilsTest(TestCasePlus):
    @require_usr_bin_time
    @require_accelerate
    @mark.accelerate_tests
    def test_from_pretrained_low_cpu_mem_usage_measured(self):
        # test that `from_pretrained(..., low_cpu_mem_usage=True)` uses less cpu memory than default
@@ -3224,6 +3230,7 @@ class ModelUtilsTest(TestCasePlus):
    # cuda memory tracking and then we should be able to do a much more precise test.
    @require_accelerate
    @mark.accelerate_tests
    @require_torch_multi_gpu
    @slow
    def test_model_parallelism_gpt2(self):
@@ -3241,6 +3248,7 @@ class ModelUtilsTest(TestCasePlus):
        self.assertEqual(text_output, "Hello, my name is John. I'm a writer, and I'm a writer. I'm")

    @require_accelerate
    @mark.accelerate_tests
    @require_torch_gpu
    def test_from_pretrained_disk_offload_task_model(self):
        model = AutoModel.from_pretrained("hf-internal-testing/tiny-random-gpt2")
...
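The decorator stack added throughout the diff follows a single pattern: the existing `require_accelerate` skip condition is kept, and `mark.accelerate_tests` makes the test selectable with `-m`. A minimal sketch of applying the same pattern to a new test (the class name and test body here are hypothetical):

```python
from pytest import mark

from transformers.testing_utils import require_accelerate, require_torch_gpu


class MyModelTest:  # hypothetical test class
    @require_accelerate  # skipped unless `accelerate` is installed
    @mark.accelerate_tests  # selectable via `pytest -m accelerate_tests`
    @require_torch_gpu  # skipped unless a CUDA device is available
    def test_cpu_offload(self):
        ...  # exercise accelerate-driven CPU offload here
```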