"test/integration_tests/tacotron2_pipeline_test.py" did not exist on "217fb684e23b4ec0aae581ca8feccdea4e52a039"
Unverified Commit e6b811f0 authored by Stas Bekman, committed by GitHub

[testing] replace hardcoded paths to allow running tests from anywhere (#6523)

* [testing] replace hardcoded paths to allow running tests from anywhere

* fix the merge conflict
parent 9d1b4db2
+import inspect
import os
import re
import shutil
@@ -144,6 +145,15 @@ def require_torch_and_cuda(test_case):
    return test_case


+def get_tests_dir():
+    """
+    returns the full path to the `tests` dir, so that the tests can be invoked from anywhere
+    """
+    # this function caller's __file__
+    caller__file__ = inspect.stack()[1][1]
+    return os.path.abspath(os.path.dirname(caller__file__))
+
+
#
# Helper functions for dealing with testing text outputs
# The original code came from:
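For context, `get_tests_dir()` works by looking one frame up the call stack: `inspect.stack()[1][1]` is the filename of the module that called the helper, so the returned directory follows the test file itself rather than the current working directory. A minimal, self-contained sketch of the same mechanism (illustrative only, not part of this commit):

import inspect
import os


def dir_of_caller():
    # stack()[0] is this frame; stack()[1] is the caller, and element 1 of the
    # FrameInfo tuple is the caller's filename (its __file__).
    caller_file = inspect.stack()[1][1]
    return os.path.abspath(os.path.dirname(caller_file))


if __name__ == "__main__":
    # Prints the directory containing this script, no matter which working
    # directory it is launched from.
    print(dir_of_caller())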
@@ -15,7 +15,7 @@ from transformers import (
    TransfoXLTokenizer,
    is_torch_available,
)
-from transformers.testing_utils import require_torch
+from transformers.testing_utils import get_tests_dir, require_torch
from transformers.tokenization_distilbert import DistilBertTokenizerFast
from transformers.tokenization_openai import OpenAIGPTTokenizerFast
from transformers.tokenization_roberta import RobertaTokenizerFast
@@ -42,7 +42,7 @@ class CommonFastTokenizerTest(unittest.TestCase):
    TOKENIZERS_CLASSES = frozenset([])

    def setUp(self) -> None:
-        with open("tests/fixtures/sample_text.txt", encoding="utf-8") as f_data:
+        with open(f"{get_tests_dir()}/fixtures/sample_text.txt", encoding="utf-8") as f_data:
            self._data = f_data.read().replace("\n\n", "\n").strip()

    def test_all_tokenizers(self):
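The same pattern can be reused in any test module that reads fixtures. A hedged sketch (the class name, test name, and use of os.path.join are illustrative and equivalent to the f-string form in the commit; it assumes the file lives in the tests directory, since get_tests_dir() returns the calling file's directory):

import os
import unittest

from transformers.testing_utils import get_tests_dir


class FixturePathTest(unittest.TestCase):
    # Illustrative only: works when this file sits in the `tests` directory,
    # because get_tests_dir() returns the directory of the calling file.
    def setUp(self) -> None:
        fixture = os.path.join(get_tests_dir(), "fixtures", "sample_text.txt")
        with open(fixture, encoding="utf-8") as f_data:
            self._data = f_data.read().replace("\n\n", "\n").strip()

    def test_fixture_loaded(self):
        self.assertTrue(len(self._data) > 0)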
@@ -4,7 +4,7 @@ import nlp
import numpy as np

from transformers import AutoTokenizer, TrainingArguments, is_torch_available
-from transformers.testing_utils import require_torch
+from transformers.testing_utils import get_tests_dir, require_torch


if is_torch_available():
@@ -20,7 +20,7 @@ if is_torch_available():
    )


-PATH_SAMPLE_TEXT = "./tests/fixtures/sample_text.txt"
+PATH_SAMPLE_TEXT = f"{get_tests_dir()}/fixtures/sample_text.txt"


class RegressionDataset:
@@ -262,7 +262,7 @@ class TrainerIntegrationTest(unittest.TestCase):
        tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
        model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID)
        data_args = GlueDataTrainingArguments(
-            task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
+            task_name="mrpc", data_dir=f"{get_tests_dir()}/fixtures/tests_samples/MRPC", overwrite_cache=True
        )
        eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")
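For reference, the failure mode the commit removes: a cwd-relative path such as the old "./tests/fixtures/..." only resolves correctly when the test runner is started from the repository root. A small sketch (the directories are illustrative):

import os
import tempfile

rel_path = "./tests/fixtures/tests_samples/MRPC"

# Relative paths are resolved against the current working directory,
# i.e. wherever pytest happened to be started.
print(os.path.abspath(rel_path))

# Simulate running the tests from a different directory: the same string
# now points at a location where the fixtures do not exist.
os.chdir(tempfile.gettempdir())
print(os.path.abspath(rel_path))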