Unverified commit e6b811f0 authored by Stas Bekman, committed by GitHub

[testing] replace hardcoded paths to allow running tests from anywhere (#6523)

* [testing] replace hardcoded paths to allow running tests from anywhere

* fix the merge conflict
parent 9d1b4db2
+import inspect
 import os
 import re
 import shutil
@@ -144,6 +145,15 @@ def require_torch_and_cuda(test_case):
     return test_case


+def get_tests_dir():
+    """
+    Returns the full path to the `tests` dir, so that tests can be invoked from anywhere.
+    """
+    # the __file__ of this function's caller
+    caller__file__ = inspect.stack()[1][1]
+    return os.path.abspath(os.path.dirname(caller__file__))
+
+
 #
 # Helper functions for dealing with testing text outputs
 # The original code came from:
...
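For context, here is a minimal standalone sketch (not part of the commit) of the stack-inspection technique `get_tests_dir` relies on: `inspect.stack()[1]` is the caller's frame record, and field `[1]` of that record is the filename the call came from, so the helper resolves the caller's directory rather than the directory of `testing_utils.py` itself.

```python
# Minimal sketch (not from the commit) of the stack-inspection technique.
import inspect
import os


def get_caller_dir():
    # inspect.stack()[0] is this frame; [1] is the caller's frame record,
    # and field [1] of a frame record is its source filename.
    caller_file = inspect.stack()[1][1]
    return os.path.abspath(os.path.dirname(caller_file))


if __name__ == "__main__":
    # Prints the directory containing this script, regardless of the
    # current working directory it was launched from.
    print(get_caller_dir())
```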
@@ -15,7 +15,7 @@ from transformers import (
     TransfoXLTokenizer,
     is_torch_available,
 )
-from transformers.testing_utils import require_torch
+from transformers.testing_utils import get_tests_dir, require_torch
 from transformers.tokenization_distilbert import DistilBertTokenizerFast
 from transformers.tokenization_openai import OpenAIGPTTokenizerFast
 from transformers.tokenization_roberta import RobertaTokenizerFast
@@ -42,7 +42,7 @@ class CommonFastTokenizerTest(unittest.TestCase):
     TOKENIZERS_CLASSES = frozenset([])

     def setUp(self) -> None:
-        with open("tests/fixtures/sample_text.txt", encoding="utf-8") as f_data:
+        with open(f"{get_tests_dir()}/fixtures/sample_text.txt", encoding="utf-8") as f_data:
             self._data = f_data.read().replace("\n\n", "\n").strip()

     def test_all_tokenizers(self):
...
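To illustrate the pattern these test changes follow, here is a hypothetical test module (the class name is made up, not a real transformers test) that builds its fixture path through `get_tests_dir()`; `os.path.join` is used here and is equivalent to the f-string form in the diff.

```python
# Hypothetical test module illustrating the pattern; assumes this file
# lives in the repo's tests/ directory, next to fixtures/.
import os
import unittest

from transformers.testing_utils import get_tests_dir


class FixturePathTest(unittest.TestCase):
    def setUp(self) -> None:
        # Resolved relative to this test file's directory, so the test
        # passes whether pytest is run from the repo root or elsewhere.
        sample = os.path.join(get_tests_dir(), "fixtures", "sample_text.txt")
        with open(sample, encoding="utf-8") as f_data:
            self._data = f_data.read().replace("\n\n", "\n").strip()

    def test_fixture_loaded(self):
        self.assertTrue(self._data)
```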
@@ -4,7 +4,7 @@ import nlp
 import numpy as np

 from transformers import AutoTokenizer, TrainingArguments, is_torch_available
-from transformers.testing_utils import require_torch
+from transformers.testing_utils import get_tests_dir, require_torch

 if is_torch_available():
@@ -20,7 +20,7 @@ if is_torch_available():
     )

-PATH_SAMPLE_TEXT = "./tests/fixtures/sample_text.txt"
+PATH_SAMPLE_TEXT = f"{get_tests_dir()}/fixtures/sample_text.txt"

 class RegressionDataset:
@@ -262,7 +262,7 @@ class TrainerIntegrationTest(unittest.TestCase):
         tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
         model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID)
         data_args = GlueDataTrainingArguments(
-            task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
+            task_name="mrpc", data_dir=f"{get_tests_dir()}/fixtures/tests_samples/MRPC", overwrite_cache=True
         )
         eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")
...
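A note on the design choice: because `get_tests_dir()` inspects the caller's stack frame, the helper can live once in `transformers.testing_utils` while still resolving each test file's own directory. An alternative sketch (not what this commit does) achieves the same per-file resolution with `__file__` and `pathlib`, at the cost of repeating the boilerplate in every test module:

```python
# Alternative sketch (not from the commit): derive the tests dir from the
# test module's own __file__ instead of inspecting the call stack.
from pathlib import Path

TESTS_DIR = Path(__file__).resolve().parent  # directory of this test module
PATH_SAMPLE_TEXT = str(TESTS_DIR / "fixtures" / "sample_text.txt")
```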