"examples/vscode:/vscode.git/clone" did not exist on "8a861048dd5da11f2a82632333601b7bd42a71b8"
Unverified commit 4c19f3ba, authored by Sylvain Gugger, committed by GitHub

Clean Trainer tests and datasets dep (#8268)

parent 068e6b5e
.circleci/config.yml

@@ -77,7 +77,6 @@ jobs:
             - v0.4-torch_and_tf-{{ checksum "setup.py" }}
             - v0.4-{{ checksum "setup.py" }}
       - run: pip install --upgrade pip
-      - run: pip install git+https://github.com/huggingface/datasets
       - run: pip install .[sklearn,tf-cpu,torch,testing]
       - save_cache:
           key: v0.4-{{ checksum "setup.py" }}
@@ -102,7 +101,6 @@ jobs:
             - v0.4-torch-{{ checksum "setup.py" }}
             - v0.4-{{ checksum "setup.py" }}
       - run: pip install --upgrade pip
-      - run: pip install git+https://github.com/huggingface/datasets
       - run: pip install .[sklearn,torch,testing]
       - save_cache:
           key: v0.4-torch-{{ checksum "setup.py" }}
@@ -129,7 +127,6 @@ jobs:
             - v0.4-tf-{{ checksum "setup.py" }}
             - v0.4-{{ checksum "setup.py" }}
       - run: pip install --upgrade pip
-      - run: pip install git+https://github.com/huggingface/datasets
       - run: pip install .[sklearn,tf-cpu,testing]
       - save_cache:
           key: v0.4-tf-{{ checksum "setup.py" }}
@@ -154,7 +151,6 @@ jobs:
             - v0.4-flax-{{ checksum "setup.py" }}
             - v0.4-{{ checksum "setup.py" }}
       - run: pip install --upgrade pip
-      - run: pip install git+https://github.com/huggingface/datasets
       - run: sudo pip install .[flax,sklearn,torch,testing]
       - save_cache:
           key: v0.4-flax-{{ checksum "setup.py" }}
@@ -179,7 +175,6 @@ jobs:
             - v0.4-torch-{{ checksum "setup.py" }}
             - v0.4-{{ checksum "setup.py" }}
       - run: pip install --upgrade pip
-      - run: pip install git+https://github.com/huggingface/datasets
       - run: pip install .[sklearn,torch,testing]
       - save_cache:
           key: v0.4-torch-{{ checksum "setup.py" }}
@@ -204,7 +199,6 @@ jobs:
             - v0.4-tf-{{ checksum "setup.py" }}
             - v0.4-{{ checksum "setup.py" }}
       - run: pip install --upgrade pip
-      - run: pip install git+https://github.com/huggingface/datasets
       - run: pip install .[sklearn,tf-cpu,testing]
       - save_cache:
           key: v0.4-tf-{{ checksum "setup.py" }}
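The six removed steps above all did the same thing: install datasets from its git main branch before running each CI job. After this change the jobs rely on the datasets package that the project's own testing extras presumably pull in, and tests that genuinely need the library guard themselves instead of assuming it is present. A minimal sketch of such a guard, using only the standard library; the actual require_datasets helper lives in transformers.testing_utils and may be implemented differently:

import importlib.util
import unittest


def require_datasets(test_case):
    """Skip the decorated test when the `datasets` library is not installed."""
    if importlib.util.find_spec("datasets") is None:
        return unittest.skip("test requires the datasets library")(test_case)
    return test_case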
tests/test_trainer.py

@@ -18,13 +18,13 @@ import os
 import tempfile
 import unittest
 
-import datasets
 import numpy as np
 
 from transformers import AutoTokenizer, EvaluationStrategy, PretrainedConfig, TrainingArguments, is_torch_available
 from transformers.file_utils import WEIGHTS_NAME
 from transformers.testing_utils import (
     get_tests_dir,
+    require_datasets,
     require_optuna,
     require_sentencepiece,
     require_tokenizers,
@@ -340,7 +340,10 @@ class TrainerIntegrationTest(unittest.TestCase):
         self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0]))
         self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1]))
 
+    @require_datasets
     def test_trainer_with_datasets(self):
+        import datasets
+
         np.random.seed(42)
         x = np.random.normal(size=(64,)).astype(np.float32)
         y = 2.0 * x + 3.0 + np.random.normal(scale=0.1, size=(64,))
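This hunk is the test-side half of the cleanup: datasets is no longer imported at module load time, so the file can be collected even when the library is absent, and the one test that needs it is skipped via @require_datasets and imports datasets lazily. The hunk is truncated after the synthetic x/y arrays; a hedged sketch of how such a test body can continue (the column names input_x and label are illustrative, not taken from the diff above):

import numpy as np

import datasets  # imported lazily inside the test in the real file

np.random.seed(42)
x = np.random.normal(size=(64,)).astype(np.float32)
y = 2.0 * x + 3.0 + np.random.normal(scale=0.1, size=(64,))

# Build an in-memory datasets.Dataset from the arrays; it can then be handed
# to a Trainer as a train or eval dataset, much like a torch Dataset.
train_dataset = datasets.Dataset.from_dict({"input_x": x, "label": y})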
@@ -658,7 +661,9 @@ class TrainerHyperParameterIntegrationTest(unittest.TestCase):
         def hp_name(trial):
             return MyTrialShortNamer.shortname(trial.params)
 
+        with tempfile.TemporaryDirectory() as tmp_dir:
             trainer = get_regression_trainer(
+                output_dir=tmp_dir,
                 learning_rate=0.1,
                 logging_steps=1,
                 evaluation_strategy=EvaluationStrategy.EPOCH,
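The last hunk routes the hyperparameter-search trainer's output into a temporary directory so the test stops leaving checkpoint folders in the working tree. A small sketch of that pattern in isolation, assuming a transformers install with torch available; the argument values are illustrative, not the test's real configuration:

import tempfile

from transformers import TrainingArguments

with tempfile.TemporaryDirectory() as tmp_dir:
    # Everything the Trainer writes (checkpoints, logs) goes under tmp_dir.
    args = TrainingArguments(
        output_dir=tmp_dir,
        learning_rate=0.1,
        logging_steps=1,
    )
    # ... build a model, create Trainer(args=args, ...) and run the test body here ...
# tmp_dir and everything written into it is deleted when the with-block exits.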