Unverified commit b3a0aad3 authored by Yih-Dar, committed by GitHub
Browse files

Fix past CI (#20967)



* Fix for Past CI

* make style

* clean up

* unindent 2 blocks
Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
parent 41b0564b
......@@ -23,6 +23,7 @@ from ...utils import (
_LazyModule,
is_flax_available,
is_keras_nlp_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
......
......@@ -596,11 +596,11 @@ def is_spacy_available():
def is_tensorflow_text_available():
return importlib.util.find_spec("tensorflow_text") is not None
return is_tf_available() and importlib.util.find_spec("tensorflow_text") is not None
def is_keras_nlp_available():
return importlib.util.find_spec("keras_nlp") is not None
return is_tensorflow_text_available() and importlib.util.find_spec("keras_nlp") is not None
def is_in_notebook():
......
......@@ -4,15 +4,15 @@ from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, slow
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
......@@ -32,6 +32,7 @@ if is_tf_available():
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
# The TF tokenizers are usually going to be used as pretrained tokenizers from existing model checkpoints,
......
......@@ -4,15 +4,15 @@ from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, slow
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_keras_nlp_available():
from transformers.models.gpt2 import TFGPT2Tokenizer
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpt2 import TFGPT2Tokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
......@@ -40,6 +40,7 @@ if is_tf_available():
return outputs
@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
# The TF tokenizers are usually going to be used as pretrained tokenizers from existing model checkpoints,
......
......@@ -38,7 +38,14 @@ from transformers.models.layoutlmv2.tokenization_layoutlmv2 import (
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import is_pt_tf_cross_test, require_pandas, require_tokenizers, require_torch, slow
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_detectron2,
require_pandas,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import (
SMALL_TRAINING_CORPUS,
......@@ -1264,6 +1271,7 @@ class LayoutLMv2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
@require_torch
@require_detectron2
@slow
def test_torch_encode_plus_sent_to_model(self):
import torch
......
......@@ -22,7 +22,15 @@ from transformers import (
is_vision_available,
pipeline,
)
from transformers.testing_utils import nested_simplify, require_tf, require_timm, require_torch, require_vision, slow
from transformers.testing_utils import (
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY, PipelineTestCaseMeta
......@@ -245,6 +253,7 @@ class ObjectDetectionPipelineTests(unittest.TestCase, metaclass=PipelineTestCase
)
@require_torch
@require_pytesseract
@slow
def test_layoutlm(self):
model_id = "Narsil/layoutlmv3-finetuned-funsd"
......
......@@ -50,6 +50,7 @@ from transformers.testing_utils import (
get_gpu_count,
get_tests_dir,
is_staging_test,
require_accelerate,
require_intel_extension_for_pytorch,
require_optuna,
require_ray,
......@@ -1285,6 +1286,7 @@ class TrainerIntegrationTest(TestCasePlus, TrainerIntegrationCommon):
self.assertAlmostEqual(b, b1, delta=1e-5)
@slow
@require_accelerate
@require_torch_non_multi_gpu
def test_auto_batch_size_finder(self):
......
......@@ -4,6 +4,17 @@ import os
past_versions_testing = {
"pytorch": {
"1.12": {
"torch": "1.12.1",
"torchvision": "0.13.1",
"torchaudio": "0.12.1",
"python": 3.9,
"cuda": "cu113",
"install": (
"python3 -m pip install --no-cache-dir -U torch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1"
" --extra-index-url https://download.pytorch.org/whl/cu113"
),
},
"1.11": {
"torch": "1.11.0",
"torchvision": "0.12.0",
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment