Unverified Commit 59cd9de3 authored by Yih-Dar, committed by GitHub

Byebye torch 1.10 (#28207)



* fix

* fix

---------
Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
parent e768616a
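The patch below removes compatibility shims for torch 1.10 and older now that the library's minimum supported torch is 1.11: the flags `is_torch_less_than_1_11`, `is_torch_greater_or_equal_than_1_11`, and `is_torch_1_8_0` are dropped along with the skips and TODOs that depended on them. As a minimal sketch (assuming the usual pattern in `transformers.pytorch_utils`; the 1.12 flag is the one the diff still keeps):

```python
# A sketch of the version-flag pattern used in pytorch_utils
# (illustrative; exact names beyond those in the diff may differ).
import torch
from packaging import version

# Compare against the base version so pre-release/local suffixes
# (e.g. "2.1.0+cu118") don't affect the comparison.
parsed_torch_version_base = version.parse(version.parse(torch.__version__).base_version)
is_torch_greater_or_equal_than_1_12 = parsed_torch_version_base >= version.parse("1.12")
```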
@@ -1439,15 +1439,6 @@ class TrainingArguments:
                 raise ValueError(
                     "Your setup doesn't support bf16/gpu. You need torch>=1.10, using Ampere GPU with cuda>=11.0"
                 )
-            elif is_torch_npu_available():
-                # npu
-                from .pytorch_utils import is_torch_greater_or_equal_than_1_11
-
-                if not is_torch_greater_or_equal_than_1_11:
-                    raise ValueError(
-                        "Your setup doesn't support bf16/npu. You need torch>=1.11, using Ascend NPU with "
-                        "`torch_npu` installed"
-                    )
             elif not is_torch_xpu_available():
                 # xpu
                 from .pytorch_utils import is_torch_greater_or_equal_than_1_12
......
@@ -64,6 +64,7 @@ USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper()

 FORCE_TF_AVAILABLE = os.environ.get("FORCE_TF_AVAILABLE", "AUTO").upper()

+# `transformers` requires `torch>=1.11` but this variable is exposed publicly, and we can't simply remove it.
 # This is the version of torch required to run torch.fx features and torch.onnx with dictionary inputs.
 TORCH_FX_REQUIRED_VERSION = version.parse("1.10")
......
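`TORCH_FX_REQUIRED_VERSION` stays at 1.10 because, as the new comment says, it is publicly exposed and can't simply be removed. A hedged sketch of how such a constant is typically consumed (`torch_fx_available` is a hypothetical helper for illustration, not a function from this commit):

```python
# Illustrative guard built on the public constant; not part of this diff.
import torch
from packaging import version

TORCH_FX_REQUIRED_VERSION = version.parse("1.10")

def torch_fx_available() -> bool:
    # True when the installed torch meets the minimum for torch.fx features.
    return version.parse(torch.__version__) >= TORCH_FX_REQUIRED_VERSION
```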
@@ -40,7 +40,6 @@ if is_torch_available():
         LongT5Model,
     )
     from transformers.models.longt5.modeling_longt5 import LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST
-    from transformers.pytorch_utils import is_torch_less_than_1_11


 class LongT5ModelTester:
@@ -595,10 +594,6 @@ class LongT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMix
             model = LongT5Model.from_pretrained(model_name)
             self.assertIsNotNone(model)

-    @unittest.skipIf(
-        not is_torch_available() or is_torch_less_than_1_11,
-        "Test failed with torch < 1.11 with an exception in a C++ file.",
-    )
     @slow
     def test_export_to_onnx(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
......
@@ -28,10 +28,6 @@ from ...test_image_processing_common import ImageProcessingTestMixin, prepare_im
 if is_torch_available():
     import torch

-    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
-else:
-    is_torch_greater_or_equal_than_1_11 = False

 if is_vision_available():
     from PIL import Image
@@ -85,10 +81,6 @@ class Pix2StructImageProcessingTester(unittest.TestCase):
     )


-@unittest.skipIf(
-    not is_torch_greater_or_equal_than_1_11,
-    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
-)
 @require_torch
 @require_vision
 class Pix2StructImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
@@ -290,10 +282,6 @@ class Pix2StructImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase)
     )


-@unittest.skipIf(
-    not is_torch_greater_or_equal_than_1_11,
-    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
-)
 @require_torch
 @require_vision
 class Pix2StructImageProcessingTestFourChannels(ImageProcessingTestMixin, unittest.TestCase):
......
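These test files assigned the flag a `False` fallback when torch was absent so that the module-level `@unittest.skipIf` decorator could still evaluate at import time. A minimal sketch of that now-removed gating pattern, using try/except in place of the `is_torch_available()` check the diff shows:

```python
# Sketch of the removed pattern: the fallback keeps the module importable
# (and the test skipped) when torch isn't installed.
import unittest

try:
    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
except ImportError:  # torch backend unavailable
    is_torch_greater_or_equal_than_1_11 = False

@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
class ExampleGatedTest(unittest.TestCase):
    def test_noop(self):
        self.assertTrue(True)
```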
@@ -49,9 +49,6 @@ if is_torch_available():
         Pix2StructVisionModel,
     )
     from transformers.models.pix2struct.modeling_pix2struct import PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST
-    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
-else:
-    is_torch_greater_or_equal_than_1_11 = False


 if is_vision_available():
@@ -746,10 +743,6 @@ def prepare_img():
     return im


-@unittest.skipIf(
-    not is_torch_greater_or_equal_than_1_11,
-    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
-)
 @require_vision
 @require_torch
 @slow
......
@@ -19,14 +19,9 @@ import numpy as np
 import pytest

 from transformers.testing_utils import require_torch, require_vision
-from transformers.utils import is_torch_available, is_vision_available
+from transformers.utils import is_vision_available

-if is_torch_available():
-    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
-else:
-    is_torch_greater_or_equal_than_1_11 = False

 if is_vision_available():
     from PIL import Image
@@ -39,10 +34,6 @@ if is_vision_available():
     )


-@unittest.skipIf(
-    not is_torch_greater_or_equal_than_1_11,
-    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
-)
 @require_vision
 @require_torch
 class Pix2StructProcessorTest(unittest.TestCase):
......
@@ -45,10 +45,6 @@ if is_torch_available():
     from transformers import Pop2PianoForConditionalGeneration
     from transformers.models.pop2piano.modeling_pop2piano import POP2PIANO_PRETRAINED_MODEL_ARCHIVE_LIST
-    from transformers.pytorch_utils import is_torch_1_8_0
-else:
-    is_torch_1_8_0 = False


 @require_torch
@@ -616,10 +612,6 @@ class Pop2PianoModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTester
         self.assertIsNotNone(model)

     @require_onnx
-    @unittest.skipIf(
-        is_torch_1_8_0,
-        reason="Test has a segmentation fault on torch 1.8.0",
-    )
     def test_export_to_onnx(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         model = Pop2PianoForConditionalGeneration(config_and_inputs[0]).to(torch_device)
......
@@ -906,7 +906,6 @@ class UniSpeechSatModelIntegrationTest(unittest.TestCase):
         )
         self.assertEqual(labels[0, :, 0].sum(), 270)
         self.assertEqual(labels[0, :, 1].sum(), 647)
-        # TODO: update the tolerance after the CI moves to torch 1.10
         self.assertTrue(torch.allclose(outputs.logits[:, :4], expected_logits, atol=1e-2))

     def test_inference_speaker_verification(self):
@@ -931,5 +930,4 @@ class UniSpeechSatModelIntegrationTest(unittest.TestCase):
         # id10002 vs id10004
         self.assertAlmostEqual(cosine_sim(embeddings[2], embeddings[3]).item(), 0.5616, 3)
-        # TODO: update the tolerance after the CI moves to torch 1.10
         self.assertAlmostEqual(outputs.loss.item(), 18.5925, 2)
@@ -1928,7 +1928,6 @@ class Wav2Vec2ModelIntegrationTest(unittest.TestCase):
         )
         self.assertEqual(labels[0, :, 0].sum(), 555)
         self.assertEqual(labels[0, :, 1].sum(), 299)
-        # TODO: update the tolerance after the CI moves to torch 1.10
         self.assertTrue(torch.allclose(outputs.logits[:, :4], expected_logits, atol=1e-2))

     def test_inference_speaker_verification(self):
@@ -1953,7 +1952,6 @@ class Wav2Vec2ModelIntegrationTest(unittest.TestCase):
         # id10002 vs id10004
         self.assertAlmostEqual(cosine_sim(embeddings[2], embeddings[3]).numpy(), 0.7594, 3)
-        # TODO: update the tolerance after the CI moves to torch 1.10
         self.assertAlmostEqual(outputs.loss.item(), 17.7963, 2)

     @require_torchaudio
......
@@ -515,7 +515,6 @@ class WavLMModelIntegrationTest(unittest.TestCase):
         EXPECTED_HIDDEN_STATES_SLICE = torch.tensor(
             [[[0.0577, 0.1161], [0.0579, 0.1165]], [[0.0199, 0.1237], [0.0059, 0.0605]]]
         )
-        # TODO: update the tolerance after the CI moves to torch 1.10
         self.assertTrue(torch.allclose(hidden_states_slice, EXPECTED_HIDDEN_STATES_SLICE, atol=5e-2))

     def test_inference_large(self):
@@ -567,7 +566,6 @@ class WavLMModelIntegrationTest(unittest.TestCase):
         )
         self.assertEqual(labels[0, :, 0].sum(), 258)
         self.assertEqual(labels[0, :, 1].sum(), 647)
-        # TODO: update the tolerance after the CI moves to torch 1.10
         self.assertTrue(torch.allclose(outputs.logits[:, :4], expected_logits, atol=1e-2))

     def test_inference_speaker_verification(self):
@@ -592,5 +590,4 @@ class WavLMModelIntegrationTest(unittest.TestCase):
         # id10002 vs id10004
         self.assertAlmostEqual(cosine_sim(embeddings[2], embeddings[3]).item(), 0.4780, 3)
-        # TODO: update the tolerance after the CI moves to torch 1.10
         self.assertAlmostEqual(outputs.loss.item(), 18.4154, 2)
@@ -20,7 +20,6 @@ from transformers import MODEL_FOR_VISION_2_SEQ_MAPPING, TF_MODEL_FOR_VISION_2_S
 from transformers.pipelines import pipeline
 from transformers.testing_utils import (
     is_pipeline_test,
-    is_torch_available,
     require_tf,
     require_torch,
     require_vision,
@@ -30,12 +29,6 @@ from transformers.testing_utils import (

 from .test_pipelines_common import ANY

-if is_torch_available():
-    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
-else:
-    is_torch_greater_or_equal_than_1_11 = False

 if is_vision_available():
     from PIL import Image
 else:
@@ -217,9 +210,6 @@ class ImageToTextPipelineTests(unittest.TestCase):
         with self.assertRaises(ValueError):
             outputs = pipe([image, image], prompt=[prompt, prompt])

-    @unittest.skipIf(
-        not is_torch_greater_or_equal_than_1_11, reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`."
-    )
     @slow
     @require_torch
     def test_conditional_generation_pt_pix2struct(self):
......