Unverified commit a23819ed authored by David Yang, committed by GitHub

Clean up deprecation warnings (#19654)

* Clean up deprecation warnings

Notes:
Some strings in tests were changed to raw strings, which changes the literal content of those strings (escape sequences are no longer interpreted) as they are fed to whatever consumes them.
Test cases covering the `past` side of the `past`/`past_key_values` switch were changed or removed, since the `past` argument already warns about its impending removal.

* Add PILImageResampling abstraction for PIL.Image.Resampling
parent af556a09
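The PILImageResampling change works around an API move in Pillow: version 9.1.0 relocated the resampling filters from module-level constants on `PIL.Image` (e.g. `Image.BICUBIC`) to the `PIL.Image.Resampling` enum, and the old constants emit deprecation warnings. Below is a minimal sketch of how such a shim can look, assuming a version check against Pillow; it is an illustration, not necessarily the exact code added to `transformers.image_utils` in this PR.

```python
# Sketch of a PILImageResampling-style shim (assumed approach, for illustration):
# on Pillow >= 9.1.0 the resampling filters live on the PIL.Image.Resampling enum,
# on older Pillow they are module-level constants on PIL.Image.
import PIL
import PIL.Image
from packaging import version

if version.parse(PIL.__version__) >= version.parse("9.1.0"):
    PILImageResampling = PIL.Image.Resampling
else:
    PILImageResampling = PIL.Image

# Callers can now use one spelling on every Pillow version without warnings:
resample_filter = PILImageResampling.BICUBIC
```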
@@ -150,7 +150,7 @@ def _compute_mask_indices(
num_masked_spans = sequence_length // mask_length
# SpecAugment mask to fill
-spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=np.bool)
+spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
# get random indices to mask
spec_aug_mask_idxs = np.array(
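For context on this `np.bool` change and its repeats below: NumPy deprecated the `np.bool` alias for the built-in `bool` in NumPy 1.20 and removed it in NumPy 1.24, so passing the built-in `bool` (or `np.bool_`) as a dtype is the drop-in replacement. A small sketch:

```python
# np.bool was only an alias for Python's bool; using the built-in bool (or the
# NumPy scalar type np.bool_) as a dtype produces the identical boolean array
# without the deprecation warning.
import numpy as np

mask_a = np.zeros((2, 4), dtype=bool)      # style used in this PR
mask_b = np.zeros((2, 4), dtype=np.bool_)  # explicit NumPy scalar type
assert mask_a.dtype == mask_b.dtype == np.dtype("bool")
```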
@@ -193,7 +193,7 @@ def _compute_mask_indices(
)
# SpecAugment mask to fill
-spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=np.bool)
+spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
spec_aug_mask_idxs = []
max_num_masked_span = compute_num_masked_span(sequence_length)
@@ -266,7 +266,7 @@ def _sample_negative_indices(
sampled_negative_indices = np.zeros(shape=(batch_size, sequence_length, num_negatives), dtype=np.int32)
mask_time_indices = (
-mask_time_indices.astype(np.bool) if mask_time_indices is not None else np.ones(features_shape, dtype=np.bool)
+mask_time_indices.astype(bool) if mask_time_indices is not None else np.ones(features_shape, dtype=bool)
)
for batch_idx in range(batch_size):
@@ -190,7 +190,7 @@ def _compute_mask_indices(
)
# SpecAugment mask to fill
-spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=np.bool)
+spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
spec_aug_mask_idxs = []
max_num_masked_span = compute_num_masked_span(sequence_length)
@@ -264,7 +264,7 @@ def _sample_negative_indices(
sampled_negative_indices = np.zeros(shape=(batch_size, sequence_length, num_negatives), dtype=np.int32)
mask_time_indices = (
-mask_time_indices.astype(np.bool) if mask_time_indices is not None else np.ones(features_shape, dtype=np.bool)
+mask_time_indices.astype(bool) if mask_time_indices is not None else np.ones(features_shape, dtype=bool)
)
for batch_idx in range(batch_size):
@@ -142,7 +142,7 @@ def _compute_mask_indices(
)
# SpecAugment mask to fill
-spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=np.bool)
+spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
spec_aug_mask_idxs = []
max_num_masked_span = compute_num_masked_span(sequence_length)
@@ -898,7 +898,7 @@ class TFXGLMForCausalLM(TFXGLMPreTrainedModel, TFCausalLanguageModelingLoss):
return {
"input_ids": inputs,
"attention_mask": attention_mask,
-"past": past,
+"past_key_values": past,
"use_cache": use_cache,
}
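The `"past"` to `"past_key_values"` rename above only changes the key under which the cache is handed back to the model during generation. A minimal, hypothetical sketch of the pattern (not the real TFXGLM implementation; argument names other than `past_key_values` are illustrative):

```python
# Hypothetical minimal prepare_inputs_for_generation, illustrating the renamed key.
def prepare_inputs_for_generation(inputs, past_key_values=None, attention_mask=None, use_cache=True):
    if past_key_values is not None:
        inputs = inputs[:, -1:]  # with a cache, only the last token is re-fed
    return {
        "input_ids": inputs,
        "attention_mask": attention_mask,
        "past_key_values": past_key_values,  # was "past" before this PR
        "use_cache": use_cache,
    }
```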
@@ -233,7 +233,7 @@ class FlaxBeitModelIntegrationTest(unittest.TestCase):
pixel_values = feature_extractor(images=image, return_tensors="np").pixel_values
# prepare bool_masked_pos
-bool_masked_pos = np.ones((1, 196), dtype=np.bool)
+bool_masked_pos = np.ones((1, 196), dtype=bool)
# forward pass
outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
@@ -31,6 +31,7 @@ if is_vision_available():
from PIL import Image
from transformers import FlavaFeatureExtractor
+from transformers.image_utils import PILImageResampling
from transformers.models.flava.feature_extraction_flava import (
FLAVA_CODEBOOK_MEAN,
FLAVA_CODEBOOK_STD,
@@ -80,7 +81,7 @@ class FlavaFeatureExtractionTester(unittest.TestCase):
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.size = size
-self.resample = resample if resample is not None else Image.BICUBIC
+self.resample = resample if resample is not None else PILImageResampling.BICUBIC
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
@@ -96,7 +97,7 @@ class FlavaFeatureExtractionTester(unittest.TestCase):
self.codebook_do_resize = codebook_do_resize
self.codebook_size = codebook_size
-self.codebook_resample = codebook_resample if codebook_resample is not None else Image.LANCZOS
+self.codebook_resample = codebook_resample if codebook_resample is not None else PILImageResampling.LANCZOS
self.codebook_do_center_crop = codebook_do_center_crop
self.codebook_crop_size = codebook_crop_size
self.codebook_do_map_pixels = codebook_do_map_pixels
(The diff for one file is collapsed and not shown here.)
@@ -362,7 +362,7 @@ class FlaxWav2Vec2UtilsTest(unittest.TestCase):
self.assertTrue(abs(ppl.item() - 141.4291) < 1e-3)
# mask half of the input
-mask = np.ones((2,), dtype=np.bool)
+mask = np.ones((2,), dtype=bool)
mask[0] = 0
ppl = FlaxWav2Vec2GumbelVectorQuantizer._compute_perplexity(probs, mask)
@@ -93,7 +93,7 @@ class QAPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
question_answerer(question="In what field is HuggingFace working ?", context=None)
outputs = question_answerer(
-question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris.", topk=20
+question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris.", top_k=20
)
self.assertEqual(
outputs, [{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)} for i in range(20)]
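The pipeline change above swaps the deprecated `topk` argument for `top_k`. A short usage sketch (this downloads the default question-answering model when run):

```python
# Requesting several candidate answers with the renamed argument.
from transformers import pipeline

question_answerer = pipeline("question-answering")
outputs = question_answerer(
    question="Where was HuggingFace founded ?",
    context="HuggingFace was founded in Paris.",
    top_k=2,  # replaces the deprecated `topk`
)
# With top_k > 1 the result is a list of dicts with "answer", "start", "end", "score".
print(outputs)
```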
@@ -1984,9 +1984,14 @@ class UtilsFunctionsTest(unittest.TestCase):
@unpack_inputs
def call(
-self, input_ids=None, past=None, output_attentions=None, output_hidden_states=None, return_dict=None
+self,
+input_ids=None,
+past_key_values=None,
+output_attentions=None,
+output_hidden_states=None,
+return_dict=None,
):
-return input_ids, past, output_attentions, output_hidden_states, return_dict
+return input_ids, past_key_values, output_attentions, output_hidden_states, return_dict
@unpack_inputs
def foo(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None):
@@ -1994,55 +1999,48 @@ class UtilsFunctionsTest(unittest.TestCase):
dummy_model = DummyModel()
input_ids = tf.constant([0, 1, 2, 3], dtype=tf.int64)
-past = tf.constant([4, 5, 6, 7], dtype=tf.int64)
+past_key_values = tf.constant([4, 5, 6, 7], dtype=tf.int64)
pixel_values = tf.constant([8, 9, 10, 11], dtype=tf.int64)
# test case 1: Pass inputs as keyword arguments; Booleans are inherited from the config.
-output = dummy_model.call(input_ids=input_ids, past=past)
+output = dummy_model.call(input_ids=input_ids, past_key_values=past_key_values)
tf.debugging.assert_equal(output[0], input_ids)
-tf.debugging.assert_equal(output[1], past)
+tf.debugging.assert_equal(output[1], past_key_values)
self.assertFalse(output[2])
self.assertFalse(output[3])
self.assertFalse(output[4])
# test case 2: Same as above, but with positional arguments.
-output = dummy_model.call(input_ids, past)
+output = dummy_model.call(input_ids, past_key_values)
tf.debugging.assert_equal(output[0], input_ids)
-tf.debugging.assert_equal(output[1], past)
+tf.debugging.assert_equal(output[1], past_key_values)
self.assertFalse(output[2])
self.assertFalse(output[3])
self.assertFalse(output[4])
# test case 3: We can also pack everything in the first input.
-output = dummy_model.call(input_ids={"input_ids": input_ids, "past": past})
+output = dummy_model.call(input_ids={"input_ids": input_ids, "past_key_values": past_key_values})
tf.debugging.assert_equal(output[0], input_ids)
-tf.debugging.assert_equal(output[1], past)
+tf.debugging.assert_equal(output[1], past_key_values)
self.assertFalse(output[2])
self.assertFalse(output[3])
self.assertFalse(output[4])
# test case 4: Explicit boolean arguments should override the config.
-output = dummy_model.call(input_ids=input_ids, past=past, output_attentions=False, return_dict=True)
+output = dummy_model.call(
+input_ids=input_ids, past_key_values=past_key_values, output_attentions=False, return_dict=True
+)
tf.debugging.assert_equal(output[0], input_ids)
-tf.debugging.assert_equal(output[1], past)
+tf.debugging.assert_equal(output[1], past_key_values)
self.assertFalse(output[2])
self.assertFalse(output[3])
self.assertTrue(output[4])
# test case 5: Unexpected arguments should raise an exception.
with self.assertRaises(ValueError):
-output = dummy_model.call(input_ids=input_ids, past=past, foo="bar")
-# test case 6: Despite the above, `past_key_values` should be interchangeable with `past`
-# (the decorator moves it to `past`, or vice-versa, depending on the signature).
-output = dummy_model.call(input_ids=input_ids, past_key_values=past)
-tf.debugging.assert_equal(output[0], input_ids)
-tf.debugging.assert_equal(output[1], past)
-self.assertFalse(output[2])
-self.assertFalse(output[3])
-self.assertFalse(output[4])
+output = dummy_model.call(input_ids=input_ids, past_key_values=past_key_values, foo="bar")
-# test case 7: the decorator is independent from `main_input_name` -- it treats the first argument of the
+# test case 6: the decorator is independent from `main_input_name` -- it treats the first argument of the
# decorated function as its main input.
output = dummy_model.foo(pixel_values=pixel_values)
tf.debugging.assert_equal(output[0], pixel_values)
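The test above pins down the behaviour expected from `@unpack_inputs`: keyword or positional arguments are accepted, a dict packed into the first argument is unpacked over the others, boolean flags fall back to the model config, and unknown arguments raise a `ValueError`. The toy re-implementation below mimics that behaviour and is inferred from the test cases only; it is not the real decorator in `transformers`.

```python
# Toy decorator mimicking the behaviour the test exercises; illustrative only.
import functools
import inspect


def toy_unpack_inputs(func):
    signature = inspect.signature(func)
    param_names = [name for name in signature.parameters if name != "self"]
    main_input_name = param_names[0]  # first argument of the decorated function

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        # map positional arguments onto parameter names, then merge keyword arguments
        inputs = dict(zip(param_names, args))
        inputs.update(kwargs)

        # test case 3: a dict packed into the main input is spread over the other arguments
        if isinstance(inputs.get(main_input_name), dict):
            inputs.update(inputs.pop(main_input_name))

        # test case 5: unexpected arguments raise a ValueError
        unexpected = set(inputs) - set(param_names)
        if unexpected:
            raise ValueError(f"Unexpected arguments: {sorted(unexpected)}")

        # test cases 1 and 4: boolean flags default to the config unless given explicitly
        for flag in ("output_attentions", "output_hidden_states", "return_dict"):
            if flag in param_names and inputs.get(flag) is None:
                inputs[flag] = getattr(self.config, flag, False)

        return func(self, **inputs)

    return wrapper
```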