Unverified commit b5bab710 authored by Patrick von Platen, committed by GitHub

correct (#13585)

parent 89da1bfe
@@ -137,11 +137,15 @@ class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unitt
         feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
         speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
-        paddings = ["longest", "max_length", "do_not_pad"]
-        max_lengths = [None, 16, None]
-        var_tolerances = [1e-3, 1e-3, 5e-1]
         # TODO(Patrick, Suraj, Anton) - It's surprising that "non-padded/non-numpified" padding
         # results in quite inaccurate variance computation after (see 5e-1 tolerance)
+        # Issue is filed and PR is underway: https://github.com/huggingface/transformers/issues/13539
+        # paddings = ["longest", "max_length", "do_not_pad"]
+        # max_lengths = [None, 16, None]
+        # var_tolerances = [1e-3, 1e-3, 5e-1]
+        paddings = ["longest", "max_length"]
+        max_lengths = [None, 16]
+        var_tolerances = [1e-3, 1e-3]
         for max_length, padding, var_tol in zip(max_lengths, paddings, var_tolerances):
             inputs = feature_extractor(
@@ -163,11 +167,15 @@ class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unitt
         feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
         speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
-        paddings = ["longest", "max_length", "do_not_pad"]
-        max_lengths = [None, 16, None]
-        var_tolerances = [1e-3, 1e-3, 5e-1]
         # TODO(Patrick, Suraj, Anton) - It's surprising that "non-padded/non-numpified" padding
         # results in quite inaccurate variance computation after (see 5e-1 tolerance)
+        # Issue is filed and PR is underway: https://github.com/huggingface/transformers/issues/13539
+        # paddings = ["longest", "max_length", "do_not_pad"]
+        # max_lengths = [None, 16, None]
+        # var_tolerances = [1e-3, 1e-3, 5e-1]
+        paddings = ["longest", "max_length"]
+        max_lengths = [None, 16]
+        var_tolerances = [1e-3, 1e-3]
         for max_length, padding, var_tol in zip(max_lengths, paddings, var_tolerances):
             inputs = feature_extractor(
                 speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
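For context on what the tightened test asserts: the commented-out `do_not_pad` case returns plain Python lists rather than numpified arrays, which made the variance check so loose (5e-1) that it was dropped pending the linked issue. The remaining two padding modes are expected to hold a 1e-3 tolerance. A minimal, self-contained sketch of that property, assuming per-utterance zero-mean/unit-variance normalization as performed by the feature extractor (the `normalize` helper below is illustrative, not the library's API):

```python
import numpy as np

def normalize(features: np.ndarray) -> np.ndarray:
    # Per-utterance normalization: zero mean, unit variance along the time axis.
    return (features - features.mean(axis=0)) / (features.std(axis=0) + 1e-10)

rng = np.random.default_rng(0)
features = rng.normal(loc=3.0, scale=2.0, size=(100, 24))  # stand-in for log-mel frames

normed = normalize(features)
var_tol = 1e-3  # the tolerance kept for the "longest" / "max_length" cases

# Analogue of the test's assertion: the normalized, unpadded frames
# have variance 1 (and mean ~0) within var_tol.
assert np.all(np.abs(normed.var(axis=0) - 1) < var_tol)
assert np.all(np.abs(normed.mean(axis=0)) < var_tol)
print("normalized variance within", var_tol)
```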