Unverified Commit 0754217c authored by Yih-Dar, committed by GitHub

Use `LoggingLevel` context manager in 3 tests (#28575)



* capture logs inside a `with LoggingLevel(...)` block

* remove the `is_flaky` decorator

---------
Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
parent d2cdefb9
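All three tests apply the same change: the existing `CaptureLogger` block is nested inside `LoggingLevel(logging.WARNING)`, so the warning under test is emitted no matter which verbosity the test session happens to be running with; that ambient verbosity is what made the log capture flaky and required the `is_flaky` decorator. Below is a minimal standalone sketch of the behaviour, using only helpers that appear in this diff; the `logger.warning(...)` call stands in for the warnings the real tests trigger via `from_pretrained` and `warn_if_padding_and_no_attention_mask`.

from transformers.testing_utils import CaptureLogger, LoggingLevel
from transformers.utils import logging

logger = logging.get_logger("transformers.modeling_utils")

# Simulate a session running at low verbosity (e.g. set by an earlier test or
# via TRANSFORMERS_VERBOSITY): the warning is filtered out before it reaches
# the handler that CaptureLogger installs, so the capture stays empty and an
# assertion on cl.out would fail.
logging.set_verbosity_error()
with CaptureLogger(logger) as cl:
    logger.warning("some warning")
assert cl.out == ""

# Pinning the verbosity to WARNING around the capture makes the outcome
# deterministic, which is why the is_flaky decorator can be dropped.
with LoggingLevel(logging.WARNING):
    with CaptureLogger(logger) as cl:
        logger.warning("some warning")
assert "some warning" in cl.out

logging.set_verbosity_warning()  # restore the library default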
@@ -43,8 +43,8 @@ from transformers.testing_utils import (
     TOKEN,
     USER,
     CaptureLogger,
+    LoggingLevel,
     TestCasePlus,
-    is_flaky,
     is_staging_test,
     require_accelerate,
     require_flax,
@@ -290,16 +290,14 @@ class ModelUtilsTest(TestCasePlus):
         self.assertIsNotNone(model)

-    @is_flaky(
-        description="Capturing logs is flaky: https://app.circleci.com/pipelines/github/huggingface/transformers/81004/workflows/4919e5c9-0ea2-457b-ad4f-65371f79e277/jobs/1038999"
-    )
     def test_model_from_pretrained_with_different_pretrained_model_name(self):
         model = T5ForConditionalGeneration.from_pretrained(TINY_T5)
         self.assertIsNotNone(model)

         logger = logging.get_logger("transformers.configuration_utils")
-        with CaptureLogger(logger) as cl:
-            BertModel.from_pretrained(TINY_T5)
+        with LoggingLevel(logging.WARNING):
+            with CaptureLogger(logger) as cl:
+                BertModel.from_pretrained(TINY_T5)

         self.assertTrue("You are using a model of type t5 to instantiate a model of type bert" in cl.out)

     def test_model_from_config_torch_dtype(self):
@@ -1024,9 +1022,6 @@ class ModelUtilsTest(TestCasePlus):
             # Should only complain about the missing bias
             self.assertListEqual(load_info["missing_keys"], ["decoder.bias"])

-    @is_flaky(
-        description="Capturing logs is flaky: https://app.circleci.com/pipelines/github/huggingface/transformers/81004/workflows/4919e5c9-0ea2-457b-ad4f-65371f79e277/jobs/1038999"
-    )
     def test_unexpected_keys_warnings(self):
         model = ModelWithHead(PretrainedConfig())
         logger = logging.get_logger("transformers.modeling_utils")
@@ -1034,8 +1029,9 @@ class ModelUtilsTest(TestCasePlus):
             model.save_pretrained(tmp_dir)
             # Loading the model with a new class, we don't get a warning for unexpected weights, just an info
-            with CaptureLogger(logger) as cl:
-                _, loading_info = BaseModel.from_pretrained(tmp_dir, output_loading_info=True)
+            with LoggingLevel(logging.WARNING):
+                with CaptureLogger(logger) as cl:
+                    _, loading_info = BaseModel.from_pretrained(tmp_dir, output_loading_info=True)

             self.assertNotIn("were not used when initializing ModelWithHead", cl.out)
             self.assertEqual(
                 set(loading_info["unexpected_keys"]),
@@ -1046,8 +1042,9 @@ class ModelUtilsTest(TestCasePlus):
             state_dict = model.state_dict()
             state_dict["added_key"] = copy.deepcopy(state_dict["linear.weight"])
             safe_save_file(state_dict, os.path.join(tmp_dir, SAFE_WEIGHTS_NAME), metadata={"format": "pt"})
-            with CaptureLogger(logger) as cl:
-                _, loading_info = ModelWithHead.from_pretrained(tmp_dir, output_loading_info=True)
+            with LoggingLevel(logging.WARNING):
+                with CaptureLogger(logger) as cl:
+                    _, loading_info = ModelWithHead.from_pretrained(tmp_dir, output_loading_info=True)

             self.assertIn("were not used when initializing ModelWithHead: ['added_key']", cl.out)
             self.assertEqual(loading_info["unexpected_keys"], ["added_key"])
@@ -1056,75 +1053,82 @@ class ModelUtilsTest(TestCasePlus):
         with self.subTest("Ensure no warnings when pad_token_id is None."):
             logger.warning_once.cache_clear()
-            with CaptureLogger(logger) as cl:
-                config_no_pad_token = PretrainedConfig()
-                config_no_pad_token.pad_token_id = None
-                model = ModelWithHead(config_no_pad_token)
-                input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]])
-                model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
+            with LoggingLevel(logging.WARNING):
+                with CaptureLogger(logger) as cl:
+                    config_no_pad_token = PretrainedConfig()
+                    config_no_pad_token.pad_token_id = None
+                    model = ModelWithHead(config_no_pad_token)
+                    input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]])
+                    model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
             self.assertNotIn("We strongly recommend passing in an `attention_mask`", cl.out)

         with self.subTest("Ensure no warnings when there is an attention_mask."):
             logger.warning_once.cache_clear()
-            with CaptureLogger(logger) as cl:
-                config = PretrainedConfig()
-                config.pad_token_id = 0
-                model = ModelWithHead(config)
-                input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]])
-                attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]])
-                model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+            with LoggingLevel(logging.WARNING):
+                with CaptureLogger(logger) as cl:
+                    config = PretrainedConfig()
+                    config.pad_token_id = 0
+                    model = ModelWithHead(config)
+                    input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]])
+                    attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]])
+                    model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
             self.assertNotIn("We strongly recommend passing in an `attention_mask`", cl.out)

         with self.subTest("Ensure no warnings when there are no pad_token_ids in the input_ids."):
             logger.warning_once.cache_clear()
-            with CaptureLogger(logger) as cl:
-                config = PretrainedConfig()
-                config.pad_token_id = 0
-                model = ModelWithHead(config)
-                input_ids = torch.tensor([[1, 345, 232, 328, 740, 140, 1695, 69, 6078, 2341, 25]])
-                model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
+            with LoggingLevel(logging.WARNING):
+                with CaptureLogger(logger) as cl:
+                    config = PretrainedConfig()
+                    config.pad_token_id = 0
+                    model = ModelWithHead(config)
+                    input_ids = torch.tensor([[1, 345, 232, 328, 740, 140, 1695, 69, 6078, 2341, 25]])
+                    model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
             self.assertNotIn("We strongly recommend passing in an `attention_mask`", cl.out)

         with self.subTest("Ensure a warning is shown when the input_ids start with a pad_token_id."):
             logger.warning_once.cache_clear()
-            with CaptureLogger(logger) as cl:
-                config = PretrainedConfig()
-                config.pad_token_id = 0
-                model = ModelWithHead(config)
-                input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 432, 5232]])
-                model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
+            with LoggingLevel(logging.WARNING):
+                with CaptureLogger(logger) as cl:
+                    config = PretrainedConfig()
+                    config.pad_token_id = 0
+                    model = ModelWithHead(config)
+                    input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 432, 5232]])
+                    model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
             self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out)

         with self.subTest("Ensure a warning is shown when the input_ids end with a pad_token_id."):
             logger.warning_once.cache_clear()
-            with CaptureLogger(logger) as cl:
-                config = PretrainedConfig()
-                config.pad_token_id = 0
-                model = ModelWithHead(config)
-                input_ids = torch.tensor([[432, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]])
-                model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
+            with LoggingLevel(logging.WARNING):
+                with CaptureLogger(logger) as cl:
+                    config = PretrainedConfig()
+                    config.pad_token_id = 0
+                    model = ModelWithHead(config)
+                    input_ids = torch.tensor([[432, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]])
+                    model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
             self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out)

         with self.subTest("Ensure that the warning is shown at most once."):
             logger.warning_once.cache_clear()
-            with CaptureLogger(logger) as cl:
-                config = PretrainedConfig()
-                config.pad_token_id = 0
-                model = ModelWithHead(config)
-                input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]])
-                model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
-                model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
+            with LoggingLevel(logging.WARNING):
+                with CaptureLogger(logger) as cl:
+                    config = PretrainedConfig()
+                    config.pad_token_id = 0
+                    model = ModelWithHead(config)
+                    input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]])
+                    model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
+                    model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
             self.assertEqual(cl.out.count("We strongly recommend passing in an `attention_mask`"), 1)

         with self.subTest("Ensure a different warning is shown when the pad_token_id is equal to the bos_token_id."):
             logger.warning_once.cache_clear()
-            with CaptureLogger(logger) as cl:
-                config = PretrainedConfig()
-                config.pad_token_id = 0
-                config.bos_token_id = config.pad_token_id
-                model = ModelWithHead(config)
-                input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]])
-                model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
+            with LoggingLevel(logging.WARNING):
+                with CaptureLogger(logger) as cl:
+                    config = PretrainedConfig()
+                    config.pad_token_id = 0
+                    config.bos_token_id = config.pad_token_id
+                    model = ModelWithHead(config)
+                    input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]])
+                    model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
             self.assertIn("You may ignore this warning if your `pad_token_id`", cl.out)

         if not is_torchdynamo_available():
...
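For reference, a context manager with the behaviour these tests rely on can be written in a few lines. The sketch below is illustrative only and is not the actual `transformers.testing_utils.LoggingLevel` implementation; the lowercase name is deliberate to mark it as a hypothetical stand-in.

from contextlib import contextmanager

from transformers.utils import logging


@contextmanager
def logging_level(level):
    """Temporarily set the transformers verbosity, restoring the previous
    level on exit (hypothetical stand-in for testing_utils.LoggingLevel)."""
    previous = logging.get_verbosity()
    try:
        logging.set_verbosity(level)
        yield
    finally:
        logging.set_verbosity(previous)


# Usage mirrors the pattern in the diff above:
# with logging_level(logging.WARNING):
#     with CaptureLogger(logger) as cl:
#         ...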