Unverified commit f1fe1846, authored by Sylvain Gugger, committed by GitHub

Use labels to remove deprecation warnings (#4807)

parent 5c0cfc2c
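
The rename itself is mechanical: every call site that passed loss targets through the deprecated `masked_lm_labels` or `lm_labels` keyword now uses the unified `labels` keyword. A minimal before/after sketch (the tiny config sizes are illustrative placeholders, not the test suite's values, and the tuple-style outputs assume the transformers version this commit targets):

```python
import torch
from transformers import BertConfig, BertForMaskedLM

# Toy config so the example runs quickly; sizes are placeholders.
config = BertConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=37,
)
model = BertForMaskedLM(config)
model.eval()

input_ids = torch.randint(0, config.vocab_size, (2, 7))

# Before (emits a deprecation warning):
#   loss, prediction_scores = model(input_ids, masked_lm_labels=input_ids)

# After: the unified keyword.
loss, prediction_scores = model(input_ids, labels=input_ids)
print(loss.item(), prediction_scores.shape)
```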
@@ -162,7 +162,7 @@ class AlbertModelTest(ModelTesterMixin, unittest.TestCase):
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
-masked_lm_labels=token_labels,
+labels=token_labels,
sentence_order_label=sequence_labels,
)
result = {
@@ -183,7 +183,7 @@ class AlbertModelTest(ModelTesterMixin, unittest.TestCase):
model.to(torch_device)
model.eval()
loss, prediction_scores = model(
-input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels
+input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
)
result = {
"loss": loss,
......
@@ -296,7 +296,7 @@ class BartTranslationTests(unittest.TestCase):
lm_model = BartForConditionalGeneration(config).to(torch_device)
context = torch.Tensor([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]]).long().to(torch_device)
summary = torch.Tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]]).long().to(torch_device)
-loss, logits, enc_features = lm_model(input_ids=context, decoder_input_ids=summary, lm_labels=summary)
+loss, logits, enc_features = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary)
expected_shape = (*summary.shape, config.vocab_size)
self.assertEqual(logits.shape, expected_shape)
@@ -361,7 +361,7 @@ class BartHeadTests(unittest.TestCase):
lm_labels = ids_tensor([batch_size, input_ids.shape[1]], self.vocab_size).to(torch_device)
lm_model = BartForConditionalGeneration(config)
lm_model.to(torch_device)
-loss, logits, enc_features = lm_model(input_ids=input_ids, lm_labels=lm_labels)
+loss, logits, enc_features = lm_model(input_ids=input_ids, labels=lm_labels)
expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(logits.shape, expected_shape)
self.assertIsInstance(loss.item(), float)
@@ -381,7 +381,7 @@ class BartHeadTests(unittest.TestCase):
lm_model = BartForConditionalGeneration(config).to(torch_device)
context = torch.Tensor([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]]).long().to(torch_device)
summary = torch.Tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]]).long().to(torch_device)
-loss, logits, enc_features = lm_model(input_ids=context, decoder_input_ids=summary, lm_labels=summary)
+loss, logits, enc_features = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary)
expected_shape = (*summary.shape, config.vocab_size)
self.assertEqual(logits.shape, expected_shape)
......
@@ -218,7 +218,7 @@ class BertModelTester:
model.to(torch_device)
model.eval()
loss, prediction_scores = model(
-input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels
+input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
)
result = {
"loss": loss,
@@ -248,7 +248,7 @@ class BertModelTester:
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
-masked_lm_labels=token_labels,
+labels=token_labels,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
@@ -256,7 +256,7 @@ class BertModelTester:
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
-masked_lm_labels=token_labels,
+labels=token_labels,
encoder_hidden_states=encoder_hidden_states,
)
result = {
@@ -294,7 +294,7 @@ class BertModelTester:
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
-masked_lm_labels=token_labels,
+labels=token_labels,
next_sentence_label=sequence_labels,
)
result = {
......
@@ -151,7 +151,7 @@ class DistilBertModelTest(ModelTesterMixin, unittest.TestCase):
model = DistilBertForMaskedLM(config=config)
model.to(torch_device)
model.eval()
-loss, prediction_scores = model(input_ids, attention_mask=input_mask, masked_lm_labels=token_labels)
+loss, prediction_scores = model(input_ids, attention_mask=input_mask, labels=token_labels)
result = {
"loss": loss,
"prediction_scores": prediction_scores,
......
@@ -180,7 +180,7 @@ class ElectraModelTest(ModelTesterMixin, unittest.TestCase):
model.to(torch_device)
model.eval()
loss, prediction_scores = model(
-input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels
+input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
)
result = {
"loss": loss,
......
@@ -268,7 +268,7 @@ class GPT2ModelTest(ModelTesterMixin, unittest.TestCase):
"mc_token_ids": mc_token_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
"lm_labels": multiple_choice_inputs_ids,
"labels": multiple_choice_inputs_ids,
}
loss, lm_logits, mc_logits, _ = model(**inputs)
......
@@ -164,7 +164,7 @@ class LongformerModelTester(object):
model.to(torch_device)
model.eval()
loss, prediction_scores = model(
-input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels
+input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
)
result = {
"loss": loss,
@@ -361,7 +361,7 @@ class LongformerModelIntegrationTest(unittest.TestCase):
[[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=torch.long, device=torch_device
) # long input
-loss, prediction_scores = model(input_ids, masked_lm_labels=input_ids)
+loss, prediction_scores = model(input_ids, labels=input_ids)
expected_loss = torch.tensor(0.0620, device=torch_device)
expected_prediction_scores_sum = torch.tensor(-6.1599e08, device=torch_device)
......
@@ -169,7 +169,7 @@ class OpenAIGPTModelTest(ModelTesterMixin, unittest.TestCase):
model.to(torch_device)
model.eval()
-loss, lm_logits, mc_logits = model(input_ids, token_type_ids=token_type_ids, lm_labels=input_ids)
+loss, lm_logits, mc_logits = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
result = {"loss": loss, "lm_logits": lm_logits}
......
@@ -155,7 +155,7 @@ class RobertaModelTest(ModelTesterMixin, unittest.TestCase):
model.to(torch_device)
model.eval()
loss, prediction_scores = model(
-input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels
+input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
)
result = {
"loss": loss,
......
@@ -206,7 +206,7 @@ class T5ModelTest(ModelTesterMixin, unittest.TestCase):
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
-lm_labels=lm_labels,
+labels=lm_labels,
)
loss, prediction_scores, _, _ = outputs
self.parent.assertEqual(len(outputs), 4)
......
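
At the time of this commit the deprecated keywords still worked: each model's forward mapped the old name onto `labels` and emitted the warning these test changes silence. A hypothetical, self-contained sketch of that kwarg-shim pattern (not the library's exact code):

```python
import warnings
import torch

def forward(input_ids, labels=None, **deprecated_kwargs):
    """Hypothetical kwarg shim: accept the old name, warn, fall through to `labels`."""
    old = deprecated_kwargs.pop("masked_lm_labels", None)
    if old is not None:
        warnings.warn(
            "The `masked_lm_labels` argument is deprecated, use `labels` instead.",
            DeprecationWarning,
        )
        labels = old
    if deprecated_kwargs:
        raise TypeError(f"unexpected keyword arguments: {sorted(deprecated_kwargs)}")
    # A real forward pass would compute logits (and a loss when labels is set);
    # returning labels here just demonstrates that both spellings land in one place.
    return labels

tokens = torch.tensor([[1, 2, 3]])
assert forward(tokens, masked_lm_labels=tokens) is forward(tokens, labels=tokens)
```

Once every call site in the test suite uses `labels`, the suite runs without the deprecation noise and the shims can eventually be dropped.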