Commit e6c0019c authored by Aymeric Augustin

Remove unused variables in tests.

parent 495580da
@@ -284,7 +284,6 @@ class ModelTesterMixin:
             multihead_outputs = head_mask.grad
             attentions = outputs[-1]
-            hidden_states = outputs[-2]
             # Remove Nan
             for t in attentions:
@@ -590,7 +589,7 @@ class ModelTesterMixin:
             inputs_dict["decoder_inputs_embeds"] = wte(decoder_input_ids)
         with torch.no_grad():
-            outputs = model(**inputs_dict)
+            model(**inputs_dict)
 class ConfigTester(object):
@@ -332,7 +332,7 @@ class TFModelTesterMixin:
             inputs_dict["encoder_inputs_embeds"] = self._get_embeds(wte, encoder_input_ids)
             inputs_dict["decoder_inputs_embeds"] = self._get_embeds(wte, decoder_input_ids)
-        outputs = model(inputs_dict)
+        model(inputs_dict)
 def ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None):
@@ -224,7 +224,6 @@ class TFXLMModelTest(TFModelTesterMixin, unittest.TestCase):
             inputs = {"input_ids": input_ids, "lengths": input_lengths}
-            outputs = model(inputs)
             start_logits, end_logits = model(inputs)
             result = {
@@ -159,7 +159,6 @@ class TokenizerTesterMixin:
         self.assertEqual(all_size_2, all_size + len(new_toks))
         tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
-        out_string = tokenizer.decode(tokens)
         self.assertGreaterEqual(len(tokens), 4)
         self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
@@ -178,7 +177,6 @@ class TokenizerTesterMixin:
         tokens = tokenizer.encode(
             ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
         )
-        out_string = tokenizer.decode(tokens)
         self.assertGreaterEqual(len(tokens), 6)
         self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
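For context, every hunk above applies the same mechanical fix: a test bound a return value (outputs, out_string) that no later assertion read, so the unused binding is dropped; where the call itself is still the thing being exercised, only the bare call is kept, and where nothing depended on the call at all (the tokenizer.decode lines) it is removed outright. Below is a minimal, self-contained sketch of that pattern, using a hypothetical stand-in model rather than the repository's actual test fixtures.

import unittest


class UnusedVariableExample(unittest.TestCase):
    # Hypothetical stand-in for a model under test; illustration only.
    def _model(self, **inputs):
        return {"logits": [0.0], "attentions": []}

    def test_before(self):
        # Before: the result is assigned but never read afterwards,
        # which linters such as flake8 report as F841.
        outputs = self._model(input_ids=[1, 2, 3])  # noqa: F841

    def test_after(self):
        # After: keep the call for its side effects, drop the unused binding.
        self._model(input_ids=[1, 2, 3])


if __name__ == "__main__":
    unittest.main()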