Unverified Commit 7293fdc5 authored by Yih-Dar, committed by GitHub

Deprecate `TransfoXL` (#27607)



* fix

* fix

* trigger

* Apply suggestions from code review

Co-authored-by: Lysandre Debut <hi@lysand.re>

* tic

* revert

* revert

---------
Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
Co-authored-by: Lysandre Debut <hi@lysand.re>
parent 623432dc
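For context: deprecating a model here means moving its sources under `src/transformers/models/deprecated/` (see the file-list diff at the bottom) while the public imports keep resolving. A minimal sketch of what that looks like for callers, assuming a PyTorch install; the tiny config values are borrowed from the tester below, and this illustrates the path change only, not the exact deprecation mechanism:

from transformers import TransfoXLConfig, TransfoXLLMHeadModel  # public API unchanged

# The implementation modules now live under models/deprecated/ (per this diff):
from transformers.models.deprecated.transfo_xl import modeling_transfo_xl  # noqa: F401

# A tiny model is still constructible exactly as before the move.
config = TransfoXLConfig(
    vocab_size=99, cutoffs=[10, 50, 80], d_model=32, d_embed=32,
    n_head=4, d_head=8, d_inner=128, div_val=2, n_layer=2, mem_len=30,
)
model = TransfoXLLMHeadModel(config)
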
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import random
import unittest

from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFTransfoXLForSequenceClassification,
        TFTransfoXLLMHeadModel,
        TFTransfoXLModel,
    )
class TFTransfoXLModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)
    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}

        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}

        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs

        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    # TODO: Fix the failed tests
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        input_ids = tf.convert_to_tensor([[33, 1297, 2, 1, 1009, 4, 1109, 11739, 4762, 358, 5, 25, 245, 22, 1706, 17, 20098, 5, 3215, 21, 37, 1110, 3, 13, 1041, 4, 24, 603, 490, 2, 71477, 20098, 104447, 2, 20961, 1, 2604, 4, 1, 329, 3, 6224, 831, 16002, 2, 8, 603, 78967, 29546, 23, 803, 20, 25, 416, 5, 8, 232, 4, 277, 6, 1855, 4601, 3, 29546, 54, 8, 3609, 5, 57211, 49, 4, 1, 277, 18, 8, 1755, 15691, 3, 341, 25, 416, 693, 42573, 71, 17, 401, 94, 31, 17919, 2, 29546, 7873, 18, 1, 435, 23, 11011, 755, 5, 5167, 3, 7983, 98, 84, 2, 29546, 3267, 8, 3609, 4, 1, 4865, 1075, 2, 6087, 71, 6, 346, 8, 5854, 3, 29546, 824, 1400, 1868, 2, 19, 160, 2, 311, 8, 5496, 2, 20920, 17, 25, 15097, 3, 24, 24, 0]])  # fmt: skip
        # In 1991 , the remains of Russian Tsar Nicholas II and his family
        # ( except for Alexei and Maria ) are discovered .
        # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
        # remainder of the story . 1883 Western Siberia ,
        # a young Grigori Rasputin is asked by his father and a group of men to perform magic .
        # Rasputin has a vision and denounces one of the men as a horse thief . Although his
        # father initially slaps him for making such an accusation , Rasputin watches as the
        # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
        # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
        # with people , even a bishop , begging for his blessing . <eod> </s> <eos>

        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,142,1298,188,2,29546,113,8,3654,4,1,1109,7136,833,3,13,1645,4,29546,11,104,7,1,1109,532,7129,2,10,83507,2,1162,1123,2,6,7245,10,2,5,11,104,7,1,1109,532,7129,2,10,24,24,10,22,10,13,770,5863,4,7245,10]  # fmt: skip
        # In 1991, the remains of Russian Tsar Nicholas II and his family ( except for
        # Alexei and Maria ) are discovered. The voice of young son, Tsarevich Alexei
        # Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young
        # Grigori Rasputin is asked by his father and a group of men to perform magic.
        # Rasputin has a vision and denounces one of the men as a horse thief. Although
        # his father initially slaps him for making such an accusation, Rasputin watches
        # as the man is chased outside and beaten. Twenty years later, Rasputin sees a
        # vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly
        # becomes famous, with people, even a bishop, begging for his blessing. In the
        # early 20th century, Rasputin became a symbol of the Russian Orthodox Church.
        # The image of Rasputin was used in the Russian national anthem, " Nearer, My God,
        # to Heaven ", and was used in the Russian national anthem, " " ( " The Great Spirit
        # of Heaven "

        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
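The `mems` round trip exercised by `create_and_check_transfo_xl_model` above is Transformer-XL's segment-level recurrence: each forward pass returns one cached hidden-state tensor per layer, which is fed back in with the next segment. A standalone sketch with a randomly initialized model (dimensions borrowed from the tester; illustrative only, not part of the test suite):

import tensorflow as tf

from transformers import TFTransfoXLModel, TransfoXLConfig

config = TransfoXLConfig(
    vocab_size=99, cutoffs=[10, 50, 80], d_model=32, d_embed=32,
    n_head=4, d_head=8, d_inner=128, div_val=2, n_layer=2, mem_len=30,
)
model = TFTransfoXLModel(config)

segment_1 = tf.random.uniform((1, 7), maxval=config.vocab_size, dtype=tf.int32)
segment_2 = tf.random.uniform((1, 7), maxval=config.vocab_size, dtype=tf.int32)

out_1 = model(segment_1)                   # first segment: no memory yet
out_2 = model(segment_2, mems=out_1.mems)  # reuse the cached hidden states

# n_layer tensors of shape (mem_len, batch_size, d_model), as asserted above
print([tuple(m.shape) for m in out_2.mems])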
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import unittest
from collections import Counter, OrderedDict

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        saved_dict = {
            "eos_idx": 0,
            "min_freq": 0,
            "vocab_file": None,
            "counter": Counter(["welcome home"]),
            "sym2idx": OrderedDict([("<eos>", 0), ("welcome", 1), ("home", 2)]),
            "delimiter": None,
            "idx2sym": ["<eos>", "welcome", "home"],
            "max_size": None,
            "lower_case": False,
            "special": ["<eos>"],
        }
        self.pretrained_vocab_file = os.path.join(
            self.tmpdirname, "mock_folder", VOCAB_FILES_NAMES["pretrained_vocab_file"]
        )
        os.makedirs(os.path.dirname(self.pretrained_vocab_file), exist_ok=True)
        with open(self.pretrained_vocab_file, "wb") as f:
            pickle.dump(saved_dict, f)

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")

    def test_from_pretrained_vocab_file(self):
        tokenizer = TransfoXLTokenizer.from_pretrained(os.path.join(self.tmpdirname, "mock_folder"))
        sentence = "welcome home"

        self.assertEqual(tokenizer.decode(tokenizer.encode(sentence)), sentence)
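For reference, the Moses-style round trip checked in `test_full_tokenizer_moses_numbers` can be reproduced in isolation; no vocab file is needed just to tokenize and detokenize (this sketch assumes `sacremoses` is installed, which the tokenizer requires):

from transformers import TransfoXLTokenizer

tokenizer = TransfoXLTokenizer(lower_case=False)

text = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
tokens = tokenizer.tokenize(text)
# Hyphens and number separators become "@-@", "@,@" and "@.@" placeholder tokens.
print(tokens)

# convert_tokens_to_string undoes the Moses tokenization and restores the text.
assert tokenizer.convert_tokens_to_string(tokens) == text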
@@ -1927,7 +1927,6 @@ class ModelTesterMixin:
             "FunnelForPreTraining",
             "ElectraForPreTraining",
             "XLMWithLMHeadModel",
-            "TransfoXLLMHeadModel",
         ]:
             for k in key_differences:
                 if k in ["loss", "losses"]:
@@ -465,7 +465,6 @@ class TFModelTesterMixin:
             "TFFunnelForPreTraining",
             "TFElectraForPreTraining",
             "TFXLMWithLMHeadModel",
-            "TFTransfoXLLMHeadModel",
         ]:
             for k in key_differences:
                 if k in ["loss", "losses"]:
@@ -127,7 +127,6 @@ SPECIAL_CASES_TO_ALLOW.update(
         "SwitchTransformersConfig": True,
         "TableTransformerConfig": True,
         "TapasConfig": True,
-        "TransfoXLConfig": True,
         "UniSpeechConfig": True,
         "UniSpeechSatConfig": True,
         "WavLMConfig": True,
@@ -738,7 +738,6 @@ OBJECTS_TO_IGNORE = [
     "TrainerState",
     "TrainingArguments",
     "TrajectoryTransformerConfig",
-    "TransfoXLConfig",
     "TranslationPipeline",
     "TvltImageProcessor",
     "UMT5Config",
@@ -397,13 +397,11 @@ def get_model_modules() -> List[str]:
         "modeling_flax_speech_encoder_decoder",
         "modeling_flax_vision_encoder_decoder",
         "modeling_timm_backbone",
-        "modeling_transfo_xl_utilities",
         "modeling_tf_auto",
         "modeling_tf_encoder_decoder",
         "modeling_tf_outputs",
         "modeling_tf_pytorch_utils",
         "modeling_tf_utils",
-        "modeling_tf_transfo_xl_utilities",
         "modeling_tf_vision_encoder_decoder",
         "modeling_vision_encoder_decoder",
     ]
@@ -496,6 +496,11 @@ src/transformers/models/deprecated/tapex/tokenization_tapex.py
 src/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py
 src/transformers/models/deprecated/trajectory_transformer/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py
 src/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py
+src/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py
+src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py
+src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py
+src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py
+src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl_utilities.py
 src/transformers/models/deprecated/van/configuration_van.py
 src/transformers/models/deprecated/van/convert_van_to_pytorch.py
 src/transformers/models/deprecated/van/modeling_van.py
@@ -818,11 +823,6 @@ src/transformers/models/tapas/modeling_tf_tapas.py
 src/transformers/models/timesformer/convert_timesformer_to_pytorch.py
 src/transformers/models/timm_backbone/configuration_timm_backbone.py
 src/transformers/models/timm_backbone/modeling_timm_backbone.py
-src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py
-src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py
-src/transformers/models/transfo_xl/modeling_tf_transfo_xl_utilities.py
-src/transformers/models/transfo_xl/modeling_transfo_xl.py
-src/transformers/models/transfo_xl/modeling_transfo_xl_utilities.py
 src/transformers/models/trocr/convert_trocr_unilm_to_pytorch.py
 src/transformers/models/tvlt/configuration_tvlt.py
 src/transformers/models/tvlt/modeling_tvlt.py
@@ -990,4 +990,4 @@ src/transformers/utils/peft_utils.py
 src/transformers/utils/quantization_config.py
 src/transformers/utils/sentencepiece_model_pb2.py
 src/transformers/utils/sentencepiece_model_pb2_new.py
-src/transformers/utils/versions.py
\ No newline at end of file
+src/transformers/utils/versions.py