Commit 83a41d39 authored by Julien Chaumond

💄 super

parent cd51893d
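This commit applies one mechanical change throughout: every Python 2-compatible two-argument call of the form super(ClassName, self) becomes the zero-argument super() available since Python 3, which resolves the class and instance implicitly. A minimal sketch of the equivalence, using illustrative class names that do not appear in the diff:

class Base:
    def __init__(self, **kwargs):
        self.kwargs = kwargs

class Child(Base):
    def __init__(self, **kwargs):
        # Equivalent to the old super(Child, self).__init__(**kwargs),
        # but no longer repeats the class name, so renaming Child
        # cannot silently break the call.
        super().__init__(**kwargs)

The two forms behave identically at runtime; the zero-argument version is simply shorter and rename-safe.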
@@ -69,7 +69,7 @@ TFXxxOutput = tf.keras.layers.Layer
 class TFXxxLayer(tf.keras.layers.Layer):
     def __init__(self, config, **kwargs):
-        super(TFXxxLayer, self).__init__(**kwargs)
+        super().__init__(**kwargs)
         self.attention = TFXxxAttention(config, name="attention")
         self.intermediate = TFXxxIntermediate(config, name="intermediate")
         self.transformer_output = TFXxxOutput(config, name="output")
@@ -91,7 +91,7 @@ class TFXxxLayer(tf.keras.layers.Layer):
 ####################################################
 class TFXxxMainLayer(tf.keras.layers.Layer):
     def __init__(self, config, **kwargs):
-        super(TFXxxMainLayer, self).__init__(**kwargs)
+        super().__init__(**kwargs)
 
     def _resize_token_embeddings(self, new_num_tokens):
         raise NotImplementedError  # Not implemented yet in the library for TF 2.0 models
@@ -307,7 +307,7 @@ class TFXxxModel(TFXxxPreTrainedModel):
     """
     def __init__(self, config, *inputs, **kwargs):
-        super(TFXxxModel, self).__init__(config, *inputs, **kwargs)
+        super().__init__(config, *inputs, **kwargs)
         self.transformer = TFXxxMainLayer(config, name="transformer")
 
     def call(self, inputs, **kwargs):
@@ -348,7 +348,7 @@ class TFXxxForMaskedLM(TFXxxPreTrainedModel):
     """
     def __init__(self, config, *inputs, **kwargs):
-        super(TFXxxForMaskedLM, self).__init__(config, *inputs, **kwargs)
+        super().__init__(config, *inputs, **kwargs)
         self.transformer = TFXxxMainLayer(config, name="transformer")
         self.mlm = TFXxxMLMHead(config, self.transformer.embeddings, name="mlm")
@@ -397,7 +397,7 @@ class TFXxxForSequenceClassification(TFXxxPreTrainedModel):
     """
     def __init__(self, config, *inputs, **kwargs):
-        super(TFXxxForSequenceClassification, self).__init__(config, *inputs, **kwargs)
+        super().__init__(config, *inputs, **kwargs)
         self.num_labels = config.num_labels
         self.transformer = TFXxxMainLayer(config, name="transformer")
@@ -452,7 +452,7 @@ class TFXxxForTokenClassification(TFXxxPreTrainedModel):
     """
     def __init__(self, config, *inputs, **kwargs):
-        super(TFXxxForTokenClassification, self).__init__(config, *inputs, **kwargs)
+        super().__init__(config, *inputs, **kwargs)
         self.num_labels = config.num_labels
         self.transformer = TFXxxMainLayer(config, name="transformer")
@@ -509,7 +509,7 @@ class TFXxxForQuestionAnswering(TFXxxPreTrainedModel):
     """
     def __init__(self, config, *inputs, **kwargs):
-        super(TFXxxForQuestionAnswering, self).__init__(config, *inputs, **kwargs)
+        super().__init__(config, *inputs, **kwargs)
         self.num_labels = config.num_labels
         self.transformer = TFXxxMainLayer(config, name="transformer")
......
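The TF 2.0 template above always forwards **kwargs from each layer's __init__ up to tf.keras.layers.Layer, which is what makes the name="..." arguments work. A minimal sketch of that pattern, assuming illustrative names (MyLayer, units) not taken from the template:

import tensorflow as tf

class MyLayer(tf.keras.layers.Layer):
    def __init__(self, units, **kwargs):
        # Forward **kwargs so callers can pass standard Layer arguments
        # such as name=..., which prefixes the names of any weights
        # this layer creates.
        super().__init__(**kwargs)
        self.units = units

layer = MyLayer(8, name="attention")
print(layer.name)  # "attention"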
@@ -138,7 +138,7 @@ XxxOutput = nn.Module
 class XxxLayer(nn.Module):
     def __init__(self, config):
-        super(XxxLayer, self).__init__()
+        super().__init__()
         self.attention = XxxAttention(config)
         self.intermediate = XxxIntermediate(config)
         self.output = XxxOutput(config)
@@ -298,7 +298,7 @@ class XxxModel(XxxPreTrainedModel):
     """
     def __init__(self, config):
-        super(XxxModel, self).__init__(config)
+        super().__init__(config)
         self.embeddings = XxxEmbeddings(config)
         self.encoder = XxxEncoder(config)
@@ -426,7 +426,7 @@ class XxxForMaskedLM(XxxPreTrainedModel):
     """
     def __init__(self, config):
-        super(XxxForMaskedLM, self).__init__(config)
+        super().__init__(config)
         self.transformer = XxxModel(config)
         self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
@@ -507,7 +507,7 @@ class XxxForSequenceClassification(XxxPreTrainedModel):
     """
     def __init__(self, config):
-        super(XxxForSequenceClassification, self).__init__(config)
+        super().__init__(config)
         self.num_labels = config.num_labels
         self.transformer = XxxModel(config)
@@ -593,7 +593,7 @@ class XxxForTokenClassification(XxxPreTrainedModel):
     """
     def __init__(self, config):
-        super(XxxForTokenClassification, self).__init__(config)
+        super().__init__(config)
         self.num_labels = config.num_labels
         self.transformer = XxxModel(config)
@@ -692,7 +692,7 @@ class XxxForQuestionAnswering(XxxPreTrainedModel):
     """
     def __init__(self, config):
-        super(XxxForQuestionAnswering, self).__init__(config)
+        super().__init__(config)
         self.num_labels = config.num_labels
         self.transformer = XxxModel(config)
......
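In the PyTorch template above, super().__init__(config) always runs before any submodule is assigned. The order matters: nn.Module.__init__ creates the internal registries that attribute assignment uses to track parameters and submodules. A minimal sketch with illustrative names (Head, n_embd, vocab_size) not taken from the template:

import torch.nn as nn

class Head(nn.Module):
    def __init__(self, n_embd, vocab_size):
        # Must run first: nn.Module.__init__ sets up the _modules and
        # _parameters registries; assigning the nn.Linear below before
        # this call would raise an AttributeError.
        super().__init__()
        self.decoder = nn.Linear(n_embd, vocab_size)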
@@ -27,7 +27,7 @@ class XxxTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     tokenizer_class = XxxTokenizer
 
     def setUp(self):
-        super(XxxTokenizationTest, self).setUp()
+        super().setUp()
 
         vocab_tokens = [
             "[UNK]",
......
@@ -109,7 +109,7 @@ class XxxTokenizer(PreTrainedTokenizer):
                 Whether to lower case the input
                 Only has an effect when do_basic_tokenize=True
         """
-        super(XxxTokenizer, self).__init__(
+        super().__init__(
             unk_token=unk_token,
             sep_token=sep_token,
             pad_token=pad_token,
......
@@ -30,7 +30,7 @@ class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     tokenizer_class = AlbertTokenizer
 
     def setUp(self):
-        super(AlbertTokenizationTest, self).setUp()
+        super().setUp()
 
         # We have a SentencePiece fixture for testing
         tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
......
@@ -38,7 +38,7 @@ class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     test_rust_tokenizer = True
 
     def setUp(self):
-        super(BertTokenizationTest, self).setUp()
+        super().setUp()
 
         vocab_tokens = [
             "[UNK]",
......
@@ -35,7 +35,7 @@ class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     tokenizer_class = BertJapaneseTokenizer
 
     def setUp(self):
-        super(BertJapaneseTokenizationTest, self).setUp()
+        super().setUp()
 
         vocab_tokens = [
             "[UNK]",
@@ -135,7 +135,7 @@ class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     tokenizer_class = BertJapaneseTokenizer
 
     def setUp(self):
-        super(BertJapaneseCharacterTokenizationTest, self).setUp()
+        super().setUp()
 
         vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
......
@@ -27,7 +27,7 @@ class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     tokenizer_class = CTRLTokenizer
 
     def setUp(self):
-        super(CTRLTokenizationTest, self).setUp()
+        super().setUp()
 
         # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
         vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
......
@@ -29,7 +29,7 @@ class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     test_rust_tokenizer = True
 
     def setUp(self):
-        super(GPT2TokenizationTest, self).setUp()
+        super().setUp()
 
         # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
         vocab = [
......
@@ -28,7 +28,7 @@ class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     tokenizer_class = OpenAIGPTTokenizer
 
     def setUp(self):
-        super(OpenAIGPTTokenizationTest, self).setUp()
+        super().setUp()
 
         # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
         vocab = [
......
@@ -28,7 +28,7 @@ class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     tokenizer_class = RobertaTokenizer
 
     def setUp(self):
-        super(RobertaTokenizationTest, self).setUp()
+        super().setUp()
 
         # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
         vocab = [
......
@@ -31,7 +31,7 @@ class T5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     tokenizer_class = T5Tokenizer
 
     def setUp(self):
-        super(T5TokenizationTest, self).setUp()
+        super().setUp()
 
         # We have a SentencePiece fixture for testing
         tokenizer = T5Tokenizer(SAMPLE_VOCAB)
......
@@ -33,7 +33,7 @@ class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     tokenizer_class = TransfoXLTokenizer if is_torch_available() else None
 
     def setUp(self):
-        super(TransfoXLTokenizationTest, self).setUp()
+        super().setUp()
 
         vocab_tokens = [
             "<unk>",
......
@@ -29,7 +29,7 @@ class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     tokenizer_class = XLMTokenizer
 
     def setUp(self):
-        super(XLMTokenizationTest, self).setUp()
+        super().setUp()
 
         # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
         vocab = [
......
@@ -31,7 +31,7 @@ class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     tokenizer_class = XLNetTokenizer
 
     def setUp(self):
-        super(XLNetTokenizationTest, self).setUp()
+        super().setUp()
 
         # We have a SentencePiece fixture for testing
         tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
......
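All of the test classes above inherit from both TokenizerTesterMixin and unittest.TestCase, which is exactly where zero-argument super() is most useful: it follows the method resolution order instead of hard-coding one base class. A minimal sketch of that cooperative setUp chain, with FixtureMixin standing in (illustratively) for TokenizerTesterMixin:

import unittest

class FixtureMixin:
    def setUp(self):
        # Cooperative mixin: defer to the next class in the MRO
        # (here unittest.TestCase) before adding fixture state.
        super().setUp()
        self.fixture_ready = True

class ExampleTest(FixtureMixin, unittest.TestCase):
    def setUp(self):
        # Resolves to FixtureMixin.setUp, then TestCase.setUp.
        super().setUp()

    def test_fixture(self):
        self.assertTrue(self.fixture_ready)

if __name__ == "__main__":
    unittest.main()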