from pytorch_transformers.tokenization_xlm import XLMTokenizer
from pytorch_transformers.modeling_xlm import (
    XLMConfig,
    XLMModel,
    XLMWithLMHeadModel,
    XLMForSequenceClassification,
    XLMForQuestionAnswering
)

# A lot of models share the same param doc. Use a decorator
# to save typing
xlm_start_docstring = """
    Model class for the XLM Transformer model from
        "Cross-lingual Language Model Pretraining" by Guillaume Lample and Alexis Conneau
        Paper: https://arxiv.org/abs/1901.07291
        Original code: https://github.com/facebookresearch/XLM

    Example:
        # Load the tokenizer
        import torch
        tokenizer = torch.hub.load('huggingface/pytorch-transformers', 'xlmTokenizer', 'xlm-mlm-en-2048')

        #  Prepare tokenized input
        text_1 = "Who was Jim Henson ?"
        text_2 = "Jim Henson was a puppeteer"
        indexed_tokens_1 = tokenizer.encode(text_1)
        indexed_tokens_2 = tokenizer.encode(text_2)
        tokens_tensor_1 = torch.tensor([indexed_tokens_1])
        tokens_tensor_2 = torch.tensor([indexed_tokens_2])
"""

xlm_end_docstring = """
    Params:
        pretrained_model_name_or_path: either:
            - a str with the name of a pre-trained model to load selected in the list of:
                . `xlm-mlm-en-2048`
            - a path or url to a pretrained model archive containing:
                . `config.json` a configuration file for the model
                . `pytorch_model.bin` a PyTorch dump created using the `convert_xlm_checkpoint_to_pytorch` conversion script
        cache_dir: an optional path to a folder in which the pre-trained models will be cached.
        state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of the pre-trained weights
        *inputs, **kwargs: additional input for the specific XLM class
"""


def _begin_with_docstring(docstr):
    # Prepend `docstr` to the decorated function's docstring.
    def docstring_decorator(fn):
        fn.__doc__ = docstr + fn.__doc__
        return fn
    return docstring_decorator

def _end_with_docstring(docstr):
    # Append `docstr` to the decorated function's docstring.
    def docstring_decorator(fn):
        fn.__doc__ = fn.__doc__ + docstr
        return fn
    return docstring_decorator
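
# Illustration of how the two decorators compose (uses only the definitions
# above): the start docstring is prepended and the end docstring is appended
# around the function's own docstring.
#
#     @_begin_with_docstring("START ")
#     @_end_with_docstring(" END")
#     def f():
#         """BODY"""
#
#     assert f.__doc__ == "START BODY END"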


def xlmTokenizer(*args, **kwargs):
    """
    Instantiate an XLM BPE tokenizer from a pre-trained vocab file.

    Args:
    pretrained_model_name_or_path: Path to pretrained model archive
                                   or one of pre-trained vocab configs below.
                                       * xlm-mlm-en-2048
    Keyword args:
    special_tokens: Special tokens in vocabulary that are not pretrained
                    Default: None
    max_len: An artificial maximum length to truncate tokenized sequences to;
             Effective maximum length is always the minimum of this
             value (if specified) and the underlying model's
             sequence length.
             Default: None

    Example:
        import torch
        tokenizer = torch.hub.load('huggingface/pytorch-transformers', 'xlmTokenizer', 'xlm-mlm-en-2048')

        text = "Who was Jim Henson ?"
        indexed_tokens = tokenizer.encode(text)
    """
    tokenizer = XLMTokenizer.from_pretrained(*args, **kwargs)
    return tokenizer
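
# Usage sketch (illustrative only; assumes the hosted `xlm-mlm-en-2048` vocab
# files are downloadable): the entry point can also be called directly, which
# is what `torch.hub.load` ends up doing.
#
#     tokenizer = xlmTokenizer('xlm-mlm-en-2048')
#     indexed_tokens = tokenizer.encode("Who was Jim Henson ?")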


@_begin_with_docstring(xlm_start_docstring)
@_end_with_docstring(xlm_end_docstring)
def xlmModel(*args, **kwargs):
    """
        # Load xlmModel
        model = torch.hub.load('huggingface/pytorch-transformers', 'xlmModel', 'xlm-mlm-en-2048')
        model.eval()

        # Compute the hidden states for the input tokens (XLM has no memory
        # cache to carry over, unlike Transformer-XL/XLNet)
        with torch.no_grad():
                hidden_states_1 = model(tokens_tensor_1)[0]
                hidden_states_2 = model(tokens_tensor_2)[0]
    """
    model = XLMModel.from_pretrained(*args, **kwargs)
    return model


@_begin_with_docstring(xlm_start_docstring)
@_end_with_docstring(xlm_end_docstring)
def xlmLMHeadModel(*args, **kwargs):
    """
        #  Prepare tokenized input
        text_1 = "Who was Jim Henson ?"
        text_2 = "Jim Henson was a puppeteer"
        indexed_tokens_1 = tokenizer.encode(text_1)
        indexed_tokens_2 = tokenizer.encode(text_2)
        tokens_tensor_1 = torch.tensor([indexed_tokens_1])
        tokens_tensor_2 = torch.tensor([indexed_tokens_2])

        # Load xlmLMHeadModel
        model = torch.hub.load('huggingface/pytorch-transformers', 'xlmLMHeadModel', 'xlm-mlm-en-2048')
        model.eval()

        # Predict the logits for each token
        with torch.no_grad():
                predictions_1 = model(tokens_tensor_1)[0]
                predictions_2 = model(tokens_tensor_2)[0]

        # Get the predicted next token
        predicted_index = torch.argmax(predictions_2[0, -1, :]).item()
        predicted_token = tokenizer.decode([predicted_index])
    """
    model = XLMWithLMHeadModel.from_pretrained(*args, **kwargs)
    return model
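
# Usage sketch (illustrative, following the docstring example above): instead
# of taking only the argmax, the top-k candidates for the next token can be
# inspected from the LM logits.
#
#     top_values, top_indices = torch.topk(predictions_2[0, -1, :], k=5)
#     candidates = [tokenizer.decode([i.item()]) for i in top_indices]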


# @_begin_with_docstring(xlm_start_docstring)
# @_end_with_docstring(xlm_end_docstring)
# def xlmForSequenceClassification(*args, **kwargs):
#     """
#     xlmForSequenceClassification is the XLM Transformer model with a sequence
#         classification head on top, from "Cross-lingual Language Model Pretraining"
#         by Guillaume Lample and Alexis Conneau

#     Example:
#         # Load the tokenizer
#         import torch
#         tokenizer = torch.hub.load('huggingface/pytorch-transformers', 'xlmTokenizer', 'xlm-mlm-en-2048')

#         #  Prepare tokenized input
#         text_1 = "Who was Jim Henson ? Jim Henson was a puppeteer"
#         text_2 = "Who was Jim Henson ? Jim Henson was a mysterious young man"
#         indexed_tokens_1 = tokenizer.encode(text_1)
#         indexed_tokens_2 = tokenizer.encode(text_2)
#         tokens_tensor_1 = torch.tensor([indexed_tokens_1])
#         tokens_tensor_2 = torch.tensor([indexed_tokens_2])

#         # Load xlmForSequenceClassification
#         model = torch.hub.load('huggingface/pytorch-transformers', 'xlmForSequenceClassification', 'xlm-mlm-en-2048')
#         model.eval()

#         # Predict the sequence classification logits
#         with torch.no_grad():
#                 logits_1 = model(tokens_tensor_1)[0]
#                 logits_2 = model(tokens_tensor_2)[0]
#     """
#     model = XLMForSequenceClassification.from_pretrained(*args, **kwargs)
#     return model
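
# XLMForQuestionAnswering is imported above but has no entry point yet; a
# sketch of what one could look like, following the pattern of the functions
# above (illustrative only, not a published hub entry point):
#
#     @_begin_with_docstring(xlm_start_docstring)
#     @_end_with_docstring(xlm_end_docstring)
#     def xlmForQuestionAnswering(*args, **kwargs):
#         """
#         """
#         model = XLMForQuestionAnswering.from_pretrained(*args, **kwargs)
#         return model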