from pytorch_transformers.tokenization_xlm import XLMTokenizer
from pytorch_transformers.modeling_xlm import (
    XLMConfig,
    XLMModel,
    XLMWithLMHeadModel,
    XLMForSequenceClassification,
    XLMForQuestionAnswering
)

# A lot of models share the same param doc. Use a decorator
# to save typing
xlm_start_docstring = """
    Model class adapted from the XLM Transformer model of
        "Cross-lingual Language Model Pretraining" by Guillaume Lample, Alexis Conneau
        Paper: https://arxiv.org/abs/1901.07291
        Original code: https://github.com/facebookresearch/XLM

    Example:
        # Load the tokenizer
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-transformers', 'xlmTokenizer', 'xlm-mlm-en-2048')

        #  Prepare tokenized input
        >>> text_1 = "Who was Jim Henson ?"
        >>> text_2 = "Jim Henson was a puppeteer"
        >>> indexed_tokens_1 = tokenizer.encode(text_1)
        >>> indexed_tokens_2 = tokenizer.encode(text_2)
        >>> tokens_tensor_1 = torch.tensor([indexed_tokens_1])
        >>> tokens_tensor_2 = torch.tensor([indexed_tokens_2])
"""

# A lot of models share the same param doc. Use a decorator
# to save typing
xlm_end_docstring = """
    Params:
        pretrained_model_name_or_path: either:
            - a str with the name of a pre-trained model to load, selected from the list of:
                . `xlm-mlm-en-2048`
            - a path or url to a pretrained model archive containing:
                . `config.json` a configuration file for the model
                . `pytorch_model.bin` a PyTorch dump created using the `convert_xlm_checkpoint_to_pytorch` conversion script
        cache_dir: an optional path to a folder in which the pre-trained models will be cached.
        state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
        *inputs, **kwargs: additional input for the specific XLM class
"""


def _begin_with_docstring(docstr):
    def docstring_decorator(fn):
        fn.__doc__ = docstr + fn.__doc__
        return fn
    return docstring_decorator

def _end_with_docstring(docstr):
    def docstring_decorator(fn):
        fn.__doc__ = fn.__doc__ + docstr
        return fn
    return docstring_decorator
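
# Illustrative sketch (hypothetical names, not part of this module's API) of how
# the two decorators above compose a function's docstring. Decorators apply
# bottom-up, so the end docstring is appended first and the start docstring is
# prepended last:
#
#     @_begin_with_docstring("HEADER\n")
#     @_end_with_docstring("\nFOOTER")
#     def _demo():
#         """BODY"""
#
#     # _demo.__doc__ == "HEADER\nBODY\nFOOTER"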


def xlmTokenizer(*args, **kwargs):
    """
    Instantiate an XLM BPE tokenizer from a pre-trained vocab file.

    Args:
    pretrained_model_name_or_path: Path to pretrained model archive
                                   or one of pre-trained vocab configs below.
                                       * xlm-mlm-en-2048
    Keyword args:
    special_tokens: Special tokens in vocabulary that are not pretrained
                    Default: None
    max_len: An artificial maximum length to truncate tokenized sequences to;
             Effective maximum length is always the minimum of this
             value (if specified) and the underlying model's
             sequence length.
             Default: None

    Example:
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-transformers', 'xlmTokenizer', 'xlm-mlm-en-2048')

        >>> text = "Who was Jim Henson ?"
        >>> indexed_tokens = tokenizer.encode(text)
    """
    tokenizer = XLMTokenizer.from_pretrained(*args, **kwargs)
    return tokenizer
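
# A minimal local sketch of the returned object (an assumption about typical
# usage, not part of the hub API): encode() maps a string to a list of BPE
# token ids and decode() maps ids back to a string.
#
#     tok = xlmTokenizer('xlm-mlm-en-2048')
#     ids = tok.encode("Who was Jim Henson ?")   # list of token ids (ints)
#     text = tok.decode(ids)                     # detokenized string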


@_begin_with_docstring(xlm_start_docstring)
@_end_with_docstring(xlm_end_docstring)
def xlmModel(*args, **kwargs):
    """
        # Load xlmModel
        >>> model = torch.hub.load('huggingface/pytorch-transformers', 'xlmModel', 'xlm-mlm-en-2048')
        >>> model.eval()

        # Predict hidden states features for each layer
        >>> with torch.no_grad():
                hidden_states_1 = model(tokens_tensor_1)[0]
                hidden_states_2 = model(tokens_tensor_2)[0]
    """
    model = XLMModel.from_pretrained(*args, **kwargs)
    return model
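
# Shape note (an assumption based on the checkpoint name): for 'xlm-mlm-en-2048'
# the hidden states in the docstring example above have size
# [batch_size, sequence_length, 2048].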


@_begin_with_docstring(xlm_start_docstring)
@_end_with_docstring(xlm_end_docstring)
def xlmLMHeadModel(*args, **kwargs):
    """
        #  Prepare tokenized input
        >>> text_1 = "Who was Jim Henson ?"
        >>> text_2 = "Jim Henson was a puppeteer"
        >>> indexed_tokens_1 = tokenizer.encode(text_1)
        >>> indexed_tokens_2 = tokenizer.encode(text_2)
        >>> tokens_tensor_1 = torch.tensor([indexed_tokens_1])
        >>> tokens_tensor_2 = torch.tensor([indexed_tokens_2])

        # Load xlmLMHeadModel
        >>> model = torch.hub.load('huggingface/pytorch-transformers', 'xlmLMHeadModel', 'xlm-mlm-en-2048')
        >>> model.eval()

        # Predict hidden states features for each layer
        >>> with torch.no_grad():
                predictions_1 = model(tokens_tensor_1)[0]
                predictions_2 = model(tokens_tensor_2)[0]

        # Get the predicted last token
        >>> predicted_index = torch.argmax(predictions_2[0, -1, :]).item()
        >>> predicted_token = tokenizer.decode([predicted_index])
        >>> assert predicted_token == ' who'
    """
    model = XLMWithLMHeadModel.from_pretrained(*args, **kwargs)
    return model
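
# Output note (an assumption about the returned tuple): the first element from
# XLMWithLMHeadModel is a logits tensor of shape
# [batch_size, sequence_length, vocab_size], which is what the argmax in the
# docstring example above indexes into.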


# @_end_with_docstring(xlm_end_docstring)
# def xlmForSequenceClassification(*args, **kwargs):
#     """
#     xlmForSequenceClassification is the XLM Transformer model with a sequence
#         classification head, from "Cross-lingual Language Model Pretraining"
#         by Guillaume Lample, Alexis Conneau

#     Example:
#         # Load the tokenizer
#         >>> import torch
#         >>> tokenizer = torch.hub.load('huggingface/pytorch-transformers', 'xlmTokenizer', 'xlm-mlm-en-2048')

#         #  Prepare tokenized input
#         >>> text1 = "Who was Jim Henson ? Jim Henson was a puppeteer"
#         >>> text2 = "Who was Jim Henson ? Jim Henson was a mysterious young man"
#         >>> tokenized_text1 = tokenizer.tokenize(text1)
#         >>> tokenized_text2 = tokenizer.tokenize(text2)
#         >>> indexed_tokens1 = tokenizer.convert_tokens_to_ids(tokenized_text1)
#         >>> indexed_tokens2 = tokenizer.convert_tokens_to_ids(tokenized_text2)
#         >>> tokens_tensor = torch.tensor([[indexed_tokens1, indexed_tokens2]])
#         >>> mc_token_ids = torch.LongTensor([[len(tokenized_text1)-1, len(tokenized_text2)-1]])

#         # Load xlmForSequenceClassification
#         >>> model = torch.hub.load('huggingface/pytorch-transformers', 'xlmForSequenceClassification', 'xlm-mlm-en-2048')
#         >>> model.eval()

#         # Predict sequence classes logits
#         >>> with torch.no_grad():
#                 lm_logits, mems = model(tokens_tensor)
#     """
#     model = XLMForSequenceClassification.from_pretrained(*args, **kwargs)
#     return model
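

if __name__ == '__main__':
    # Minimal end-to-end smoke test, kept behind the __main__ guard so that
    # importing this file through torch.hub stays side-effect free. This is a
    # sketch, not part of the hub API, and it downloads the 'xlm-mlm-en-2048'
    # weights on first run.
    import torch

    tokenizer = xlmTokenizer('xlm-mlm-en-2048')
    model = xlmModel('xlm-mlm-en-2048')
    model.eval()

    tokens_tensor = torch.tensor([tokenizer.encode("Who was Jim Henson ?")])
    with torch.no_grad():
        hidden_states = model(tokens_tensor)[0]

    # Expect a [1, sequence_length, hidden_size] tensor of contextual embeddings.
    print(hidden_states.shape)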