from pytorch_transformers.tokenization_xlnet import XLNetTokenizer
from pytorch_transformers.modeling_xlnet import (
    XLNetConfig,
    XLNetModel,
    XLNetLMHeadModel,
    # XLNetForSequenceClassification
)

# A lot of models share the same param doc. Use a decorator
# to save typing
xlnet_docstring = """
    Params:
        pretrained_model_name_or_path: either:
            - a str with the name of a pre-trained model to load, selected from the list of:
                . `xlnet-large-cased`
            - a path or url to a pretrained model archive containing:
                . `config.json` a configuration file for the model
                . `pytorch_model.bin` a PyTorch dump of an XLNet model instance
            - a path or url to a pretrained model archive containing:
                . `xlnet_config.json` a configuration file for the model
                . `model.chkpt` a TensorFlow checkpoint
        from_tf: whether to load the weights from a locally saved TensorFlow checkpoint
        cache_dir: an optional path to a folder in which the pre-trained models will be cached
        state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of the pre-trained weights
        *inputs, **kwargs: additional input for the specific XLNet class
"""


def _append_from_pretrained_docstring(docstr):
    def docstring_decorator(fn):
        fn.__doc__ = fn.__doc__ + docstr
        return fn
    return docstring_decorator
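
# A minimal sketch of what the decorator above does; `_example` is a
# hypothetical function used only for illustration:
#
#     @_append_from_pretrained_docstring(xlnet_docstring)
#     def _example(*args, **kwargs):
#         """Load something."""
#
#     # _example.__doc__ is now "Load something." followed by xlnet_docstring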


def xlnetTokenizer(*args, **kwargs):
    """
    Instantiate an XLNet SentencePiece tokenizer from a pre-trained vocab file.
    Peculiarities:
        - requires the Google SentencePiece library (https://github.com/google/sentencepiece)

    Args:
    pretrained_model_name_or_path: Path to pretrained model archive
                                   or one of pre-trained vocab configs below.
                                       * xlnet-large-cased
    Keyword args:
    special_tokens: Special tokens in vocabulary that are not pretrained
                    Default: None
    max_len: An artificial maximum length to truncate tokenized sequences to;
             the effective maximum length is always the minimum of this
             value (if specified) and the underlying model's
             maximum sequence length.
             Default: None

    Example:
        import torch
        tokenizer = torch.hub.load('huggingface/pytorch-transformers', 'xlnetTokenizer', 'xlnet-large-cased')

        text = "Who was Jim Henson ?"
        indexed_tokens = tokenizer.encode(text)
    """
    tokenizer = XLNetTokenizer.from_pretrained(*args, **kwargs)
    return tokenizer
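
# Usage sketch for the entry point above (a hypothetical session; assumes
# network access to download the 'xlnet-large-cased' vocabulary):
#
#     tokenizer = xlnetTokenizer('xlnet-large-cased')
#     indexed_tokens = tokenizer.encode("Who was Jim Henson ?")
#     text = tokenizer.decode(indexed_tokens)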


@_append_from_pretrained_docstring(xlnet_docstring)
def xlnetModel(*args, **kwargs):
    """
    xlnetModel is the basic XLNet Transformer model from
        "XLNet: Generalized Autoregressive Pretraining for Language Understanding"
        by Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le

    Example:
        # Load the tokenizer
        import torch
        tokenizer = torch.hub.load('huggingface/pytorch-transformers', 'xlnetTokenizer', 'xlnet-large-cased')

        #  Prepare tokenized input
        text_1 = "Who was Jim Henson ?"
        text_2 = "Jim Henson was a puppeteer"
        indexed_tokens_1 = tokenizer.encode(text_1)
        indexed_tokens_2 = tokenizer.encode(text_2)
        tokens_tensor_1 = torch.tensor([indexed_tokens_1])
        tokens_tensor_2 = torch.tensor([indexed_tokens_2])

        # Load xlnetModel
        model = torch.hub.load('huggingface/pytorch-transformers', 'xlnetModel', 'xlnet-large-cased')
        model.eval()

        # Predict hidden states features for each layer
        with torch.no_grad():
            hidden_states_1, mems = model(tokens_tensor_1)
            hidden_states_2, mems = model(tokens_tensor_2, mems=mems)
    """
    model = XLNetModel.from_pretrained(*args, **kwargs)
    return model
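
# Usage sketch for the entry point above (assumes `tokens_tensor_1` was built
# as in the docstring example; hidden size is 1024 for 'xlnet-large-cased'):
#
#     model = xlnetModel('xlnet-large-cased')
#     model.eval()
#     with torch.no_grad():
#         hidden_states, mems = model(tokens_tensor_1)
#     # hidden_states: (batch, sequence length, 1024); mems carries the
#     # per-layer memories that can be fed back in on the next call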


@_append_from_pretrained_docstring(xlnet_docstring)
def xlnetLMHeadModel(*args, **kwargs):
    """
    xlnetLMHeadModel is the basic XLNet Transformer model from
        "XLNet: Generalized Autoregressive Pretraining for Language Understanding"
        by Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le
    with a tied (pre-trained) language modeling head on top.

    Example:
        # Load the tokenizer
        import torch
        tokenizer = torch.hub.load('huggingface/pytorch-transformers', 'xlnetTokenizer', 'xlnet-large-cased')

        #  Prepare tokenized input
        text_1 = "Who was Jim Henson ?"
        text_2 = "Jim Henson was a puppeteer"
        indexed_tokens_1 = tokenizer.encode(text_1)
        indexed_tokens_2 = tokenizer.encode(text_2)
        tokens_tensor_1 = torch.tensor([indexed_tokens_1])
        tokens_tensor_2 = torch.tensor([indexed_tokens_2])

        # Load xlnetLMHeadModel
        model = torch.hub.load('huggingface/pytorch-transformers', 'xlnetLMHeadModel', 'xlnet-large-cased')
        model.eval()

        # Predict all tokens
        with torch.no_grad():
            predictions_1, mems = model(tokens_tensor_1)
            predictions_2, mems = model(tokens_tensor_2, mems=mems)

        # Get the predicted last token
        predicted_index = torch.argmax(predictions_2[0, -1, :]).item()
        predicted_token = tokenizer.decode([predicted_index])
        assert predicted_token == ' who'
    """
    model = XLNetLMHeadModel.from_pretrained(*args, **kwargs)
    return model
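
# Usage sketch for the entry point above (assumes `tokenizer` and
# `tokens_tensor_1` were built as in the docstring example; the predicted
# token depends on the downloaded pre-trained weights):
#
#     model = xlnetLMHeadModel('xlnet-large-cased')
#     model.eval()
#     with torch.no_grad():
#         predictions, mems = model(tokens_tensor_1)
#     next_token_id = torch.argmax(predictions[0, -1, :]).item()
#     next_token = tokenizer.decode([next_token_id])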


# @_append_from_pretrained_docstring(xlnet_docstring)
# def xlnetForSequenceClassification(*args, **kwargs):
#     """
#     xlnetForSequenceClassification is the XLNet Transformer model from
#         "XLNet: Generalized Autoregressive Pretraining for Language Understanding"
#         by Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le
#         with a sequence classification head on top.

#     Example:
#         # Load the tokenizer
#         import torch
#         tokenizer = torch.hub.load('huggingface/pytorch-transformers', 'xlnetTokenizer', 'xlnet-large-cased')

#         #  Prepare tokenized input
#         text_1 = "Who was Jim Henson ? Jim Henson was a puppeteer"
#         text_2 = "Who was Jim Henson ? Jim Henson was a mysterious young man"
#         indexed_tokens_1 = tokenizer.encode(text_1)
#         indexed_tokens_2 = tokenizer.encode(text_2)
#         tokens_tensor_1 = torch.tensor([indexed_tokens_1])
#         tokens_tensor_2 = torch.tensor([indexed_tokens_2])

#         # Load xlnetForSequenceClassification
#         model = torch.hub.load('huggingface/pytorch-transformers', 'xlnetForSequenceClassification', 'xlnet-large-cased')
#         model.eval()

#         # Predict the sequence classification logits
#         with torch.no_grad():
#             logits_1, mems_1 = model(tokens_tensor_1)
#             logits_2, mems_2 = model(tokens_tensor_2)
#     """
#     model = XLNetForSequenceClassification.from_pretrained(*args, **kwargs)
#     return model