from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.modeling import (
        BertModel,
        BertForNextSentencePrediction,
        BertForMaskedLM,
        BertForMultipleChoice,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertForTokenClassification,
        )

# torch.hub convention: pip packages the hub entrypoints in this file require.
dependencies = ['torch', 'tqdm', 'boto3', 'requests', 'regex']

# A lot of models share the same param doc. Use a decorator
# to save typing
bert_docstring = """
    Params:
        pretrained_model_name_or_path: either:
            - a str with the name of a pre-trained model to load
                . `bert-base-uncased`
                . `bert-large-uncased`
                . `bert-base-cased`
                . `bert-large-cased`
                . `bert-base-multilingual-uncased`
                . `bert-base-multilingual-cased`
                . `bert-base-chinese`
            - a path or url to a pretrained model archive containing:
                . `bert_config.json` a configuration file for the model
                . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining
                  instance
            - a path or url to a pretrained model archive containing:
                . `bert_config.json` a configuration file for the model
                . `model.chkpt` a TensorFlow checkpoint
        from_tf: should we load the weights from a locally saved TensorFlow
                 checkpoint
        cache_dir: an optional path to a folder in which the pre-trained models
                   will be cached.
        state_dict: an optional state dictionary
                    (collections.OrderedDict object) to use instead of Google
                    pre-trained models
        *inputs, **kwargs: additional input for the specific Bert class
            (ex: num_labels for BertForSequenceClassification)
"""


def _append_from_pretrained_docstring(docstr):
    def docstring_decorator(fn):
        fn.__doc__ = fn.__doc__ + docstr
        return fn
    return docstring_decorator


def bertTokenizer(*args, **kwargs):
    """
    Build a BertTokenizer from a pre-trained or custom vocabulary file.

    Args:
    pretrained_model_name_or_path: Path to a pretrained model archive, or
                                   one of the pre-trained vocab configs:
                                       * bert-base-uncased
                                       * bert-large-uncased
                                       * bert-base-cased
                                       * bert-large-cased
                                       * bert-base-multilingual-uncased
                                       * bert-base-multilingual-cased
                                       * bert-base-chinese
    Keyword args:
    cache_dir: optional directory in which to download and cache the
               pre-trained model weights.
               Default: None
    do_lower_case: whether to lower-case the input.
                   Only has an effect when do_wordpiece_only=False.
                   Default: True
    do_basic_tokenize: whether to run basic tokenization before wordpiece.
                       Default: True
    max_len: artificial maximum length to truncate tokenized sequences to;
             the effective maximum length is always the minimum of this
             value (if specified) and the underlying BERT model's
             sequence length.
             Default: None
    never_split: list of tokens that are never split during tokenization.
                 Only has an effect when do_wordpiece_only=False.
                 Default: ["[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]"]

    Example:
        >>> sentence = 'Hello, World!'
        >>> tokenizer = torch.hub.load('ailzhang/pytorch-pretrained-BERT:hubconf', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False, force_reload=False)
        >>> toks = tokenizer.tokenize(sentence)
        ['Hello', '##,', 'World', '##!']
        >>> ids = tokenizer.convert_tokens_to_ids(toks)
        [8667, 28136, 1291, 28125]
    """
    return BertTokenizer.from_pretrained(*args, **kwargs)


@_append_from_pretrained_docstring(bert_docstring)
def bertModel(*args, **kwargs):
    """
    The basic BERT Transformer model: a layer of summed token, position
    and sequence embeddings, followed by a series of identical
    self-attention blocks (12 for BERT-base, 24 for BERT-large), with no
    task-specific head on top.
    """
    return BertModel.from_pretrained(*args, **kwargs)


@_append_from_pretrained_docstring(bert_docstring)
def bertForNextSentencePrediction(*args, **kwargs):
    """
    BERT with the next-sentence-prediction head attached: the BERT model
    followed by the next sentence classification head.
    """
    return BertForNextSentencePrediction.from_pretrained(*args, **kwargs)


@_append_from_pretrained_docstring(bert_docstring)
def bertForPreTraining(*args, **kwargs):
    """
    BERT with both pre-training heads attached: the BERT model followed by
        - the masked language modeling head, and
        - the next sentence classification head.
    """
    return BertForPreTraining.from_pretrained(*args, **kwargs)


@_append_from_pretrained_docstring(bert_docstring)
def bertForMaskedLM(*args, **kwargs):
    """
    The BertModel Transformer topped with the (possibly pre-trained)
    masked language modeling head.
    """
    return BertForMaskedLM.from_pretrained(*args, **kwargs)


@_append_from_pretrained_docstring(bert_docstring)
def bertForSequenceClassification(*args, **kwargs):
    """
    Fine-tuning model: BertModel plus a sequence-level classifier (for a
    single sequence or a pair of sequences) on top of it.

    The sequence-level classifier is a linear layer whose input is the
    last hidden state of the first character of the input sequence
    (see Figures 3a and 3b in the BERT paper).
    """
    return BertForSequenceClassification.from_pretrained(*args, **kwargs)


@_append_from_pretrained_docstring(bert_docstring)
def bertForMultipleChoice(*args, **kwargs):
    """
    Fine-tuning model for multiple-choice tasks: BertModel with a linear
    layer stacked on top of it.
    """
    return BertForMultipleChoice.from_pretrained(*args, **kwargs)


@_append_from_pretrained_docstring(bert_docstring)
def bertForQuestionAnswering(*args, **kwargs):
    """
    Fine-tuning model: BertModel with a token-level classifiers on top of
    the full sequence of last hidden states.
    """
    return BertForQuestionAnswering.from_pretrained(*args, **kwargs)


@_append_from_pretrained_docstring(bert_docstring)
def bertForTokenClassification(*args, **kwargs):
    """
    Fine-tuning model: BertModel plus a token-level classifier on top of
    it.

    The token-level classifier is a linear layer whose input is the last
    hidden state of the sequence.
    """
    return BertForTokenClassification.from_pretrained(*args, **kwargs)