from pytorch_pretrained_bert.tokenization_transfo_xl import TransfoXLTokenizer
from pytorch_pretrained_bert.modeling_transfo_xl import (
    TransfoXLModel,
    TransfoXLLMHeadModel
)

# A lot of models share the same param doc. Use a decorator
# to save typing
transformer_xl_docstring = """
    Transformer-XL uses relative positioning (with sinusoidal patterns) and adaptive softmax inputs, which means that:
    - you don't need to specify positional embedding indices
    - the tokens in the vocabulary have to be sorted by decreasing frequency

    Params:
        pretrained_model_name_or_path: either:
            - a str with the name of a pre-trained model to load selected in the list of:
                . `transfo-xl-wt103`
            - a path or url to a pretrained model archive containing:
                . `transfo_xl_config.json` a configuration file for the model
                . `pytorch_model.bin` a PyTorch dump of a TransfoXLModel instance
            - a path or url to a pretrained model archive containing:
                . `transfo_xl_config.json` a configuration file for the model
                . `model.chkpt` a TensorFlow checkpoint (load with `from_tf=True`)
        from_tf: whether to load the weights from a locally saved TensorFlow checkpoint
        cache_dir: an optional path to a folder in which the pre-trained models will be cached
        state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
        *inputs, **kwargs: additional inputs for the specific Transformer-XL class
"""


def _append_from_pretrained_docstring(docstr):
    def docstring_decorator(fn):
        fn.__doc__ = fn.__doc__ + docstr
        return fn
    return docstring_decorator
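
# e.g. after decoration, `transformerXLModel.__doc__` ends with the shared
# `transformer_xl_docstring`, so `help(transformerXLModel)` shows both the
# model-specific example and the common `from_pretrained` parameters.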


def transformerXLTokenizer(*args, **kwargs):
    """
    Instantiate a Transformer-XL tokenizer adapted from Vocab class in https://github.com/kimiyoung/transformer-xl

    Args:
    pretrained_model_name_or_path: Path to pretrained model archive
                                   or one of pre-trained vocab configs below.
                                       * transfo-xl-wt103

    Example:
		>>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'transformerXLTokenizer', 'transfo-xl-wt103')
		
		>>> text = "Who was Jim Henson ?"
        >>> tokenized_text = tokenizer.tokenize(tokenized_text)
		>>> indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
    """
    tokenizer = TransfoXLTokenizer.from_pretrained(*args, **kwargs)
    return tokenizer


@_append_from_pretrained_docstring(transformer_xl_docstring)
def transformerXLModel(*args, **kwargs):
    """
    gpt2Model is the basic OpenAI GPT-2 Transformer model based on
	identical stacked masked self-attention blocks and pre-trained
	on large scale dataset using language modeling signal.

    Example:
        # Load the tokenizer
		>>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'transformerXLTokenizer', 'transfo-xl-wt103')

        #  Prepare tokenized input
        >>> text_1 = "Who was Jim Henson ?"
		>>> text_2 = "Jim Henson was a puppeteer"
		>>> tokenized_text_1 = tokenizer.tokenize(text_1)
		>>> tokenized_text_2 = tokenizer.tokenize(text_2)
        >>> indexed_tokens_1 = tokenizer.convert_tokens_to_ids(tokenized_text_1)
        >>> indexed_tokens_2 = tokenizer.convert_tokens_to_ids(tokenized_text_2)
		>>> tokens_tensor_1 = torch.tensor([indexed_tokens_1])
		>>> tokens_tensor_2 = torch.tensor([indexed_tokens_2])

        # Load transformerXLModel
        >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'transformerXLModel', 'transfo-xl-wt103')
        >>> model.eval()

        # Predict hidden states features for each layer
		# We can re-use the memory cells in a subsequent call to attend a longer context
        >>> with torch.no_grad():
                hidden_states_1, mems_1 = model(tokens_tensor_1)
                hidden_states_2, mems_2 = model(tokens_tensor_2, mems=mems_1)
    """
    model = TransfoXLModel.from_pretrained(*args, **kwargs)
    return model
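
# A minimal usage sketch of the memory mechanism shown in the docstring above
# (assumptions: `model` loaded as in the example, and `long_token_ids` is a
# hypothetical flat list of token ids; the segment length of 128 is arbitrary).
# The `mems` returned by each forward pass are fed back in so later segments
# attend over a context longer than a single segment:
#
#     mems = None
#     for start in range(0, len(long_token_ids), 128):
#         segment = torch.tensor([long_token_ids[start:start + 128]])
#         with torch.no_grad():
#             hidden_states, mems = model(segment, mems=mems)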


@_append_from_pretrained_docstring(transformer_xl_docstring)
def transformerXLLMHeadModel(*args, **kwargs):
    """
    gpt2LMHeadModel is the OpenAI GPT-2 Transformer model with the
	tied (pre-trained) language modeling head on top.

	Example:
        # Load the tokenizer
		>>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'transformerXLTokenizer', 'transfo-xl-wt103')

        #  Prepare tokenized input
        >>> text_1 = "Who was Jim Henson ?"
		>>> text_2 = "Jim Henson was a puppeteer"
		>>> tokenized_text_1 = tokenizer.tokenize(text_1)
		>>> tokenized_text_2 = tokenizer.tokenize(text_2)
        >>> indexed_tokens_1 = tokenizer.convert_tokens_to_ids(tokenized_text_1)
        >>> indexed_tokens_2 = tokenizer.convert_tokens_to_ids(tokenized_text_2)
		>>> tokens_tensor_1 = torch.tensor([indexed_tokens_1])
		>>> tokens_tensor_2 = torch.tensor([indexed_tokens_2])

        # Load transformerXLLMHeadModel
        >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'transformerXLLMHeadModel', 'transfo-xl-wt103')
        >>> model.eval()

        # Predict hidden states features for each layer
		# We can re-use the memory cells in a subsequent call to attend a longer context
        >>> with torch.no_grad():
                predictions_1, mems_1 = model(tokens_tensor_1)
                predictions_2, mems_2 = model(tokens_tensor_2, mems=mems_1)

        # Get the predicted next token
        >>> predicted_index = torch.argmax(predictions_2[0, -1, :]).item()
        >>> predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]
        >>> assert predicted_token == 'who'
    """
    model = TransfoXLLMHeadModel.from_pretrained(*args, **kwargs)
    return model
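
# A minimal greedy-generation sketch with the LM head (assumptions: `model`,
# `tokenizer`, and `tokens_tensor_1` as in the example above; the 20-token
# horizon is arbitrary). Each step feeds only the newly generated token and
# carries the earlier context in `mems`:
#
#     with torch.no_grad():
#         predictions, mems = model(tokens_tensor_1)
#     generated = []
#     for _ in range(20):
#         next_id = torch.argmax(predictions[0, -1, :]).item()
#         generated.append(next_id)
#         with torch.no_grad():
#             predictions, mems = model(torch.tensor([[next_id]]), mems=mems)
#     print(' '.join(tokenizer.convert_ids_to_tokens(generated)))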