"tests/models/gpt2/test_modeling_tf_gpt2.py" did not exist on "cf416764f4d08bd24dc625a706e0ad7540ffd2c0"
hubconf.py 8.47 KB
Newer Older
Sylvain Gugger's avatar
Sylvain Gugger committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

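# torch.hub clones this repository and imports hubconf.py from its root, so the
# in-repo `src/` layout is appended to sys.path to make the `import transformers`
# below work even when the library is not installed in the environment.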
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)

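# torch.hub convention: torch.hub.load() checks that every package listed in
# `dependencies` can be imported before it runs any entrypoint defined below,
# and raises a RuntimeError naming the missing packages otherwise.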
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    r"""
                # Using torch.hub!
                import torch

                config = torch.hub.load('huggingface/transformers', 'config', 'google-bert/bert-base-uncased')  # Download configuration from huggingface.co and cache.
                config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/')  # E.g. config (or model) was saved using `save_pretrained('./test/bert_saved_model/')`
                config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/my_configuration.json')
                config = torch.hub.load('huggingface/transformers', 'config', 'google-bert/bert-base-uncased', output_attentions=True, foo=False)
                assert config.output_attentions == True
                config, unused_kwargs = torch.hub.load('huggingface/transformers', 'config', 'google-bert/bert-base-uncased', output_attentions=True, foo=False, return_unused_kwargs=True)
                assert config.output_attentions == True
                assert unused_kwargs == {'foo': False}

            """

    return AutoConfig.from_pretrained(*args, **kwargs)
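
# The entrypoints in this file can also be exercised against a local checkout,
# which is handy when iterating on it (a sketch; '/path/to/transformers' stands
# in for wherever this repository is cloned):
#
#   config = torch.hub.load('/path/to/transformers', 'config',
#                           'google-bert/bert-base-uncased', source='local')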


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    r"""
        # Using torch.hub!
        import torch

        tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', 'google-bert/bert-base-uncased')    # Download vocabulary from huggingface.co and cache.
        tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', './test/bert_saved_model/')  # E.g. tokenizer was saved using `save_pretrained('./test/bert_saved_model/')`

    """

    return AutoTokenizer.from_pretrained(*args, **kwargs)
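
# Rough shape of what the loaded tokenizer returns (a sketch; the decoded string
# is what the 'google-bert/bert-base-uncased' checkpoint typically produces):
#
#   ids = tokenizer("Hello world!")["input_ids"]
#   print(tokenizer.decode(ids))  # "[CLS] hello world! [SEP]"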


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    r"""
            # Using torch.hub!
            import torch

            model = torch.hub.load('huggingface/transformers', 'model', 'google-bert/bert-base-uncased')    # Download model and configuration from huggingface.co and cache.
            model = torch.hub.load('huggingface/transformers', 'model', './test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/bert_model/')`
            model = torch.hub.load('huggingface/transformers', 'model', 'google-bert/bert-base-uncased', output_attentions=True)  # Update configuration during loading
            assert model.config.output_attentions == True
            # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
            model = torch.hub.load('huggingface/transformers', 'model', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)

        """

    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    r"""
        # Using torch.hub!
        import torch

        model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', 'openai-community/gpt2')    # Download model and configuration from huggingface.co and cache.
        model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', './test/saved_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
        model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', 'openai-community/gpt2', output_attentions=True)  # Update configuration during loading
        assert model.config.output_attentions == True
        # Loading from a TF checkpoint file instead of a PyTorch model (slower)
        config = AutoConfig.from_pretrained('./tf_model/gpt_tf_model_config.json')
        model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', './tf_model/gpt_tf_checkpoint.ckpt.index', from_tf=True, config=config)

    """
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)
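
# Generating text with the entrypoint above (a sketch; assumes the
# 'openai-community/gpt2' checkpoint from the docstring):
#
#   tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', 'openai-community/gpt2')
#   model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', 'openai-community/gpt2')
#   input_ids = tokenizer('Hello, my name is', return_tensors='pt').input_ids
#   print(tokenizer.decode(model.generate(input_ids, max_new_tokens=5)[0]))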


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    r"""
            # Using torch.hub!
            import torch

            model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', 'google-bert/bert-base-uncased')    # Download model and configuration from huggingface.co and cache.
            model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', './test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/bert_model/')`
            model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', 'google-bert/bert-base-uncased', output_attentions=True)  # Update configuration during loading
            assert model.config.output_attentions == True
            # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
            model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)

        """

    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)
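
# Filling in a '[MASK]' token with the entrypoint above (a sketch; assumes the
# 'google-bert/bert-base-uncased' model and matching tokenizer from the docstring):
#
#   inputs = tokenizer('Paris is the [MASK] of France.', return_tensors='pt')
#   logits = model(**inputs).logits
#   mask_pos = (inputs.input_ids == tokenizer.mask_token_id).nonzero()[0, 1]
#   print(tokenizer.decode(logits[0, mask_pos].argmax()))  # typically 'capital'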


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    r"""
            # Using torch.hub!
            import torch

            model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'google-bert/bert-base-uncased')    # Download model and configuration from huggingface.co and cache.
            model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/bert_model/')`
            model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'google-bert/bert-base-uncased', output_attentions=True)  # Update configuration during loading
            assert model.config.output_attentions == True
            # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
            model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)

        """

    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)
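
# Reading the classifier output (a sketch; note that with the bare
# 'google-bert/bert-base-uncased' checkpoint the sequence-classification head is
# freshly initialized, so the probabilities are meaningless until fine-tuning):
#
#   inputs = tokenizer('This movie is great!', return_tensors='pt')
#   probs = model(**inputs).logits.softmax(dim=-1)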


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    r"""
        # Using torch.hub!
        import torch

        model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'google-bert/bert-base-uncased')    # Download model and configuration from huggingface.co and cache.
        model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/bert_model/')`
        model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'google-bert/bert-base-uncased', output_attentions=True)  # Update configuration during loading
        assert model.config.output_attentions == True
        # Loading from a TF checkpoint file instead of a PyTorch model (slower)
        config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
        model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)

    """
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
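

# Minimal end-to-end smoke test of the entrypoints above. This is an
# illustrative sketch: it needs network access on first run and downloads the
# 'google-bert/bert-base-uncased' checkpoint used throughout the docstrings.
if __name__ == "__main__":
    import torch

    hub_tokenizer = torch.hub.load("huggingface/transformers", "tokenizer", "google-bert/bert-base-uncased")
    hub_model = torch.hub.load("huggingface/transformers", "model", "google-bert/bert-base-uncased")
    encoded = hub_tokenizer("Hello world!", return_tensors="pt")
    hidden = hub_model(**encoded).last_hidden_state
    print(hidden.shape)  # torch.Size([1, 5, 768]) for this input and checkpoint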