Unverified Commit d4c2cb40 authored by Julien Chaumond, committed by GitHub

Kill model archive maps (#4636)

* Kill model archive maps

* Fixup

* Also kill model_archive_map for MaskedBertPreTrainedModel

* Unhook config_archive_map

* Tokenizers: align with model id changes

* make style && make quality

* Fix CI
parent 47a551d1
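The core change: every modeling and configuration module used to export a `*_PRETRAINED_MODEL_ARCHIVE_MAP` dict mapping a shortcut name to a hard-coded weights URL; this commit replaces those dicts with `*_PRETRAINED_MODEL_ARCHIVE_LIST` lists of plain model identifiers, with file locations derived from the identifier at load time. A minimal before/after sketch of the shape change (entries mirror the template diff below; they are illustrative, not the real per-model tables):

    # Before: shortcut name -> hard-coded weights URL
    XXX_PRETRAINED_MODEL_ARCHIVE_MAP = {
        "xxx-base-uncased": "https://cdn.huggingface.co/xxx-base-uncased-pytorch_model.bin",
    }

    # After: just the model identifiers; URLs are derived from the id at load time
    XXX_PRETRAINED_MODEL_ARCHIVE_LIST = [
        "xxx-base-uncased",
    ]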
@@ -63,8 +63,6 @@ logger = logging.getLogger(__name__)
 MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
 MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
-ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in MODEL_CONFIG_CLASSES), (),)
 def set_seed(args):
     random.seed(args.seed)
@@ -411,7 +409,7 @@ def main():
         default=None,
         type=str,
         required=True,
-        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
+        help="Path to pretrained model or model identifier from huggingface.co/models",
     )
     parser.add_argument(
         "--output_dir",
...
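The deleted `ALL_MODELS` line flattened the keys of every config's `pretrained_config_archive_map` into one tuple so the `--help` output could enumerate every known shortcut name; with the maps gone there is nothing to enumerate, and the help string points to huggingface.co/models instead. What the removed expression computed, re-run on a hypothetical stand-in config class:

    class FakeConfig:
        """Stand-in for a pretrained config class (hypothetical, for illustration only)."""
        def __init__(self, names):
            self.pretrained_config_archive_map = {n: "https://example.invalid/" + n for n in names}

    MODEL_CONFIG_CLASSES = [FakeConfig(["bert-base-uncased"]), FakeConfig(["roberta-base"])]
    # sum(..., ()) concatenates the per-config key tuples into one flat tuple
    ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in MODEL_CONFIG_CLASSES), (),)
    print(ALL_MODELS)  # ('bert-base-uncased', 'roberta-base')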
@@ -57,7 +57,6 @@ class XxxConfig(PretrainedConfig):
             initializing all weight matrices.
         layer_norm_eps: The epsilon used by LayerNorm.
     """
-    pretrained_config_archive_map = XXX_PRETRAINED_CONFIG_ARCHIVE_MAP
     model_type = "xxx"
     def __init__(
...
@@ -32,13 +32,13 @@ from .modeling_tf_utils import TFPreTrainedModel, get_initializer, shape_list
 logger = logging.getLogger(__name__)
 ####################################################
-# This dict contrains shortcut names and associated url
-# for the pretrained weights provided with the models
+# This list contrains shortcut names for some of
+# the pretrained weights provided with the models
 ####################################################
-TF_XXX_PRETRAINED_MODEL_ARCHIVE_MAP = {
-    "xxx-base-uncased": "https://cdn.huggingface.co/xxx-base-uncased-tf_model.h5",
-    "xxx-large-uncased": "https://cdn.huggingface.co/xxx-large-uncased-tf_model.h5",
-}
+TF_XXX_PRETRAINED_MODEL_ARCHIVE_LIST = [
+    "xxx-base-uncased",
+    "xxx-large-uncased",
+]
 ####################################################
@@ -180,7 +180,6 @@ class TFXxxPreTrainedModel(TFPreTrainedModel):
     """
     config_class = XxxConfig
-    pretrained_model_archive_map = TF_XXX_PRETRAINED_MODEL_ARCHIVE_MAP
     base_model_prefix = "transformer"
...
@@ -34,13 +34,13 @@ from .modeling_utils import PreTrainedModel
 logger = logging.getLogger(__name__)
 ####################################################
-# This dict contrains shortcut names and associated url
-# for the pretrained weights provided with the models
+# This list contrains shortcut names for some of
+# the pretrained weights provided with the models
 ####################################################
-XXX_PRETRAINED_MODEL_ARCHIVE_MAP = {
-    "xxx-base-uncased": "https://cdn.huggingface.co/xxx-base-uncased-pytorch_model.bin",
-    "xxx-large-uncased": "https://cdn.huggingface.co/xxx-large-uncased-pytorch_model.bin",
-}
+XXX_PRETRAINED_MODEL_ARCHIVE_LIST = [
+    "xxx-base-uncased",
+    "xxx-large-uncased",
+]
 ####################################################
@@ -180,7 +180,6 @@ class XxxPreTrainedModel(PreTrainedModel):
     """
     config_class = XxxConfig
-    pretrained_model_archive_map = XXX_PRETRAINED_MODEL_ARCHIVE_MAP
     load_tf_weights = load_tf_weights_in_xxx
     base_model_prefix = "transformer"
...
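With `pretrained_model_archive_map` removed from the `PreTrainedModel` subclasses, loading no longer consults a per-model table at all. A sketch of how a weights URL can be derived from the identifier alone, under the assumption that resolution moves to one shared helper (the function name here is hypothetical; the flat-id pattern matches the URLs the deleted maps hard-coded):

    WEIGHTS_NAME = "pytorch_model.bin"

    def resolve_weights_url(model_id: str, cdn_base: str = "https://cdn.huggingface.co") -> str:
        # Hypothetical helper: derive the weights URL from the identifier,
        # so per-model archive maps are no longer needed.
        if "/" in model_id:  # namespaced id, e.g. "facebook/bart-large-xsum"
            return f"{cdn_base}/{model_id}/{WEIGHTS_NAME}"
        # legacy flat id, e.g. "xxx-base-uncased"
        return f"{cdn_base}/{model_id}-{WEIGHTS_NAME}"

    print(resolve_weights_url("xxx-base-uncased"))
    # https://cdn.huggingface.co/xxx-base-uncased-pytorch_model.bin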
@@ -32,7 +32,7 @@ if is_torch_available():
         XxxForSequenceClassification,
         XxxForTokenClassification,
     )
-    from transformers.modeling_xxx import XXX_PRETRAINED_MODEL_ARCHIVE_MAP
+    from transformers.modeling_xxx import XXX_PRETRAINED_MODEL_ARCHIVE_LIST
 @require_torch
@@ -269,6 +269,6 @@ class XxxModelTest(ModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(XXX_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in XXX_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = XxxModel.from_pretrained(model_name, cache_dir=CACHE_DIR)
             self.assertIsNotNone(model)
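The test changes below are all the same mechanical rewrite: `list(ARCHIVE_MAP.keys())[:1]` becomes `ARCHIVE_LIST[:1]`. The selected checkpoint is unchanged because Python dicts preserve insertion order (guaranteed since 3.7), so the first key of the old map and the first entry of the new list name the same model, assuming the list keeps the map's ordering:

    archive_map = {"xxx-base-uncased": "https://cdn.huggingface.co/xxx-base-uncased-pytorch_model.bin"}
    archive_list = ["xxx-base-uncased"]
    # Both idioms pick the same first model name
    assert list(archive_map.keys())[:1] == archive_list[:1] == ["xxx-base-uncased"]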
@@ -33,7 +33,7 @@ if is_torch_available():
         AlbertForTokenClassification,
         AlbertForQuestionAnswering,
     )
-    from transformers.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP
+    from transformers.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
 @require_torch
@@ -295,6 +295,6 @@ class AlbertModelTest(ModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = AlbertModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
@@ -40,7 +40,7 @@ if is_torch_available():
         AutoModelForTokenClassification,
         BertForTokenClassification,
     )
-    from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_MAP
+    from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST
     from transformers.modeling_auto import (
         MODEL_MAPPING,
         MODEL_FOR_PRETRAINING_MAPPING,
@@ -56,7 +56,7 @@ class AutoModelTest(unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
         logging.basicConfig(level=logging.INFO)
-        for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             config = AutoConfig.from_pretrained(model_name)
             self.assertIsNotNone(config)
             self.assertIsInstance(config, BertConfig)
@@ -71,7 +71,7 @@ class AutoModelTest(unittest.TestCase):
     @slow
     def test_model_for_pretraining_from_pretrained(self):
         logging.basicConfig(level=logging.INFO)
-        for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             config = AutoConfig.from_pretrained(model_name)
             self.assertIsNotNone(config)
             self.assertIsInstance(config, BertConfig)
@@ -87,7 +87,7 @@ class AutoModelTest(unittest.TestCase):
     @slow
     def test_lmhead_model_from_pretrained(self):
         logging.basicConfig(level=logging.INFO)
-        for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             config = AutoConfig.from_pretrained(model_name)
             self.assertIsNotNone(config)
             self.assertIsInstance(config, BertConfig)
@@ -100,7 +100,7 @@ class AutoModelTest(unittest.TestCase):
     @slow
     def test_sequence_classification_model_from_pretrained(self):
         logging.basicConfig(level=logging.INFO)
-        for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             config = AutoConfig.from_pretrained(model_name)
             self.assertIsNotNone(config)
             self.assertIsInstance(config, BertConfig)
@@ -115,7 +115,7 @@ class AutoModelTest(unittest.TestCase):
     @slow
     def test_question_answering_model_from_pretrained(self):
         logging.basicConfig(level=logging.INFO)
-        for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             config = AutoConfig.from_pretrained(model_name)
             self.assertIsNotNone(config)
             self.assertIsInstance(config, BertConfig)
@@ -128,7 +128,7 @@ class AutoModelTest(unittest.TestCase):
     @slow
     def test_token_classification_model_from_pretrained(self):
         logging.basicConfig(level=logging.INFO)
-        for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             config = AutoConfig.from_pretrained(model_name)
             self.assertIsNotNone(config)
             self.assertIsInstance(config, BertConfig)
...
@@ -39,7 +39,7 @@ if is_torch_available():
         MBartTokenizer,
     )
     from transformers.modeling_bart import (
-        BART_PRETRAINED_MODEL_ARCHIVE_MAP,
+        BART_PRETRAINED_MODEL_ARCHIVE_LIST,
         shift_tokens_right,
         invert_mask,
         _prepare_bart_decoder_inputs,
@@ -261,7 +261,7 @@ class BartTranslationTests(unittest.TestCase):
         self.assertEqual(expected_translation_romanian, decoded[0])
     def test_mbart_enro_config(self):
-        mbart_models = ["mbart-large-en-ro"]
+        mbart_models = ["facebook/mbart-large-en-ro"]
         expected = {"scale_embedding": True, "output_past": True}
         for name in mbart_models:
             config = BartConfig.from_pretrained(name)
@@ -561,7 +561,7 @@ class BartModelIntegrationTests(unittest.TestCase):
     @unittest.skip("This is just too slow")
     def test_model_from_pretrained(self):
         # Forces 1.6GB download from S3 for each model
-        for model_name in list(BART_PRETRAINED_MODEL_ARCHIVE_MAP.keys()):
+        for model_name in BART_PRETRAINED_MODEL_ARCHIVE_LIST:
             model = BartModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
@@ -593,7 +593,7 @@ class BartModelIntegrationTests(unittest.TestCase):
         self.assertEqual(EXPECTED_SUMMARY, decoded[0])
     def test_xsum_config_generation_params(self):
-        config = BartConfig.from_pretrained("bart-large-xsum")
+        config = BartConfig.from_pretrained("facebook/bart-large-xsum")
         expected_params = dict(num_beams=6, do_sample=False, early_stopping=True, length_penalty=1.0)
         config_params = {k: getattr(config, k, "MISSING") for k, v in expected_params.items()}
         self.assertDictEqual(expected_params, config_params)
...
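Two of the BART hunks above are model-id renames rather than map removals: once checkpoints are addressed purely by hub identifier, these particular checkpoints live under the facebook organization, so the bare shortcut names no longer resolve and must become namespaced ids. A usage sketch (downloads the config from huggingface.co; the printed values are the defaults the integration test above asserts):

    from transformers import BartConfig

    # Namespaced identifier replaces the old bare shortcut "bart-large-xsum"
    config = BartConfig.from_pretrained("facebook/bart-large-xsum")
    print(config.num_beams, config.do_sample, config.early_stopping, config.length_penalty)
    # expected per the test: 6 False True 1.0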
@@ -35,7 +35,7 @@ if is_torch_available():
         BertForTokenClassification,
         BertForMultipleChoice,
     )
-    from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_MAP
+    from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST
 class BertModelTester:
@@ -494,6 +494,6 @@ class BertModelTest(ModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = BertModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
@@ -36,7 +36,7 @@ if is_torch_available():
         PreTrainedModel,
         BertModel,
         BertConfig,
-        BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
+        BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
         top_k_top_p_filtering,
     )
@@ -824,7 +824,7 @@ class ModelUtilsTest(unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
         logging.basicConfig(level=logging.INFO)
-        for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             config = BertConfig.from_pretrained(model_name)
             self.assertIsNotNone(config)
             self.assertIsInstance(config, PretrainedConfig)
...
@@ -24,7 +24,7 @@ from .utils import require_torch, slow, torch_device
 if is_torch_available():
     import torch
-    from transformers import CTRLConfig, CTRLModel, CTRL_PRETRAINED_MODEL_ARCHIVE_MAP, CTRLLMHeadModel
+    from transformers import CTRLConfig, CTRLModel, CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLLMHeadModel
 @require_torch
@@ -210,7 +210,7 @@ class CTRLModelTest(ModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(CTRL_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = CTRLModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
...
@@ -247,6 +247,6 @@ class DistilBertModelTest(ModelTesterMixin, unittest.TestCase):
     # @slow
     # def test_model_from_pretrained(self):
-    #     for model_name in list(DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+    #     for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
     #         model = DistilBertModel.from_pretrained(model_name)
     #         self.assertIsNotNone(model)
@@ -32,7 +32,7 @@ if is_torch_available():
         ElectraForPreTraining,
         ElectraForSequenceClassification,
     )
-    from transformers.modeling_electra import ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP
+    from transformers.modeling_electra import ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST
 @require_torch
@@ -312,6 +312,6 @@ class ElectraModelTest(ModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = ElectraModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
@@ -32,7 +32,7 @@ if is_torch_available():
         FlaubertForQuestionAnsweringSimple,
         FlaubertForSequenceClassification,
     )
-    from transformers.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP
+    from transformers.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
 @require_torch
@@ -387,6 +387,6 @@ class FlaubertModelTest(ModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = FlaubertModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
@@ -28,7 +28,7 @@ if is_torch_available():
     from transformers import (
         GPT2Config,
         GPT2Model,
-        GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,
+        GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
         GPT2LMHeadModel,
         GPT2DoubleHeadsModel,
     )
@@ -334,7 +334,7 @@ class GPT2ModelTest(ModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(GPT2_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = GPT2Model.from_pretrained(model_name)
             self.assertIsNotNone(model)
...
@@ -28,7 +28,7 @@ if is_torch_available():
     from transformers import (
         OpenAIGPTConfig,
         OpenAIGPTModel,
-        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,
+        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
         OpenAIGPTLMHeadModel,
         OpenAIGPTDoubleHeadsModel,
     )
@@ -218,7 +218,7 @@ class OpenAIGPTModelTest(ModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = OpenAIGPTModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
...
@@ -29,7 +29,7 @@ if is_torch_available():
         ReformerModelWithLMHead,
         ReformerTokenizer,
         ReformerLayer,
-        REFORMER_PRETRAINED_MODEL_ARCHIVE_MAP,
+        REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
     )
     import torch
@@ -503,7 +503,7 @@ class ReformerLocalAttnModelTest(ReformerTesterMixin, ModelTesterMixin, unittest
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(REFORMER_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = ReformerModelWithLMHead.from_pretrained(model_name)
             self.assertIsNotNone(model)
...
@@ -33,7 +33,7 @@ if is_torch_available():
         RobertaForTokenClassification,
     )
     from transformers.modeling_roberta import RobertaEmbeddings, RobertaForMultipleChoice, RobertaForQuestionAnswering
-    from transformers.modeling_roberta import ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
+    from transformers.modeling_roberta import ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
     from transformers.modeling_utils import create_position_ids_from_input_ids
@@ -273,7 +273,7 @@ class RobertaModelTest(ModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = RobertaModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
...
@@ -26,7 +26,7 @@ from .utils import require_torch, slow, torch_device
 if is_torch_available():
     import torch
     from transformers import T5Config, T5Model, T5ForConditionalGeneration
-    from transformers.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_MAP
+    from transformers.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_LIST
     from transformers.tokenization_t5 import T5Tokenizer
@@ -372,7 +372,7 @@ class T5ModelTest(ModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(T5_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = T5Model.from_pretrained(model_name)
             self.assertIsNotNone(model)
...
@@ -30,7 +30,7 @@ if is_tf_available():
         TFAlbertForMaskedLM,
         TFAlbertForSequenceClassification,
         TFAlbertForQuestionAnswering,
-        TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
+        TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
     )
@@ -257,6 +257,6 @@ class TFAlbertModelTest(TFModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = TFAlbertModel.from_pretrained(model_name)
             self.assertIsNotNone(model)