"official/vision/evaluation/iou_test.py" did not exist on "ff93e945dcc2b1ad93148f98ca42edf32bc13b61"
Unverified commit d4c2cb40, authored by Julien Chaumond, committed by GitHub

Kill model archive maps (#4636)

* Kill model archive maps

* Fixup

* Also kill model_archive_map for MaskedBertPreTrainedModel

* Unhook config_archive_map

* Tokenizers: align with model id changes

* make style && make quality

* Fix CI
parent 47a551d1
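Before the per-file hunks, the shape of the change: each per-model archive map (shortcut name to hard-coded CDN URL) is replaced by a plain archive list of model identifiers, and weight resolution is left to from_pretrained. A minimal sketch of the pattern, using the placeholder names from the templates changed below (illustrative, not a complete file):

# Before (removed in this commit): shortcut names mapped to hard-coded weight URLs.
XXX_PRETRAINED_MODEL_ARCHIVE_MAP = {
    "xxx-base-uncased": "https://cdn.huggingface.co/xxx-base-uncased-pytorch_model.bin",
}

# After: only the identifiers are kept; from_pretrained() resolves the actual
# weight files from huggingface.co by model id instead of reading a URL map.
XXX_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "xxx-base-uncased",
]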
@@ -63,8 +63,6 @@ logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in MODEL_CONFIG_CLASSES), (),)
def set_seed(args):
random.seed(args.seed)
@@ -411,7 +409,7 @@ def main():
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
help="Path to pretrained model or model identifier from huggingface.co/models",
)
parser.add_argument(
"--output_dir",
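With ALL_MODELS gone, --model_name_or_path is no longer restricted to a fixed list of shortcut names; the help text now points at any model identifier from huggingface.co/models (or a local path). A hedged sketch of what the script does with that value, assuming the usual Auto* classes for this question-answering example; the identifier itself is only illustrative:

from transformers import AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer

# Any identifier from huggingface.co/models (or a local directory) works here;
# "bert-base-uncased" is just an example, not a value the script requires.
model_name_or_path = "bert-base-uncased"
config = AutoConfig.from_pretrained(model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
model = AutoModelForQuestionAnswering.from_pretrained(model_name_or_path, config=config)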
@@ -57,7 +57,6 @@ class XxxConfig(PretrainedConfig):
initializing all weight matrices.
layer_norm_eps: The epsilon used by LayerNorm.
"""
pretrained_config_archive_map = XXX_PRETRAINED_CONFIG_ARCHIVE_MAP
model_type = "xxx"
def __init__(
@@ -32,13 +32,13 @@ from .modeling_tf_utils import TFPreTrainedModel, get_initializer, shape_list
logger = logging.getLogger(__name__)
####################################################
# This dict contains shortcut names and associated URLs
# for the pretrained weights provided with the models
# This list contains shortcut names for some of
# the pretrained weights provided with the models
####################################################
TF_XXX_PRETRAINED_MODEL_ARCHIVE_MAP = {
"xxx-base-uncased": "https://cdn.huggingface.co/xxx-base-uncased-tf_model.h5",
"xxx-large-uncased": "https://cdn.huggingface.co/xxx-large-uncased-tf_model.h5",
}
TF_XXX_PRETRAINED_MODEL_ARCHIVE_LIST = [
"xxx-base-uncased",
"xxx-large-uncased",
]
####################################################
@@ -180,7 +180,6 @@ class TFXxxPreTrainedModel(TFPreTrainedModel):
"""
config_class = XxxConfig
pretrained_model_archive_map = TF_XXX_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "transformer"
@@ -34,13 +34,13 @@ from .modeling_utils import PreTrainedModel
logger = logging.getLogger(__name__)
####################################################
# This dict contains shortcut names and associated URLs
# for the pretrained weights provided with the models
# This list contains shortcut names for some of
# the pretrained weights provided with the models
####################################################
XXX_PRETRAINED_MODEL_ARCHIVE_MAP = {
"xxx-base-uncased": "https://cdn.huggingface.co/xxx-base-uncased-pytorch_model.bin",
"xxx-large-uncased": "https://cdn.huggingface.co/xxx-large-uncased-pytorch_model.bin",
}
XXX_PRETRAINED_MODEL_ARCHIVE_LIST = [
"xxx-base-uncased",
"xxx-large-uncased",
]
####################################################
@@ -180,7 +180,6 @@ class XxxPreTrainedModel(PreTrainedModel):
"""
config_class = XxxConfig
pretrained_model_archive_map = XXX_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_xxx
base_model_prefix = "transformer"
@@ -32,7 +32,7 @@ if is_torch_available():
XxxForSequenceClassification,
XxxForTokenClassification,
)
from transformers.modeling_xxx import XXX_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_xxx import XXX_PRETRAINED_MODEL_ARCHIVE_LIST
@require_torch
@@ -269,6 +269,6 @@ class XxxModelTest(ModelTesterMixin, unittest.TestCase):
@slow
def test_model_from_pretrained(self):
for model_name in list(XXX_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
for model_name in XXX_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = XxxModel.from_pretrained(model_name, cache_dir=CACHE_DIR)
self.assertIsNotNone(model)
@@ -33,7 +33,7 @@ if is_torch_available():
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
from transformers.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
@require_torch
@@ -295,6 +295,6 @@ class AlbertModelTest(ModelTesterMixin, unittest.TestCase):
@slow
def test_model_from_pretrained(self):
for model_name in list(ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = AlbertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@@ -40,7 +40,7 @@ if is_torch_available():
AutoModelForTokenClassification,
BertForTokenClassification,
)
from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.modeling_auto import (
MODEL_MAPPING,
MODEL_FOR_PRETRAINING_MAPPING,
@@ -56,7 +56,7 @@ class AutoModelTest(unittest.TestCase):
@slow
def test_model_from_pretrained(self):
logging.basicConfig(level=logging.INFO)
for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
@@ -71,7 +71,7 @@ class AutoModelTest(unittest.TestCase):
@slow
def test_model_for_pretraining_from_pretrained(self):
logging.basicConfig(level=logging.INFO)
for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
@@ -87,7 +87,7 @@ class AutoModelTest(unittest.TestCase):
@slow
def test_lmhead_model_from_pretrained(self):
logging.basicConfig(level=logging.INFO)
for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
@@ -100,7 +100,7 @@ class AutoModelTest(unittest.TestCase):
@slow
def test_sequence_classification_model_from_pretrained(self):
logging.basicConfig(level=logging.INFO)
for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
@@ -115,7 +115,7 @@ class AutoModelTest(unittest.TestCase):
@slow
def test_question_answering_model_from_pretrained(self):
logging.basicConfig(level=logging.INFO)
for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
@@ -128,7 +128,7 @@ class AutoModelTest(unittest.TestCase):
@slow
def test_token_classification_model_from_pretrained(self):
logging.basicConfig(level=logging.INFO)
for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
@@ -39,7 +39,7 @@ if is_torch_available():
MBartTokenizer,
)
from transformers.modeling_bart import (
BART_PRETRAINED_MODEL_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
shift_tokens_right,
invert_mask,
_prepare_bart_decoder_inputs,
@@ -261,7 +261,7 @@ class BartTranslationTests(unittest.TestCase):
self.assertEqual(expected_translation_romanian, decoded[0])
def test_mbart_enro_config(self):
mbart_models = ["mbart-large-en-ro"]
mbart_models = ["facebook/mbart-large-en-ro"]
expected = {"scale_embedding": True, "output_past": True}
for name in mbart_models:
config = BartConfig.from_pretrained(name)
@@ -561,7 +561,7 @@ class BartModelIntegrationTests(unittest.TestCase):
@unittest.skip("This is just too slow")
def test_model_from_pretrained(self):
# Forces 1.6GB download from S3 for each model
for model_name in list(BART_PRETRAINED_MODEL_ARCHIVE_MAP.keys()):
for model_name in BART_PRETRAINED_MODEL_ARCHIVE_LIST:
model = BartModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@@ -593,7 +593,7 @@ class BartModelIntegrationTests(unittest.TestCase):
self.assertEqual(EXPECTED_SUMMARY, decoded[0])
def test_xsum_config_generation_params(self):
config = BartConfig.from_pretrained("bart-large-xsum")
config = BartConfig.from_pretrained("facebook/bart-large-xsum")
expected_params = dict(num_beams=6, do_sample=False, early_stopping=True, length_penalty=1.0)
config_params = {k: getattr(config, k, "MISSING") for k, v in expected_params.items()}
self.assertDictEqual(expected_params, config_params)
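The identifier changes in this file ("mbart-large-en-ro" to "facebook/mbart-large-en-ro", "bart-large-xsum" to "facebook/bart-large-xsum") are the model-id alignment mentioned in the commit message: these checkpoints are now addressed under their organization namespace on huggingface.co. A small sketch mirroring the updated mbart test; the asserted values are the ones that test expects:

from transformers import BartConfig

# Load the config by its namespaced model id, as in the updated test above.
config = BartConfig.from_pretrained("facebook/mbart-large-en-ro")
assert config.scale_embedding is True
assert config.output_past is True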
@@ -35,7 +35,7 @@ if is_torch_available():
BertForTokenClassification,
BertForMultipleChoice,
)
from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST
class BertModelTester:
@@ -494,6 +494,6 @@ class BertModelTest(ModelTesterMixin, unittest.TestCase):
@slow
def test_model_from_pretrained(self):
for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = BertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@@ -36,7 +36,7 @@ if is_torch_available():
PreTrainedModel,
BertModel,
BertConfig,
BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
top_k_top_p_filtering,
)
@@ -824,7 +824,7 @@ class ModelUtilsTest(unittest.TestCase):
@slow
def test_model_from_pretrained(self):
logging.basicConfig(level=logging.INFO)
for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = BertConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, PretrainedConfig)
@@ -24,7 +24,7 @@ from .utils import require_torch, slow, torch_device
if is_torch_available():
import torch
from transformers import CTRLConfig, CTRLModel, CTRL_PRETRAINED_MODEL_ARCHIVE_MAP, CTRLLMHeadModel
from transformers import CTRLConfig, CTRLModel, CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLLMHeadModel
@require_torch
@@ -210,7 +210,7 @@ class CTRLModelTest(ModelTesterMixin, unittest.TestCase):
@slow
def test_model_from_pretrained(self):
for model_name in list(CTRL_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = CTRLModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@@ -247,6 +247,6 @@ class DistilBertModelTest(ModelTesterMixin, unittest.TestCase):
# @slow
# def test_model_from_pretrained(self):
# for model_name in list(DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
# for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
# model = DistilBertModel.from_pretrained(model_name)
# self.assertIsNotNone(model)
@@ -32,7 +32,7 @@ if is_torch_available():
ElectraForPreTraining,
ElectraForSequenceClassification,
)
from transformers.modeling_electra import ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_electra import ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST
@require_torch
@@ -312,6 +312,6 @@ class ElectraModelTest(ModelTesterMixin, unittest.TestCase):
@slow
def test_model_from_pretrained(self):
for model_name in list(ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
for model_name in ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ElectraModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@@ -32,7 +32,7 @@ if is_torch_available():
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
)
from transformers.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
@require_torch
@@ -387,6 +387,6 @@ class FlaubertModelTest(ModelTesterMixin, unittest.TestCase):
@slow
def test_model_from_pretrained(self):
for model_name in list(FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = FlaubertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@@ -28,7 +28,7 @@ if is_torch_available():
from transformers import (
GPT2Config,
GPT2Model,
GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
GPT2LMHeadModel,
GPT2DoubleHeadsModel,
)
@@ -334,7 +334,7 @@ class GPT2ModelTest(ModelTesterMixin, unittest.TestCase):
@slow
def test_model_from_pretrained(self):
for model_name in list(GPT2_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
for model_name in GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = GPT2Model.from_pretrained(model_name)
self.assertIsNotNone(model)
@@ -28,7 +28,7 @@ if is_torch_available():
from transformers import (
OpenAIGPTConfig,
OpenAIGPTModel,
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTLMHeadModel,
OpenAIGPTDoubleHeadsModel,
)
@@ -218,7 +218,7 @@ class OpenAIGPTModelTest(ModelTesterMixin, unittest.TestCase):
@slow
def test_model_from_pretrained(self):
for model_name in list(OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = OpenAIGPTModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@@ -29,7 +29,7 @@ if is_torch_available():
ReformerModelWithLMHead,
ReformerTokenizer,
ReformerLayer,
REFORMER_PRETRAINED_MODEL_ARCHIVE_MAP,
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
import torch
@@ -503,7 +503,7 @@ class ReformerLocalAttnModelTest(ReformerTesterMixin, ModelTesterMixin, unittest.TestCase):
@slow
def test_model_from_pretrained(self):
for model_name in list(REFORMER_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
for model_name in REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ReformerModelWithLMHead.from_pretrained(model_name)
self.assertIsNotNone(model)
@@ -33,7 +33,7 @@ if is_torch_available():
RobertaForTokenClassification,
)
from transformers.modeling_roberta import RobertaEmbeddings, RobertaForMultipleChoice, RobertaForQuestionAnswering
from transformers.modeling_roberta import ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_roberta import ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.modeling_utils import create_position_ids_from_input_ids
@@ -273,7 +273,7 @@ class RobertaModelTest(ModelTesterMixin, unittest.TestCase):
@slow
def test_model_from_pretrained(self):
for model_name in list(ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
for model_name in ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = RobertaModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@@ -26,7 +26,7 @@ from .utils import require_torch, slow, torch_device
if is_torch_available():
import torch
from transformers import T5Config, T5Model, T5ForConditionalGeneration
from transformers.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.tokenization_t5 import T5Tokenizer
@@ -372,7 +372,7 @@ class T5ModelTest(ModelTesterMixin, unittest.TestCase):
@slow
def test_model_from_pretrained(self):
for model_name in list(T5_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
for model_name in T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = T5Model.from_pretrained(model_name)
self.assertIsNotNone(model)
@@ -30,7 +30,7 @@ if is_tf_available():
TFAlbertForMaskedLM,
TFAlbertForSequenceClassification,
TFAlbertForQuestionAnswering,
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
)
@@ -257,6 +257,6 @@ class TFAlbertModelTest(TFModelTesterMixin, unittest.TestCase):
@slow
def test_model_from_pretrained(self):
for model_name in list(TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
for model_name in TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFAlbertModel.from_pretrained(model_name)
self.assertIsNotNone(model)