Unverified commit d4c2cb40, authored by Julien Chaumond, committed by GitHub

Kill model archive maps (#4636)

* Kill model archive maps

* Fixup

* Also kill model_archive_map for MaskedBertPreTrainedModel

* Unhook config_archive_map

* Tokenizers: align with model id changes

* make style && make quality

* Fix CI
parent 47a551d1
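
The change repeated throughout the hunks below swaps the per-model `*_PRETRAINED_MODEL_ARCHIVE_MAP` dicts, which mapped a model id to a hosted checkpoint URL, for flat `*_PRETRAINED_MODEL_ARCHIVE_LIST` lists that contain only the model ids. A minimal sketch of the shape change and how call sites adapt (the constant names mirror the real ones, but the URL and values here are illustrative, not taken from the diff):

# Old style: dict of model id -> checkpoint URL (this URL is hypothetical).
TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
    "bert-base-uncased": "https://example.com/bert-base-uncased-tf_model.h5",
}

# New style: a plain list of model ids; file locations are resolved from the id.
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = ["bert-base-uncased"]

# Call sites move from iterating over dict keys ...
for model_name in list(TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
    print(model_name)

# ... to slicing the list directly, as in every test hunk below.
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
    print(model_name)
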
@@ -49,7 +49,7 @@ class TFAutoModelTest(unittest.TestCase):
         self.assertTrue(h5py.version.hdf5_version.startswith("1.10"))
         logging.basicConfig(level=logging.INFO)
-        # for model_name in list(TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
         for model_name in ["bert-base-uncased"]:
             config = AutoConfig.from_pretrained(model_name)
             self.assertIsNotNone(config)
@@ -66,7 +66,7 @@ class TFAutoModelTest(unittest.TestCase):
         self.assertTrue(h5py.version.hdf5_version.startswith("1.10"))
         logging.basicConfig(level=logging.INFO)
-        # for model_name in list(TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
         for model_name in ["bert-base-uncased"]:
             config = AutoConfig.from_pretrained(model_name)
             self.assertIsNotNone(config)
@@ -79,7 +79,7 @@ class TFAutoModelTest(unittest.TestCase):
     @slow
     def test_lmhead_model_from_pretrained(self):
         logging.basicConfig(level=logging.INFO)
-        # for model_name in list(TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
         for model_name in ["bert-base-uncased"]:
             config = AutoConfig.from_pretrained(model_name)
             self.assertIsNotNone(config)
@@ -92,7 +92,7 @@ class TFAutoModelTest(unittest.TestCase):
     @slow
     def test_sequence_classification_model_from_pretrained(self):
         logging.basicConfig(level=logging.INFO)
-        # for model_name in list(TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
         for model_name in ["bert-base-uncased"]:
             config = AutoConfig.from_pretrained(model_name)
             self.assertIsNotNone(config)
@@ -105,7 +105,7 @@ class TFAutoModelTest(unittest.TestCase):
     @slow
     def test_question_answering_model_from_pretrained(self):
         logging.basicConfig(level=logging.INFO)
-        # for model_name in list(TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
         for model_name in ["bert-base-uncased"]:
             config = AutoConfig.from_pretrained(model_name)
             self.assertIsNotNone(config)
@@ -311,7 +311,7 @@ class TFBertModelTest(TFModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        # for model_name in list(TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
         for model_name in ["bert-base-uncased"]:
             model = TFBertModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
@@ -25,7 +25,7 @@ from .utils import require_tf, slow
 if is_tf_available():
     import tensorflow as tf
-    from transformers.modeling_tf_ctrl import TFCTRLModel, TFCTRLLMHeadModel, TF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP
+    from transformers.modeling_tf_ctrl import TFCTRLModel, TFCTRLLMHeadModel, TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST
 @require_tf
@@ -200,7 +200,7 @@ class TFCTRLModelTest(TFModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(TF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = TFCTRLModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
@@ -218,6 +218,6 @@ class TFDistilBertModelTest(TFModelTesterMixin, unittest.TestCase):
     # @slow
     # def test_model_from_pretrained(self):
-    #     for model_name in list(DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+    #     for model_name in list(DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
     #         model = DistilBertModesss.from_pretrained(model_name)
     #         self.assertIsNotNone(model)
@@ -221,7 +221,7 @@ class TFElectraModelTest(TFModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        # for model_name in list(TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        # for model_name in TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
         for model_name in ["google/electra-small-discriminator"]:
             model = TFElectraModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
@@ -29,7 +29,7 @@ if is_tf_available():
         TFGPT2Model,
         TFGPT2LMHeadModel,
         TFGPT2DoubleHeadsModel,
-        TF_GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,
+        TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
         shape_list,
     )
@@ -323,7 +323,7 @@ class TFGPT2ModelTest(TFModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(TF_GPT2_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = TFGPT2Model.from_pretrained(model_name)
             self.assertIsNotNone(model)
@@ -29,7 +29,7 @@ if is_tf_available():
         TFOpenAIGPTModel,
         TFOpenAIGPTLMHeadModel,
         TFOpenAIGPTDoubleHeadsModel,
-        TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,
+        TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
     )
@@ -235,7 +235,7 @@ class TFOpenAIGPTModelTest(TFModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = TFOpenAIGPTModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
@@ -32,7 +32,7 @@ if is_tf_available():
         TFRobertaForSequenceClassification,
         TFRobertaForTokenClassification,
         TFRobertaForQuestionAnswering,
-        TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
+        TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
     )
@@ -232,7 +232,7 @@ class TFRobertaModelTest(TFModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = TFRobertaModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
@@ -29,7 +29,7 @@ if is_tf_available():
     from transformers import (
         TFTransfoXLModel,
         TFTransfoXLLMHeadModel,
-        TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP,
+        TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
     )
@@ -209,7 +209,7 @@ class TFTransfoXLModelTest(TFModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = TFTransfoXLModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
@@ -31,7 +31,7 @@ if is_tf_available():
         TFXLMWithLMHeadModel,
         TFXLMForSequenceClassification,
         TFXLMForQuestionAnsweringSimple,
-        TF_XLM_PRETRAINED_MODEL_ARCHIVE_MAP,
+        TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
     )
@@ -308,7 +308,7 @@ class TFXLMModelTest(TFModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(TF_XLM_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = TFXLMModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
@@ -33,7 +33,7 @@ if is_tf_available():
         TFXLNetForSequenceClassification,
         TFXLNetForTokenClassification,
         TFXLNetForQuestionAnsweringSimple,
-        TF_XLNET_PRETRAINED_MODEL_ARCHIVE_MAP,
+        TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
     )
@@ -410,7 +410,7 @@ class TFXLNetModelTest(TFModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(TF_XLNET_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = TFXLNetModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
@@ -27,7 +27,7 @@ from .utils import require_multigpu, require_torch, slow, torch_device
 if is_torch_available():
     import torch
     from transformers import TransfoXLConfig, TransfoXLModel, TransfoXLLMHeadModel
-    from transformers.modeling_transfo_xl import TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP
+    from transformers.modeling_transfo_xl import TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST
 @require_torch
@@ -214,7 +214,7 @@ class TransfoXLModelTest(ModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = TransfoXLModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
@@ -34,7 +34,7 @@ if is_torch_available():
         XLMForSequenceClassification,
         XLMForQuestionAnsweringSimple,
     )
-    from transformers.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_MAP
+    from transformers.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
 @require_torch
@@ -425,7 +425,7 @@ class XLMModelTest(ModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(XLM_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = XLMModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
@@ -35,7 +35,7 @@ if is_torch_available():
         XLNetForTokenClassification,
         XLNetForQuestionAnswering,
     )
-    from transformers.modeling_xlnet import XLNET_PRETRAINED_MODEL_ARCHIVE_MAP
+    from transformers.modeling_xlnet import XLNET_PRETRAINED_MODEL_ARCHIVE_LIST
 @require_torch
@@ -508,7 +508,7 @@ class XLNetModelTest(ModelTesterMixin, unittest.TestCase):
     @slow
     def test_model_from_pretrained(self):
-        for model_name in list(XLNET_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
+        for model_name in XLNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
             model = XLNetModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
@@ -127,7 +127,7 @@ class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])
     def test_sequence_builders(self):
-        tokenizer = self.tokenizer_class.from_pretrained("bert-base-japanese")
+        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")
         text = tokenizer.encode("ありがとう。", add_special_tokens=False)
         text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)
@@ -188,7 +188,7 @@ class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])
     def test_sequence_builders(self):
-        tokenizer = self.tokenizer_class.from_pretrained("bert-base-japanese-char")
+        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")
         text = tokenizer.encode("ありがとう。", add_special_tokens=False)
         text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)
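
The two tokenizer hunks above follow from the model id changes in this commit: community checkpoints now live under an organization namespace, so `bert-base-japanese` becomes `cl-tohoku/bert-base-japanese`. A minimal usage sketch, assuming a transformers version of this era, the MeCab dependencies that BertJapaneseTokenizer relies on, and network access to download the files:

# Load the Japanese BERT tokenizer under its new namespaced model id.
# The ids are the real ones from the hunks above.
from transformers import BertJapaneseTokenizer

tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
print(tokenizer.tokenize("ありがとう。"))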