Unverified commit c89bdfbe, authored by Sylvain Gugger, committed by GitHub

Reorganize repo (#8580)

* Put models in subfolders

* Styling

* Fix imports in tests

* More fixes in test imports

* Sneaky hidden imports

* Fix imports in doc files

* More sneaky imports

* Finish fixing tests

* Fix examples

* Fix path for copies

* More fixes for examples

* Fix dummy files

* More fixes for example

* More model import fixes

* Is this why you're unhappy GitHub?

* Fix imports in convert command
parent 90150733
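
Every hunk below follows the same pattern: each model's modeling, configuration, and tokenization modules move from the root of the transformers package into a per-model subfolder under transformers.models, so any import that reached into a specific submodule needs its path updated. A minimal sketch of the convention, using BERT as the example (top-level re-exports are untouched by this commit, and the flax test hunks below switch to them):

# Old flat layout (before this commit):
#     from transformers.modeling_bert import BertModel
#     from transformers.tokenization_bert_fast import BertTokenizerFast

# New layout: one subfolder per model under transformers.models
from transformers.models.bert.modeling_bert import BertModel

# Public classes stay re-exported at the package root, so top-level
# imports keep working and are the preferred form:
from transformers import BertTokenizerFast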
@@ -48,7 +48,7 @@ if is_torch_available():
         PegasusConfig,
         pipeline,
     )
-    from transformers.modeling_bart import (
+    from transformers.models.bart.modeling_bart import (
         SinusoidalPositionalEmbedding,
         _prepare_bart_decoder_inputs,
         invert_mask,
......
@@ -40,7 +40,7 @@ if is_torch_available():
         BertLMHeadModel,
         BertModel,
     )
-    from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST
+    from transformers.models.bert.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST


 class BertModelTester:
......
@@ -34,7 +34,7 @@ if is_torch_available():
         DebertaForSequenceClassification,
         DebertaModel,
     )
-    from transformers.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
+    from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST


 @require_torch
......
@@ -27,7 +27,7 @@ if is_torch_available():
     import torch

     from transformers import BertConfig, DPRConfig, DPRContextEncoder, DPRQuestionEncoder, DPRReader
-    from transformers.modeling_dpr import (
+    from transformers.models.dpr.modeling_dpr import (
         DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
         DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
         DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
......
......@@ -37,7 +37,7 @@ if is_torch_available():
ElectraForTokenClassification,
ElectraModel,
)
from transformers.modeling_electra import ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.electra.modeling_electra import ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST
class ElectraModelTester:
......
@@ -36,7 +36,7 @@ if is_torch_available():
         FlaubertModel,
         FlaubertWithLMHeadModel,
     )
-    from transformers.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
+    from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST


 class FlaubertModelTester(object):
......
@@ -2,18 +2,17 @@ import unittest

 from numpy import ndarray

-from transformers import TensorType, is_flax_available, is_torch_available
+from transformers import BertTokenizerFast, TensorType, is_flax_available, is_torch_available
 from transformers.testing_utils import require_flax, require_torch
-from transformers.tokenization_bert_fast import BertTokenizerFast


 if is_flax_available():
-    from transformers.modeling_flax_bert import FlaxBertModel
+    from transformers.models.bert.modeling_flax_bert import FlaxBertModel

 if is_torch_available():
     import torch

-    from transformers.modeling_bert import BertModel
+    from transformers.models.bert.modeling_bert import BertModel


 @require_flax
......
@@ -2,18 +2,17 @@ import unittest

 from numpy import ndarray

-from transformers import TensorType, is_flax_available, is_torch_available
+from transformers import RobertaTokenizerFast, TensorType, is_flax_available, is_torch_available
 from transformers.testing_utils import require_flax, require_torch
-from transformers.tokenization_roberta_fast import RobertaTokenizerFast


 if is_flax_available():
-    from transformers.modeling_flax_roberta import FlaxRobertaModel
+    from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel

 if is_torch_available():
     import torch

-    from transformers.modeling_roberta import RobertaModel
+    from transformers.models.roberta.modeling_roberta import RobertaModel


 @require_flax
......
@@ -32,7 +32,7 @@ if is_torch_available():
     import torch

     from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTModel, FSMTTokenizer
-    from transformers.modeling_fsmt import (
+    from transformers.models.fsmt.modeling_fsmt import (
         SinusoidalPositionalEmbedding,
         _prepare_fsmt_decoder_inputs,
         invert_mask,
......
@@ -35,7 +35,7 @@ if is_torch_available():
         LxmertForQuestionAnswering,
         LxmertModel,
     )
-    from transformers.modeling_lxmert import LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST
+    from transformers.models.lxmert.modeling_lxmert import LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST


 class LxmertModelTester:
......
@@ -28,12 +28,12 @@ if is_torch_available():
     import torch

     from transformers import AutoModelWithLMHead, MarianMTModel
-    from transformers.convert_marian_to_pytorch import (
+    from transformers.models.bart.modeling_bart import shift_tokens_right
+    from transformers.models.marian.convert_marian_to_pytorch import (
         ORG_NAME,
         convert_hf_name_to_opus_name,
         convert_opus_name_to_hf_name,
     )
-    from transformers.modeling_bart import shift_tokens_right
     from transformers.pipelines import TranslationPipeline
......
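
The Marian hunk above is the one place where the move also reorders imports: isort sorts on the full dotted path, and the rename flips which of the two modules comes first alphabetically. A toy illustration of that ordering in plain Python (the lists are just strings, not real imports):

# At the package root, convert_marian_to_pytorch sorted before modeling_bart:
old = ["transformers.convert_marian_to_pytorch", "transformers.modeling_bart"]
# Under transformers.models, bart now sorts before marian, so the
# shift_tokens_right import moves above the convert_marian_to_pytorch block:
new = [
    "transformers.models.bart.modeling_bart",
    "transformers.models.marian.convert_marian_to_pytorch",
]
assert sorted(old)[0].endswith("convert_marian_to_pytorch")
assert sorted(new)[0].endswith("modeling_bart")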
 import unittest

 from transformers import AutoConfig, AutoTokenizer, is_torch_available
-from transformers.configuration_pegasus import task_specific_params
 from transformers.file_utils import cached_property
+from transformers.models.pegasus.configuration_pegasus import task_specific_params
 from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
 from transformers.utils.logging import ERROR, set_verbosity
......
@@ -25,6 +25,9 @@ import numpy as np

 from transformers import BartTokenizer, T5Tokenizer
 from transformers.file_utils import cached_property, is_datasets_available, is_faiss_available, is_torch_available
+from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
+from transformers.models.dpr.tokenization_dpr import DPRQuestionEncoderTokenizer
+from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
 from transformers.testing_utils import (
     require_sentencepiece,
     require_tokenizers,
@@ -33,9 +36,6 @@ from transformers.testing_utils import (
     slow,
     torch_device,
 )
-from transformers.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
-from transformers.tokenization_dpr import DPRQuestionEncoderTokenizer
-from transformers.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES

 from .test_modeling_bart import ModelTester as BartModelTester
 from .test_modeling_dpr import DPRModelTester
@@ -205,7 +205,7 @@ class RagTestMixin:
         )
         dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
         tokenizer = self.bart_tokenizer if config.generator.model_type == "bart" else self.t5_tokenizer
-        with patch("transformers.retrieval_rag.load_dataset") as mock_load_dataset:
+        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
             mock_load_dataset.return_value = dataset
             retriever = RagRetriever(
                 config,
......
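
The patch-target change in the RagTestMixin hunk deserves a note: unittest.mock.patch replaces a name in the namespace where it is looked up, so when retrieval_rag moved under transformers.models.rag, the target string in the test had to track the new module path; patching the old path would miss the module at its new location. A self-contained sketch of that rule (mymod is a hypothetical stand-in, not part of transformers):

import sys
import types
from unittest.mock import patch

# Build a throwaway module that binds json.loads into its own namespace,
# the same way retrieval_rag binds load_dataset at import time.
mymod = types.ModuleType("mymod")
exec("from json import loads\ndef parse(s):\n    return loads(s)", mymod.__dict__)
sys.modules["mymod"] = mymod

# Patch the name where it is looked up ("mymod.loads"), not where it is
# defined; patching "json.loads" would not affect mymod.parse, because
# mymod already holds its own reference to loads.
with patch("mymod.loads", return_value={"stubbed": True}):
    assert mymod.parse("{}") == {"stubbed": True}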
@@ -37,7 +37,7 @@ if is_torch_available():
         RobertaForTokenClassification,
         RobertaModel,
     )
-    from transformers.modeling_roberta import (
+    from transformers.models.roberta.modeling_roberta import (
         ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
         RobertaEmbeddings,
         create_position_ids_from_input_ids,
......
@@ -31,7 +31,7 @@ if is_torch_available():
     import torch

     from transformers import T5Config, T5ForConditionalGeneration, T5Model, T5Tokenizer
-    from transformers.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_LIST
+    from transformers.models.t5.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_LIST


 class T5ModelTester:
......
@@ -26,7 +26,7 @@ from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
 if is_tf_available():
     import tensorflow as tf

-    from transformers.modeling_tf_albert import (
+    from transformers.models.albert.modeling_tf_albert import (
         TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
         TFAlbertForMaskedLM,
         TFAlbertForMultipleChoice,
......
@@ -43,7 +43,7 @@ if is_tf_available():
         TFRobertaForMaskedLM,
         TFT5ForConditionalGeneration,
     )
-    from transformers.modeling_tf_auto import (
+    from transformers.models.auto.modeling_tf_auto import (
         TF_MODEL_FOR_CAUSAL_LM_MAPPING,
         TF_MODEL_FOR_MASKED_LM_MAPPING,
         TF_MODEL_FOR_PRETRAINING_MAPPING,
@@ -54,9 +54,9 @@ if is_tf_available():
         TF_MODEL_MAPPING,
         TF_MODEL_WITH_LM_HEAD_MAPPING,
     )
-    from transformers.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
-    from transformers.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
-    from transformers.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
+    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
+    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
+    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST


 @require_tf
......
@@ -31,7 +31,7 @@ if is_tf_available():
     import tensorflow as tf

     from transformers import TFBartForConditionalGeneration, TFBartModel
-    from transformers.modeling_tf_bart import TFSinusoidalPositionalEmbedding
+    from transformers.models.bart.modeling_tf_bart import TFSinusoidalPositionalEmbedding


 @require_tf
......
@@ -27,7 +27,7 @@ if is_tf_available():
     import tensorflow as tf

     from transformers import TF_MODEL_FOR_PRETRAINING_MAPPING
-    from transformers.modeling_tf_bert import (
+    from transformers.models.bert.modeling_tf_bert import (
         TFBertForMaskedLM,
         TFBertForMultipleChoice,
         TFBertForNextSentencePrediction,
......
@@ -26,7 +26,11 @@ from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
 if is_tf_available():
     import tensorflow as tf

-    from transformers.modeling_tf_ctrl import TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLLMHeadModel, TFCTRLModel
+    from transformers.models.ctrl.modeling_tf_ctrl import (
+        TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
+        TFCTRLLMHeadModel,
+        TFCTRLModel,
+    )


 class TFCTRLModelTester(object):
......