Unverified Commit c89bdfbe authored by Sylvain Gugger, committed by GitHub

Reorganize repo (#8580)

* Put models in subfolders

* Styling

* Fix imports in tests

* More fixes in test imports

* Sneaky hidden imports

* Fix imports in doc files

* More sneaky imports

* Finish fixing tests

* Fix examples

* Fix path for copies

* More fixes for examples

* Fix dummy files

* More fixes for example

* More model import fixes

* Is this why you're unhappy GitHub?

* Fix imports in convert command
parent 90150733
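For orientation before the diff: every model's files move out of the flat transformers package into transformers/models/<model_name>/, so relative imports of shared utilities go from one dot to three, imports that reach into another model go through a sibling subpackage (two dots), and a model's own modules stay at one dot. The sketch below illustrates that pattern with hypothetical comments and a small runnable check; it assumes a transformers installation with the PyTorch backend and is not part of the commit itself.

```python
# Illustration of the import-depth change introduced by the subfolder move.
#
# Old flat layout, e.g. transformers/modeling_gpt2.py:
#     from .configuration_gpt2 import GPT2Config    # model's own config, same package
#     from .file_utils import add_start_docstrings  # shared utility, same package
#
# New layout, e.g. transformers/models/gpt2/modeling_gpt2.py:
#     from ...file_utils import add_start_docstrings  # shared utilities: three levels up
#     from .configuration_gpt2 import GPT2Config      # model's own files: still one dot
#
# Files built on another model (e.g. the LayoutLM tokenizer subclassing BERT's)
# now reach it through a sibling subpackage:
#     from ..bert.tokenization_bert import BertTokenizer
#
# The public, top-level API is expected to stay the same:
from transformers import GPT2Config, GPT2Model

config = GPT2Config(n_layer=2, n_head=2, n_embd=64)  # tiny config, just to exercise the import
model = GPT2Model(config)
print(model.config.n_layer)  # -> 2
```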
@@ -20,9 +20,8 @@ from typing import Optional, Tuple
import tensorflow as tf
from .activations_tf import get_tf_activation
from .configuration_funnel import FunnelConfig
from .file_utils import (
from ...activations_tf import get_tf_activation
from ...file_utils import (
MULTIPLE_CHOICE_DUMMY_INPUTS,
ModelOutput,
add_code_sample_docstrings,
@@ -30,7 +29,7 @@ from .file_utils import (
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from .modeling_tf_outputs import (
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFMaskedLMOutput,
TFMultipleChoiceModelOutput,
@@ -38,7 +37,7 @@ from .modeling_tf_outputs import (
TFSequenceClassifierOutput,
TFTokenClassifierOutput,
)
from .modeling_tf_utils import (
from ...modeling_tf_utils import (
TFMaskedLanguageModelingLoss,
TFMultipleChoiceLoss,
TFPreTrainedModel,
@@ -49,8 +48,9 @@ from .modeling_tf_utils import (
keras_serializable,
shape_list,
)
from .tokenization_utils import BatchEncoding
from .utils import logging
from ...tokenization_utils import BatchEncoding
from ...utils import logging
from .configuration_funnel import FunnelConfig
logger = logging.get_logger(__name__)
@@ -16,8 +16,8 @@
from typing import List, Optional
from .tokenization_bert import BertTokenizer
from .utils import logging
from ...utils import logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
@@ -16,9 +16,9 @@
from typing import List, Optional
from .tokenization_bert_fast import BertTokenizerFast
from ...utils import logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_funnel import FunnelTokenizer
from .utils import logging
logger = logging.get_logger(__name__)
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
from ...file_utils import is_tf_available, is_tokenizers_available, is_torch_available
from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config
from .tokenization_gpt2 import GPT2Tokenizer
if is_tokenizers_available():
from .tokenization_gpt2_fast import GPT2TokenizerFast
if is_torch_available():
from .modeling_gpt2 import (
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
GPT2DoubleHeadsModel,
GPT2ForSequenceClassification,
GPT2LMHeadModel,
GPT2Model,
GPT2PreTrainedModel,
load_tf_weights_in_gpt2,
)
if is_tf_available():
from .modeling_tf_gpt2 import (
TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGPT2DoubleHeadsModel,
TFGPT2LMHeadModel,
TFGPT2MainLayer,
TFGPT2Model,
TFGPT2PreTrainedModel,
)
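The new per-model __init__.py above re-exports the GPT-2 classes behind backend checks, so the subpackage can be imported even when only one of PyTorch or TensorFlow is installed. A stripped-down, hypothetical sketch of the same guard pattern (all names here are invented; only the structure mirrors the file above):

```python
# toy_model/__init__.py -- illustrative only, not transformers code.

def _torch_available() -> bool:
    # Stand-in for transformers' is_torch_available(): probe the optional backend.
    try:
        import torch  # noqa: F401
        return True
    except ImportError:
        return False

# Lightweight symbols (configs, archive maps, slow tokenizers) are always exported.
TOY_PRETRAINED_CONFIG_ARCHIVE_MAP = {"toy-base": "https://example.com/toy-base/config.json"}

if _torch_available():
    # Backend-specific classes are only defined when the backend is importable,
    # so `from toy_model import TOY_PRETRAINED_CONFIG_ARCHIVE_MAP` works everywhere.
    import torch.nn as nn

    class ToyModel(nn.Module):
        def __init__(self, hidden_size: int = 8):
            super().__init__()
            self.proj = nn.Linear(hidden_size, hidden_size)

        def forward(self, x):
            return self.proj(x)
```

Guarding the definitions rather than letting imports fail keeps the package importable on partial installs; is_torch_available, is_tf_available and is_tokenizers_available play that role in the file above.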
@@ -15,8 +15,8 @@
# limitations under the License.
""" OpenAI GPT-2 configuration """
from .configuration_utils import PretrainedConfig
from .utils import logging
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
@@ -24,28 +24,28 @@ import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from .activations import ACT2FN
from .configuration_gpt2 import GPT2Config
from .file_utils import (
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from .modeling_outputs import (
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithPastAndCrossAttentions,
SequenceClassifierOutputWithPast,
)
from .modeling_utils import (
from ...modeling_utils import (
Conv1D,
PreTrainedModel,
SequenceSummary,
find_pruneable_heads_and_indices,
prune_conv1d_layer,
)
from .utils import logging
from ...utils import logging
from .configuration_gpt2 import GPT2Config
logger = logging.get_logger(__name__)
@@ -21,17 +21,16 @@ from typing import List, Optional, Tuple
import tensorflow as tf
from .activations_tf import get_tf_activation
from .configuration_gpt2 import GPT2Config
from .file_utils import (
from ...activations_tf import get_tf_activation
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from .modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast
from .modeling_tf_utils import (
from ...modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast
from ...modeling_tf_utils import (
TFCausalLanguageModelingLoss,
TFConv1D,
TFPreTrainedModel,
@@ -41,8 +40,9 @@ from .modeling_tf_utils import (
keras_serializable,
shape_list,
)
from .tokenization_utils import BatchEncoding
from .utils import logging
from ...tokenization_utils import BatchEncoding
from ...utils import logging
from .configuration_gpt2 import GPT2Config
logger = logging.get_logger(__name__)
@@ -23,8 +23,8 @@ from typing import Optional, Tuple
import regex as re
from .tokenization_utils import AddedToken, PreTrainedTokenizer
from .utils import logging
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
@@ -21,10 +21,10 @@ from typing import Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
from .tokenization_utils_base import BatchEncoding
from .tokenization_utils_fast import PreTrainedTokenizerFast
from .utils import logging
logger = logging.get_logger(__name__)
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
from ...file_utils import is_tokenizers_available
from .tokenization_herbert import HerbertTokenizer
if is_tokenizers_available():
from .tokenization_herbert_fast import HerbertTokenizerFast
@@ -13,9 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from .tokenization_bert import BasicTokenizer
from .tokenization_xlm import XLMTokenizer
from .utils import logging
from ...utils import logging
from ..bert.tokenization_bert import BasicTokenizer
from ..xlm.tokenization_xlm import XLMTokenizer
logger = logging.get_logger(__name__)
@@ -15,14 +15,14 @@
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import (
PRETRAINED_INIT_CONFIGURATION,
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES,
PRETRAINED_VOCAB_FILES_MAP,
HerbertTokenizer,
)
from .tokenization_utils_fast import PreTrainedTokenizerFast
from .utils import logging
logger = logging.get_logger(__name__)
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
from ...file_utils import is_tokenizers_available, is_torch_available
from .configuration_layoutlm import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMConfig
from .tokenization_layoutlm import LayoutLMTokenizer
if is_tokenizers_available():
from .tokenization_layoutlm_fast import LayoutLMTokenizerFast
if is_torch_available():
from .modeling_layoutlm import (
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMForMaskedLM,
LayoutLMForTokenClassification,
LayoutLMModel,
)
@@ -15,8 +15,8 @@
""" LayoutLM model configuration """
from .configuration_bert import BertConfig
from .utils import logging
from ...utils import logging
from ..bert.configuration_bert import BertConfig
logger = logging.get_logger(__name__)
@@ -21,22 +21,22 @@ import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from .activations import ACT2FN
from .configuration_layoutlm import LayoutLMConfig
from .file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from .modeling_outputs import (
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
MaskedLMOutput,
TokenClassifierOutput,
)
from .modeling_utils import (
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from .utils import logging
from ...utils import logging
from .configuration_layoutlm import LayoutLMConfig
logger = logging.get_logger(__name__)
@@ -127,7 +127,7 @@ class LayoutLMEmbeddings(nn.Module):
return embeddings
# Copied from transformers.modeling_bert.BertSelfAttention with Bert->LayoutLM
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->LayoutLM
class LayoutLMSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
@@ -206,7 +206,7 @@ class LayoutLMSelfAttention(nn.Module):
return outputs
# Copied from transformers.modeling_bert.BertSelfOutput with Bert->LayoutLM
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->LayoutLM
class LayoutLMSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
@@ -221,7 +221,7 @@ class LayoutLMSelfOutput(nn.Module):
return hidden_states
# Copied from transformers.modeling_bert.BertAttention with Bert->LayoutLM
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->LayoutLM
class LayoutLMAttention(nn.Module):
def __init__(self, config):
super().__init__()
@@ -269,7 +269,7 @@ class LayoutLMAttention(nn.Module):
return outputs
# Copied from transformers.modeling_bert.BertIntermediate
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class LayoutLMIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
@@ -285,7 +285,7 @@ class LayoutLMIntermediate(nn.Module):
return hidden_states
# Copied from transformers.modeling_bert.BertOutput with Bert->LayoutLM
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->LayoutLM
class LayoutLMOutput(nn.Module):
def __init__(self, config):
super().__init__()
@@ -300,7 +300,7 @@ class LayoutLMOutput(nn.Module):
return hidden_states
# Copied from transformers.modeling_bert.BertLayer with Bert->LayoutLM
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->LayoutLM
class LayoutLMLayer(nn.Module):
def __init__(self, config):
super().__init__()
@@ -360,7 +360,7 @@ class LayoutLMLayer(nn.Module):
return layer_output
# Copied from transformers.modeling_bert.BertEncoder with Bert->LayoutLM
# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->LayoutLM
class LayoutLMEncoder(nn.Module):
def __init__(self, config):
super().__init__()
@@ -435,7 +435,7 @@ class LayoutLMEncoder(nn.Module):
)
# Copied from transformers.modeling_bert.BertPooler
# Copied from transformers.models.bert.modeling_bert.BertPooler
class LayoutLMPooler(nn.Module):
def __init__(self, config):
super().__init__()
@@ -451,7 +451,7 @@ class LayoutLMPooler(nn.Module):
return pooled_output
# Copied from transformers.modeling_bert.BertPredictionHeadTransform with Bert->LayoutLM
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->LayoutLM
class LayoutLMPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
@@ -469,7 +469,7 @@ class LayoutLMPredictionHeadTransform(nn.Module):
return hidden_states
# Copied from transformers.modeling_bert.BertLMPredictionHead with Bert->LayoutLM
# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->LayoutLM
class LayoutLMLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
@@ -490,7 +490,7 @@ class LayoutLMLMPredictionHead(nn.Module):
return hidden_states
# Copied from transformers.modeling_bert.BertOnlyMLMHead with Bert->LayoutLM
# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->LayoutLM
class LayoutLMOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
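The # Copied from transformers.models.bert.modeling_bert... markers above are what the "Fix path for copies" bullet in the commit message refers to: they point a repository consistency check at the canonical BERT implementation, so the module path inside each marker has to follow the files into transformers/models/bert/. A rough, hypothetical sketch of how such a marker can be resolved (this is not the repo's actual check utility, just the idea):

```python
import importlib
import inspect
import re

# Matches markers like:
#   # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->LayoutLM
_COPY_RE = re.compile(
    r"#\s*Copied from\s+(?P<target>[\w.]+)(?:\s+with\s+(?P<old>\w+)->(?P<new>\w+))?"
)

def expected_source(marker: str) -> str:
    """Return the source text that code carrying this marker is expected to match."""
    m = _COPY_RE.search(marker)
    if m is None:
        raise ValueError(f"not a 'Copied from' marker: {marker!r}")
    module_path, _, obj_name = m.group("target").rpartition(".")
    module = importlib.import_module(module_path)         # fails loudly if the path is stale
    source = inspect.getsource(getattr(module, obj_name))
    if m.group("old"):                                     # apply the Bert->LayoutLM style rename
        source = source.replace(m.group("old"), m.group("new"))
    return source

# Usage idea: compare expected_source(marker) with the marked class's own source and
# flag any drift; a stale module path in the marker surfaces immediately here.
```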
@@ -15,8 +15,8 @@
""" Tokenization class for model LayoutLM."""
from .tokenization_bert import BertTokenizer
from .utils import logging
from ...utils import logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
@@ -15,9 +15,9 @@
""" Tokenization class for model LayoutLM."""
from .tokenization_bert_fast import BertTokenizerFast
from ...utils import logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_layoutlm import LayoutLMTokenizer
from .utils import logging
logger = logging.get_logger(__name__)
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
from ...file_utils import is_tf_available, is_tokenizers_available, is_torch_available
from .configuration_longformer import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig
from .tokenization_longformer import LongformerTokenizer
if is_tokenizers_available():
from .tokenization_longformer_fast import LongformerTokenizerFast
if is_torch_available():
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerSelfAttention,
)
if is_tf_available():
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForQuestionAnswering,
TFLongformerModel,
TFLongformerSelfAttention,
)
@@ -16,8 +16,8 @@
from typing import List, Union
from .configuration_roberta import RobertaConfig
from .utils import logging
from ...utils import logging
from ..roberta.configuration_roberta import RobertaConfig
logger = logging.get_logger(__name__)