Unverified Commit 77abd1e7 authored by Lysandre Debut, committed by GitHub
Browse files

Centralize logging (#6434)



* Logging

* Style

* hf_logging > utils.logging

* Address @thomwolf's comments

* Update test

* Update src/transformers/benchmark/benchmark_utils.py
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>

* Revert bad change
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
parent 461ae868
......@@ -17,7 +17,6 @@
import json
import logging
import math
import os
import warnings
......@@ -45,9 +44,10 @@ from .modeling_utils import (
find_pruneable_heads_and_indices,
prune_conv1d_layer,
)
from .utils import logging
logger = logging.getLogger(__name__)
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "OpenAIGPTConfig"
_TOKENIZER_FOR_DOC = "OpenAIGPTTokenizer"
......
......@@ -15,7 +15,6 @@
# limitations under the License.
"""PyTorch REFORMER model. """
import logging
import sys
from collections import namedtuple
from dataclasses import dataclass
......@@ -41,9 +40,10 @@ from .file_utils import (
)
from .modeling_outputs import CausalLMOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput
from .modeling_utils import PreTrainedModel, apply_chunking_to_forward
from .utils import logging
logger = logging.getLogger(__name__)
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "ReformerConfig"
_TOKENIZER_FOR_DOC = "ReformerTokenizer"
......
......@@ -17,7 +17,6 @@ RetriBERT model
"""
import logging
import math
import torch
......@@ -28,9 +27,10 @@ from .configuration_retribert import RetriBertConfig
from .file_utils import add_start_docstrings
from .modeling_bert import BertLayerNorm, BertModel
from .modeling_utils import PreTrainedModel
from .utils import logging
logger = logging.getLogger(__name__)
logger = logging.get_logger(__name__)
RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"yjernite/retribert-base-uncased",
......
......@@ -16,7 +16,6 @@
"""PyTorch RoBERTa model. """
import logging
import warnings
import torch
......@@ -39,9 +38,10 @@ from .modeling_outputs import (
SequenceClassifierOutput,
TokenClassifierOutput,
)
from .utils import logging
logger = logging.getLogger(__name__)
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "RobertaConfig"
_TOKENIZER_FOR_DOC = "RobertaTokenizer"
......
......@@ -16,7 +16,6 @@
import copy
import logging
import math
import os
import warnings
......@@ -36,9 +35,10 @@ from .file_utils import (
)
from .modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, Seq2SeqLMOutput, Seq2SeqModelOutput
from .modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from .utils import logging
logger = logging.getLogger(__name__)
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
_TOKENIZER_FOR_DOC = "T5Tokenizer"
......
......@@ -16,7 +16,6 @@
""" TF 2.0 ALBERT model. """
import logging
from dataclasses import dataclass
from typing import Optional, Tuple
......@@ -53,9 +52,10 @@ from .modeling_tf_utils import (
shape_list,
)
from .tokenization_utils import BatchEncoding
from .utils import logging
logger = logging.getLogger(__name__)
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "AlbertConfig"
_TOKENIZER_FOR_DOC = "AlbertTokenizer"
......
......@@ -15,7 +15,6 @@
""" Auto Model class. """
import logging
import warnings
from collections import OrderedDict
......@@ -139,9 +138,10 @@ from .modeling_tf_xlnet import (
TFXLNetLMHeadModel,
TFXLNetModel,
)
from .utils import logging
logger = logging.getLogger(__name__)
logger = logging.get_logger(__name__)
TF_MODEL_MAPPING = OrderedDict(
......
......@@ -16,7 +16,6 @@
""" TF 2.0 BERT model. """
import logging
from dataclasses import dataclass
from typing import Optional, Tuple
......@@ -56,9 +55,10 @@ from .modeling_tf_utils import (
shape_list,
)
from .tokenization_utils import BatchEncoding
from .utils import logging
logger = logging.getLogger(__name__)
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
......
......@@ -15,9 +15,6 @@
# limitations under the License.
""" TF 2.0 CamemBERT model. """
import logging
from .configuration_camembert import CamembertConfig
from .file_utils import add_start_docstrings
from .modeling_tf_roberta import (
......@@ -28,9 +25,10 @@ from .modeling_tf_roberta import (
TFRobertaForTokenClassification,
TFRobertaModel,
)
from .utils import logging
logger = logging.getLogger(__name__)
logger = logging.get_logger(__name__)
TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
# See all CamemBERT models at https://huggingface.co/models?filter=camembert
......
......@@ -16,8 +16,6 @@
""" TF 2.0 CTRL model."""
import logging
import numpy as np
import tensorflow as tf
......@@ -32,9 +30,10 @@ from .modeling_tf_utils import (
shape_list,
)
from .tokenization_utils import BatchEncoding
from .utils import logging
logger = logging.getLogger(__name__)
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "CTRLConfig"
_TOKENIZER_FOR_DOC = "CTRLTokenizer"
......
......@@ -16,7 +16,6 @@
"""
import logging
import math
import numpy as np
......@@ -50,9 +49,10 @@ from .modeling_tf_utils import (
shape_list,
)
from .tokenization_utils import BatchEncoding
from .utils import logging
logger = logging.getLogger(__name__)
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "DistilBertConfig"
_TOKENIZER_FOR_DOC = "DistilBertTokenizer"
......
import logging
from dataclasses import dataclass
from typing import Optional, Tuple
......@@ -35,9 +34,10 @@ from .modeling_tf_utils import (
shape_list,
)
from .tokenization_utils import BatchEncoding
from .utils import logging
logger = logging.getLogger(__name__)
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "ElectraConfig"
_TOKENIZER_FOR_DOC = "ElectraTokenizer"
......
......@@ -15,7 +15,6 @@
""" TF 2.0 Flaubert model.
"""
import logging
import random
import tensorflow as tf
......@@ -36,9 +35,10 @@ from .modeling_tf_xlm import (
get_masks,
)
from .tokenization_utils import BatchEncoding
from .utils import logging
logger = logging.getLogger(__name__)
logger = logging.get_logger(__name__)
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
# See all Flaubert models at https://huggingface.co/models?filter=flaubert
......
......@@ -16,7 +16,6 @@
""" TF 2.0 OpenAI GPT-2 model. """
import logging
from dataclasses import dataclass
from typing import List, Optional, Tuple
......@@ -43,9 +42,10 @@ from .modeling_tf_utils import (
shape_list,
)
from .tokenization_utils import BatchEncoding
from .utils import logging
logger = logging.getLogger(__name__)
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "GPT2Config"
_TOKENIZER_FOR_DOC = "GPT2Tokenizer"
......
......@@ -14,8 +14,6 @@
# limitations under the License.
"""Tensorflow Longformer model. """
import logging
import tensorflow as tf
from .configuration_longformer import LongformerConfig
......@@ -37,9 +35,10 @@ from .modeling_tf_utils import (
shape_list,
)
from .tokenization_utils import BatchEncoding
from .utils import logging
logger = logging.getLogger(__name__)
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "LongformerConfig"
_TOKENIZER_FOR_DOC = "LongformerTokenizer"
......
......@@ -16,7 +16,6 @@
""" TF 2.0 MobileBERT model. """
import logging
from dataclasses import dataclass
from typing import Optional, Tuple
......@@ -54,9 +53,10 @@ from .modeling_tf_utils import (
shape_list,
)
from .tokenization_utils import BatchEncoding
from .utils import logging
logger = logging.getLogger(__name__)
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "MobileBertConfig"
_TOKENIZER_FOR_DOC = "MobileBertTokenizer"
......
......@@ -16,7 +16,6 @@
""" TF 2.0 OpenAI GPT model."""
import logging
from dataclasses import dataclass
from typing import Optional, Tuple
......@@ -43,9 +42,10 @@ from .modeling_tf_utils import (
shape_list,
)
from .tokenization_utils import BatchEncoding
from .utils import logging
logger = logging.getLogger(__name__)
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "OpenAIGPTConfig"
_TOKENIZER_FOR_DOC = "OpenAIGPTTokenizer"
......
......@@ -16,14 +16,15 @@
""" PyTorch - TF 2.0 general utilities."""
import logging
import os
import re
import numpy
from .utils import logging
logger = logging.getLogger(__name__)
logger = logging.get_logger(__name__)
def convert_tf_weight_name_to_pt_weight_name(tf_name, start_prefix_to_remove=""):
......
......@@ -16,8 +16,6 @@
""" TF 2.0 RoBERTa model. """
import logging
import tensorflow as tf
from .configuration_roberta import RobertaConfig
......@@ -48,9 +46,10 @@ from .modeling_tf_utils import (
shape_list,
)
from .tokenization_utils_base import BatchEncoding
from .utils import logging
logger = logging.getLogger(__name__)
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "RobertaConfig"
_TOKENIZER_FOR_DOC = "RobertaTokenizer"
......
......@@ -18,7 +18,6 @@
import copy
import itertools
import logging
import math
import warnings
......@@ -42,9 +41,10 @@ from .modeling_tf_utils import (
shape_list,
)
from .tokenization_utils import BatchEncoding
from .utils import logging
logger = logging.getLogger(__name__)
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
_TOKENIZER_FOR_DOC = "T5Tokenizer"
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment