Unverified Commit 77abd1e7 authored by Lysandre Debut, committed by GitHub

Centralize logging (#6434)



* Logging

* Style

* hf_logging > utils.logging

* Address @thomwolf's comments

* Update test

* Update src/transformers/benchmark/benchmark_utils.py
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>

* Revert bad change
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
parent 461ae868
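Every file touched by this commit follows the same two-step pattern: drop the stdlib `import logging` and obtain the module logger from the new centralized wrapper instead. A minimal sketch of the before/after pattern, taken directly from the hunks below (the module context is illustrative):

# Before: each module talked to the stdlib logging package directly.
import logging

logger = logging.getLogger(__name__)

# After: loggers come from the centralized transformers wrapper, so
# verbosity and handlers can be configured in one place for the library.
from .utils import logging

logger = logging.get_logger(__name__)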
@@ -17,7 +17,6 @@
 """
-import logging
 from dataclasses import dataclass
 from typing import List, Optional, Tuple
@@ -28,9 +27,10 @@ from .file_utils import ModelOutput, add_code_sample_docstrings, add_start_docst
 from .modeling_tf_transfo_xl_utilities import TFAdaptiveSoftmaxMask
 from .modeling_tf_utils import TFPreTrainedModel, get_initializer, keras_serializable, shape_list
 from .tokenization_utils import BatchEncoding
+from .utils import logging
 
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 
 _CONFIG_FOR_DOC = "TransfoXLConfig"
 _TOKENIZER_FOR_DOC = "TransfoXLTokenizer"
......
@@ -15,7 +15,6 @@
 # limitations under the License.
 """TF general model utils."""
 import functools
-import logging
 import os
 import warnings
 from typing import Dict, List, Optional, Union
@@ -29,9 +28,10 @@ from .configuration_utils import PretrainedConfig
 from .file_utils import DUMMY_INPUTS, TF2_WEIGHTS_NAME, WEIGHTS_NAME, cached_path, hf_bucket_url, is_remote_url
 from .generation_tf_utils import TFGenerationMixin
 from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model
+from .utils import logging
 
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 
 class TFModelUtilsMixin:
......
@@ -17,7 +17,6 @@
 import itertools
-import logging
 import math
 import warnings
 from dataclasses import dataclass
@@ -54,9 +53,10 @@ from .modeling_tf_utils import (
     shape_list,
 )
 from .tokenization_utils import BatchEncoding
+from .utils import logging
 
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 
 _CONFIG_FOR_DOC = "XLMConfig"
 _TOKENIZER_FOR_DOC = "XLMTokenizer"
......
@@ -15,9 +15,6 @@
 # limitations under the License.
 """ TF 2.0 XLM-RoBERTa model. """
-import logging
 from .configuration_xlm_roberta import XLMRobertaConfig
 from .file_utils import add_start_docstrings
 from .modeling_tf_roberta import (
@@ -28,9 +25,10 @@ from .modeling_tf_roberta import (
     TFRobertaForTokenClassification,
     TFRobertaModel,
 )
+from .utils import logging
 
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 
 TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
     # See all XLM-RoBERTa models at https://huggingface.co/models?filter=xlm-roberta
......
@@ -17,7 +17,6 @@
 """
-import logging
 from dataclasses import dataclass
 from typing import List, Optional, Tuple
@@ -47,9 +46,10 @@ from .modeling_tf_utils import (
     shape_list,
 )
 from .tokenization_utils import BatchEncoding
+from .utils import logging
 
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 
 _CONFIG_FOR_DOC = "XLNetConfig"
 _TOKENIZER_FOR_DOC = "XLNetTokenizer"
......
@@ -19,7 +19,6 @@
 """
-import logging
 from dataclasses import dataclass
 from typing import List, Optional, Tuple
@@ -31,9 +30,10 @@ from .configuration_transfo_xl import TransfoXLConfig
 from .file_utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_callable
 from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax
 from .modeling_utils import PreTrainedModel
+from .utils import logging
 
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 
 _CONFIG_FOR_DOC = "TransfoXLConfig"
 _TOKENIZER_FOR_DOC = "TransfoXLTokenizer"
......
@@ -15,7 +15,6 @@
 # limitations under the License.
 import inspect
-import logging
 import os
 import re
 from dataclasses import dataclass
@@ -41,9 +40,10 @@ from .file_utils import (
     replace_return_docstrings,
 )
 from .generation_utils import GenerationMixin
+from .utils import logging
 
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 
 try:
......
@@ -17,7 +17,6 @@
 import itertools
-import logging
 import math
 import warnings
 from dataclasses import dataclass
@@ -54,9 +53,10 @@ from .modeling_utils import (
     find_pruneable_heads_and_indices,
     prune_linear_layer,
 )
+from .utils import logging
 
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 
 _CONFIG_FOR_DOC = "XLMConfig"
 _TOKENIZER_FOR_DOC = "XLMTokenizer"
......
@@ -15,9 +15,6 @@
 # limitations under the License.
 """PyTorch XLM-RoBERTa model. """
-import logging
 from .configuration_xlm_roberta import XLMRobertaConfig
 from .file_utils import add_start_docstrings
 from .modeling_roberta import (
@@ -28,9 +25,10 @@ from .modeling_roberta import (
     RobertaForTokenClassification,
     RobertaModel,
 )
+from .utils import logging
 
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 
 XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
     "xlm-roberta-base",
......
@@ -17,7 +17,6 @@
 """
-import logging
 from dataclasses import dataclass
 from typing import List, Optional, Tuple
@@ -43,9 +42,10 @@ from .modeling_utils import (
     SequenceSummary,
     apply_chunking_to_forward,
 )
+from .utils import logging
 
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 
 _CONFIG_FOR_DOC = "XLNetConfig"
 _TOKENIZER_FOR_DOC = "XLNetTokenizer"
......
@@ -14,7 +14,6 @@
 # limitations under the License.
 """PyTorch optimization for BERT model."""
-import logging
 import math
 from typing import Callable, Iterable, Tuple
@@ -22,8 +21,10 @@ import torch
 from torch.optim import Optimizer
 from torch.optim.lr_scheduler import LambdaLR
+from .utils import logging
 
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 
 def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
......
@@ -16,7 +16,6 @@
 import csv
 import json
-import logging
 import os
 import pickle
 import sys
@@ -39,6 +38,7 @@ from .tokenization_auto import AutoTokenizer
 from .tokenization_bert import BasicTokenizer
 from .tokenization_utils import PreTrainedTokenizer
 from .tokenization_utils_base import BatchEncoding, PaddingStrategy
+from .utils import logging
 
 if is_tf_available():
@@ -80,7 +80,7 @@ if TYPE_CHECKING:
     from .modeling_utils import PreTrainedModel
 
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 
 def get_framework(model=None):
......
@@ -15,16 +15,16 @@
 """ Tokenization classes for ALBERT model."""
-import logging
 import os
 import unicodedata
 from shutil import copyfile
 from typing import List, Optional
 
 from .tokenization_utils import PreTrainedTokenizer
+from .utils import logging
 
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 
 VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
 
 PRETRAINED_VOCAB_FILES_MAP = {
......
@@ -15,7 +15,6 @@
 """ Auto Tokenizer class. """
-import logging
 from collections import OrderedDict
 
 from .configuration_auto import (
@@ -69,9 +68,10 @@ from .tokenization_transfo_xl import TransfoXLTokenizer, TransfoXLTokenizerFast
 from .tokenization_xlm import XLMTokenizer
 from .tokenization_xlm_roberta import XLMRobertaTokenizer
 from .tokenization_xlnet import XLNetTokenizer
+from .utils import logging
 
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 
 TOKENIZER_MAPPING = OrderedDict(
......
@@ -13,14 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import logging
 from typing import List, Optional
 
 from .tokenization_roberta import RobertaTokenizer, RobertaTokenizerFast
 from .tokenization_utils_base import BatchEncoding
+from .utils import logging
 
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 
 # vocab and merges same as roberta
......
@@ -16,7 +16,6 @@
 import collections
-import logging
 import os
 import unicodedata
 from typing import List, Optional
@@ -25,9 +24,10 @@ from tokenizers import BertWordPieceTokenizer
 
 from .tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
 from .tokenization_utils_fast import PreTrainedTokenizerFast
+from .utils import logging
 
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 
 VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
......
@@ -16,15 +16,15 @@
 import collections
-import logging
 import os
 import unicodedata
 from typing import Optional
 
 from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer, load_vocab
+from .utils import logging
 
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 
 VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
......
@@ -15,7 +15,6 @@
 """ Tokenization classes for Camembert model."""
-import logging
 import os
 from shutil import copyfile
 from typing import List, Optional
@@ -24,9 +23,10 @@ import sentencepiece as spm
 
 from .tokenization_utils import PreTrainedTokenizer
 from .tokenization_xlnet import SPIECE_UNDERLINE
+from .utils import logging
 
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 
 VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
......
@@ -16,15 +16,15 @@
 import json
-import logging
 import os
 
 import regex as re
 
 from .tokenization_utils import PreTrainedTokenizer
+from .utils import logging
 
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 
 VOCAB_FILES_NAMES = {
     "vocab_file": "vocab.json",
......
@@ -14,13 +14,11 @@
 # limitations under the License.
 """Tokenization classes for DistilBERT."""
-import logging
 
 from .tokenization_bert import BertTokenizer, BertTokenizerFast
+from .utils import logging
 
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 
 VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
......
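With every module logger routed through `utils.logging`, verbosity can be adjusted for the whole library at once instead of per module. A hedged usage sketch, assuming the helper functions exposed by the new `utils/logging.py` module (such as `set_verbosity_info` and `get_logger`, matching the API later documented for `transformers`):

from transformers.utils import logging

# Raise library-wide verbosity to INFO; this affects every logger
# created through the centralized get_logger helper.
logging.set_verbosity_info()

logger = logging.get_logger("transformers")
logger.info("This message is controlled by the centralized configuration.")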