"docs/source/vscode:/vscode.git/clone" did not exist on "9ecd83dace3961eaa161405814b00ea595c86451"
Unverified commit 77abd1e7, authored by Lysandre Debut, committed by GitHub

Centralize logging (#6434)



* Logging

* Style

* hf_logging > utils.logging

* Address @thomwolf's comments

* Update test

* Update src/transformers/benchmark/benchmark_utils.py
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>

* Revert bad change
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
parent 461ae868
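
The change is mechanical but repeated across the whole library: every module used to create its logger with the stdlib `logging.getLogger(__name__)`; after this commit they all go through a shared wrapper module, so one setting governs the library's verbosity. A minimal sketch of the resulting pattern, using only the names the diff below introduces:

```python
# Inside any transformers module: ask the centralized wrapper for a
# logger. get_logger returns a child of the root "transformers" logger,
# so a single library-wide verbosity setting applies everywhere.
from transformers.utils import logging

logger = logging.get_logger(__name__)
logger.info("This message obeys the library-wide verbosity setting.")
```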
src/transformers/data/processors/xnli.py
@@ -16,13 +16,13 @@
 """ XNLI utils (dataset loading and evaluation) """
-import logging
 import os
+from ...utils import logging
 from .utils import DataProcessor, InputExample
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 class XnliProcessor(DataProcessor):
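
Note the triple-dot import in this hunk: `xnli.py` sits three packages deep, so reaching `transformers/utils/` takes `from ...utils import logging`, while the top-level modules below use a single dot. An illustration of how the relative depth resolves:

```python
# Hypothetical illustration: inside transformers/data/processors/xnli.py,
#     from ...utils import logging
# climbs from transformers.data.processors up to transformers,
# i.e. it is equivalent to the absolute import:
from transformers.utils import logging

# __name__ there is "transformers.data.processors.xnli", so the logger
# is still a child of the root "transformers" logger:
logger = logging.get_logger("transformers.data.processors.xnli")
```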
src/transformers/file_utils.py
@@ -6,7 +6,6 @@ Copyright by the AllenNLP authors.
 import fnmatch
 import json
-import logging
 import os
 import re
 import shutil
@@ -30,9 +29,10 @@ import requests
 from filelock import FileLock
 from . import __version__
+from .utils import logging
-logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
 try:
     USE_TF = os.environ.get("USE_TF", "AUTO").upper()
@@ -757,7 +757,7 @@ def http_get(url, temp_file, proxies=None, resume_size=0, user_agent: Union[Dict
         total=total,
         initial=resume_size,
         desc="Downloading",
-        disable=bool(logger.getEffectiveLevel() == logging.NOTSET),
+        disable=bool(logging.get_verbosity() == logging.NOTSET),
     )
     for chunk in response.iter_content(chunk_size=1024):
         if chunk:  # filter out keep-alive new chunks
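
Worth pausing on the `http_get` hunk: the download progress bar's visibility used to depend on one module's effective log level, and now consults the library-wide verbosity instead. A short usage sketch; the `set_verbosity_*` helpers below are names from the centralized module's public API:

```python
# User-facing control: one knob now governs all transformers log output.
from transformers.utils import logging

logging.set_verbosity_info()   # let the library emit INFO-level messages
logging.set_verbosity_error()  # quiet everything below ERROR
```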
src/transformers/generation_tf_utils.py
@@ -14,13 +14,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import logging
 import numpy as np
 import tensorflow as tf
+from .utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 class TFGenerationMixin:
src/transformers/generation_utils.py
@@ -14,15 +14,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import logging
 from typing import Iterable, List, Optional, Tuple
 import torch
 from torch import Tensor
 from torch.nn import functional as F
+from .utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 class GenerationMixin:
src/transformers/modelcard.py
@@ -17,7 +17,6 @@
 import copy
 import json
-import logging
 import os
 from .configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP
@@ -30,9 +29,10 @@ from .file_utils import (
     hf_bucket_url,
     is_remote_url,
 )
+from .utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 class ModelCard:
src/transformers/modeling_albert.py
@@ -14,7 +14,6 @@
 # limitations under the License.
 """PyTorch ALBERT model. """
-import logging
 import math
 import os
 import warnings
@@ -44,9 +43,10 @@ from .modeling_outputs import (
     TokenClassifierOutput,
 )
 from .modeling_utils import PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices
+from .utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 _CONFIG_FOR_DOC = "AlbertConfig"
 _TOKENIZER_FOR_DOC = "AlbertTokenizer"
src/transformers/modeling_auto.py
@@ -15,7 +15,6 @@
 """ Auto Model class. """
-import logging
 import warnings
 from collections import OrderedDict
@@ -172,9 +171,10 @@ from .modeling_xlnet import (
     XLNetLMHeadModel,
     XLNetModel,
 )
+from .utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 MODEL_MAPPING = OrderedDict(
src/transformers/modeling_bart.py
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """PyTorch BART model, ported from the fairseq repo."""
-import logging
 import math
 import random
 import warnings
@@ -43,9 +42,10 @@ from .modeling_outputs import (
     Seq2SeqSequenceClassifierOutput,
 )
 from .modeling_utils import PreTrainedModel
+from .utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 _CONFIG_FOR_DOC = "BartConfig"
 _TOKENIZER_FOR_DOC = "BartTokenizer"
src/transformers/modeling_bert.py
@@ -16,7 +16,6 @@
 """PyTorch BERT model. """
-import logging
 import math
 import os
 import warnings
@@ -54,9 +53,10 @@ from .modeling_utils import (
     find_pruneable_heads_and_indices,
     prune_linear_layer,
 )
+from .utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 _CONFIG_FOR_DOC = "BertConfig"
 _TOKENIZER_FOR_DOC = "BertTokenizer"
src/transformers/modeling_camembert.py
@@ -15,8 +15,6 @@
 # limitations under the License.
 """PyTorch CamemBERT model. """
-import logging
 from .configuration_camembert import CamembertConfig
 from .file_utils import add_start_docstrings
 from .modeling_roberta import (
@@ -28,9 +26,10 @@ from .modeling_roberta import (
     RobertaForTokenClassification,
     RobertaModel,
 )
+from .utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 _TOKENIZER_FOR_DOC = "CamembertTokenizer"
src/transformers/modeling_ctrl.py
@@ -16,7 +16,6 @@
 """ PyTorch CTRL model."""
-import logging
 import warnings
 import numpy as np
@@ -28,9 +27,10 @@ from .configuration_ctrl import CTRLConfig
 from .file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_callable
 from .modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
 from .modeling_utils import Conv1D, PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
+from .utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 _CONFIG_FOR_DOC = "CTRLConfig"
 _TOKENIZER_FOR_DOC = "CTRLTokenizer"
src/transformers/modeling_distilbert.py
@@ -19,7 +19,6 @@
 import copy
-import logging
 import math
 import warnings
@@ -50,9 +49,10 @@ from .modeling_utils import (
     find_pruneable_heads_and_indices,
     prune_linear_layer,
 )
+from .utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 _CONFIG_FOR_DOC = "DistilBertConfig"
 _TOKENIZER_FOR_DOC = "DistilBertTokenizer"
src/transformers/modeling_dpr.py
@@ -15,7 +15,6 @@
 """ PyTorch DPR model for Open Domain Question Answering."""
-import logging
 from dataclasses import dataclass
 from typing import Optional, Tuple, Union
@@ -27,9 +26,10 @@ from .file_utils import ModelOutput, add_start_docstrings, add_start_docstrings_
 from .modeling_bert import BertModel
 from .modeling_outputs import BaseModelOutputWithPooling
 from .modeling_utils import PreTrainedModel
+from .utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 _CONFIG_FOR_DOC = "DPRConfig"
src/transformers/modeling_electra.py
@@ ... @@
-import logging
 import os
 import warnings
 from dataclasses import dataclass
@@ -27,9 +26,10 @@ from .modeling_outputs import (
     TokenClassifierOutput,
 )
 from .modeling_utils import SequenceSummary
+from .utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 _CONFIG_FOR_DOC = "ElectraConfig"
 _TOKENIZER_FOR_DOC = "ElectraTokenizer"
src/transformers/modeling_encoder_decoder.py
@@ -15,15 +15,15 @@
 """ Classes to support Encoder-Decoder architectures """
-import logging
 from typing import Optional
 from .configuration_encoder_decoder import EncoderDecoderConfig
 from .configuration_utils import PretrainedConfig
 from .modeling_utils import PreTrainedModel
+from .utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 class EncoderDecoderModel(PreTrainedModel):
src/transformers/modeling_flaubert.py
@@ -15,7 +15,6 @@
 """ PyTorch Flaubert model, based on XLM. """
-import logging
 import random
 import torch
@@ -34,9 +33,10 @@ from .modeling_xlm import (
     XLMWithLMHeadModel,
     get_masks,
 )
+from .utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 _CONFIG_FOR_DOC = "FlaubertConfig"
 _TOKENIZER_FOR_DOC = "FlaubertTokenizer"
src/transformers/modeling_gpt2.py
@@ -16,7 +16,6 @@
 """PyTorch OpenAI GPT-2 model."""
-import logging
 import os
 import warnings
 from dataclasses import dataclass
@@ -43,9 +42,10 @@ from .modeling_utils import (
     find_pruneable_heads_and_indices,
     prune_conv1d_layer,
 )
+from .utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 _CONFIG_FOR_DOC = "GPT2Config"
 _TOKENIZER_FOR_DOC = "GPT2Tokenizer"
src/transformers/modeling_longformer.py
@@ -14,7 +14,6 @@
 # limitations under the License.
 """PyTorch Longformer model. """
-import logging
 import math
 import warnings
@@ -47,9 +46,10 @@ from .modeling_utils import (
     find_pruneable_heads_and_indices,
     prune_linear_layer,
 )
+from .utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 _CONFIG_FOR_DOC = "LongformerConfig"
 _TOKENIZER_FOR_DOC = "LongformerTokenizer"
src/transformers/modeling_mmbt.py
@@ -16,8 +16,6 @@
 """PyTorch MMBT model. """
-import logging
 import torch
 import torch.nn as nn
 from torch.nn import CrossEntropyLoss, MSELoss
@@ -25,9 +23,10 @@ from torch.nn import CrossEntropyLoss, MSELoss
 from .file_utils import add_start_docstrings, add_start_docstrings_to_callable, replace_return_docstrings
 from .modeling_outputs import BaseModelOutputWithPooling, SequenceClassifierOutput
 from .modeling_utils import ModuleUtilsMixin
+from .utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 _CONFIG_FOR_DOC = "MMBTConfig"
src/transformers/modeling_mobilebert.py
@@ -20,7 +20,6 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
-import logging
 import math
 import os
 import warnings
@@ -53,9 +52,10 @@ from .modeling_outputs import (
     TokenClassifierOutput,
 )
 from .modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
+from .utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 _CONFIG_FOR_DOC = "MobileBertConfig"
 _TOKENIZER_FOR_DOC = "MobileBertTokenizer"