Unverified Commit 77abd1e7 authored by Lysandre Debut, committed by GitHub

Centralize logging (#6434)



* Logging

* Style

* hf_logging > utils.logging

* Address @thomwolf's comments

* Update test

* Update src/transformers/benchmark/benchmark_utils.py
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>

* Revert bad change
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
parent 461ae868
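The change applied across every file below is the same substitution: drop the standard-library `import logging` together with the per-script `logging.basicConfig(level=logging.INFO)` call, and route both verbosity and logger creation through the library's centralized logging utility instead. A minimal before/after sketch of that pattern, using the `transformers.utils.logging` path that the diff imports as `from .utils import logging`:

# Before (per-script stdlib configuration, removed in this commit):
#     import logging
#     logging.basicConfig(level=logging.INFO)
#     logger = logging.getLogger(__name__)

# After (centralized utility):
from transformers.utils import logging  # inside the package: `from .utils import logging`

logging.set_verbosity_info()           # replaces logging.basicConfig(level=logging.INFO)
logger = logging.get_logger(__name__)  # replaces logging.getLogger(__name__)

logger.info("Converting checkpoint...")  # existing call sites stay the same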
@@ -8,7 +8,6 @@ The script re-maps the TF2.x Bert weight names to the original names, so the mod
 You may adapt this script to include classification/MLM/NSP/etc. heads.
 """
 import argparse
-import logging
 import os
 import re
@@ -17,9 +16,11 @@ import torch
 from transformers import BertConfig, BertModel
+from .utils import logging
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
 def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
...
@@ -16,14 +16,15 @@
 import argparse
-import logging
 import torch
 from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
+from .utils import logging
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
 def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
...
@@ -16,14 +16,15 @@
 import argparse
-import logging
 import torch
 from transformers import ElectraConfig, ElectraForMaskedLM, ElectraForPreTraining, load_tf_weights_in_electra
+from .utils import logging
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
 def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, discriminator_or_generator):
...
@@ -16,14 +16,15 @@
 import argparse
-import logging
 import torch
 from transformers import CONFIG_NAME, WEIGHTS_NAME, GPT2Config, GPT2Model, load_tf_weights_in_gpt2
+from .utils import logging
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
 def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
...
 import argparse
-import logging
 import torch
 from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
+from .utils import logging
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
 def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
...
@@ -16,14 +16,15 @@
 import argparse
-import logging
 import torch
 from transformers import CONFIG_NAME, WEIGHTS_NAME, OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
+from .utils import logging
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
 def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
...
@@ -16,7 +16,6 @@
 import argparse
-import logging
 import os
 from transformers import (
@@ -76,6 +75,8 @@ from transformers import (
 )
 from transformers.file_utils import hf_bucket_url
+from .utils import logging
 if is_torch_available():
     import numpy as np
@@ -104,7 +105,7 @@ if is_torch_available():
 )
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
 MODEL_CLASSES = {
     "bert": (BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,),
...
@@ -16,7 +16,6 @@
 import argparse
-import logging
 import pickle
 import numpy as np
@@ -24,8 +23,10 @@ import torch
 from transformers import ReformerConfig, ReformerModelWithLMHead
+from .utils import logging
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
 def set_param(torch_layer, weight, bias=None):
...
@@ -16,7 +16,6 @@
 import argparse
-import logging
 import pathlib
 import fairseq
@@ -28,13 +27,15 @@ from packaging import version
 from transformers.modeling_bert import BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput
 from transformers.modeling_roberta import RobertaConfig, RobertaForMaskedLM, RobertaForSequenceClassification
+from .utils import logging
 if version.parse(fairseq.__version__) < version.parse("0.9.0"):
     raise Exception("requires fairseq >= 0.9.0")
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
 SAMPLE_TEXT = "Hello world! cécé herlolip"
...
@@ -16,14 +16,15 @@
 import argparse
-import logging
 import torch
 from transformers import T5Config, T5Model, load_tf_weights_in_t5
+from .utils import logging
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
 def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
...
@@ -16,7 +16,6 @@
 import argparse
-import logging
 import os
 import pickle
 import sys
@@ -33,8 +32,10 @@ from transformers import (
 )
 from transformers.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
+from .utils import logging
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
 # We do this to be able to load python 2 datasets pickles
 # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
...
@@ -17,7 +17,6 @@
 import argparse
 import json
-import logging
 import numpy
 import torch
@@ -25,8 +24,10 @@ import torch
 from transformers import CONFIG_NAME, WEIGHTS_NAME
 from transformers.tokenization_xlm import VOCAB_FILES_NAMES
+from .utils import logging
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
 def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
...
@@ -16,7 +16,6 @@
 import argparse
-import logging
 import os
 import torch
@@ -31,6 +30,8 @@ from transformers import (
     load_tf_weights_in_xlnet,
 )
+from .utils import logging
 GLUE_TASKS_NUM_LABELS = {
     "cola": 2,
@@ -45,7 +46,7 @@ GLUE_TASKS_NUM_LABELS = {
 }
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
 def convert_xlnet_checkpoint_to_pytorch(
...
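All of the conversion scripts above receive that same treatment, which matters for a reason the diff only implies: `logging.basicConfig()` inside library code configures the application-wide root logger as a side effect, while the centralized utility is meant to adjust only the library's own logger hierarchy. A hedged sketch of the difference (the `my_app` logger name is hypothetical, for illustration):

import logging as std_logging

from transformers.utils import logging as hf_logging

# Old pattern (removed in this commit): attaches a handler to the root logger,
# so every logger in the importing application starts emitting INFO records.
#     std_logging.basicConfig(level=std_logging.INFO)

# New pattern: the verbosity setting is scoped to the library's own loggers.
hf_logging.set_verbosity_info()

unrelated = std_logging.getLogger("my_app")  # hypothetical application logger
unrelated.info("not printed: the transformers verbosity setting does not touch this logger")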
-import logging
 import os
 import time
 from dataclasses import dataclass, field
@@ -14,11 +13,12 @@ from ...tokenization_bart import BartTokenizer, BartTokenizerFast
 from ...tokenization_roberta import RobertaTokenizer, RobertaTokenizerFast
 from ...tokenization_utils import PreTrainedTokenizer
 from ...tokenization_xlm_roberta import XLMRobertaTokenizer
+from ...utils import logging
 from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
 from ..processors.utils import InputFeatures
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 @dataclass
...
-import logging
 import os
 import pickle
 import time
@@ -9,9 +8,10 @@ from torch.utils.data.dataset import Dataset
 from filelock import FileLock
 from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 class TextDataset(Dataset):
...
-import logging
 import os
 import time
 from dataclasses import dataclass, field
@@ -12,10 +11,11 @@ from filelock import FileLock
 from ...modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
 from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
 from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
 MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
...
@@ -10,15 +10,16 @@ that a question is unanswerable.
 import collections
 import json
-import logging
 import math
 import re
 import string
 from transformers.tokenization_bert import BasicTokenizer
+from ...utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 def normalize_answer(s):
...
@@ -15,7 +15,6 @@
 # limitations under the License.
 """ GLUE processors and helpers """
-import logging
 import os
 from dataclasses import asdict
 from enum import Enum
@@ -23,13 +22,14 @@ from typing import List, Optional, Union
 from ...file_utils import is_tf_available
 from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
 from .utils import DataProcessor, InputExample, InputFeatures
 if is_tf_available():
     import tensorflow as tf
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 def glue_convert_examples_to_features(
...
 import json
-import logging
 import os
 from functools import partial
 from multiprocessing import Pool, cpu_count
@@ -10,6 +9,7 @@ from tqdm import tqdm
 from ...file_utils import is_tf_available, is_torch_available
 from ...tokenization_bert import whitespace_tokenize
 from ...tokenization_utils_base import TruncationStrategy
+from ...utils import logging
 from .utils import DataProcessor
@@ -24,7 +24,7 @@ if is_torch_available():
 if is_tf_available():
     import tensorflow as tf
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
...
@@ -17,14 +17,14 @@
 import csv
 import dataclasses
 import json
-import logging
 from dataclasses import dataclass
 from typing import List, Optional, Union
 from ...file_utils import is_tf_available, is_torch_available
+from ...utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
 @dataclass
...
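The dataset, metrics, and processor modules above only swap how the module logger is obtained; everything downstream keeps using it as before, since `get_logger` hands back a standard-library logger. A short usage sketch (the `load_examples` helper is hypothetical, for illustration):

from transformers.utils import logging

logger = logging.get_logger(__name__)

def load_examples(path):
    # hypothetical helper: existing logger.info/logger.warning call sites in
    # these modules are unchanged by this commit.
    logger.info("Creating features from dataset file at %s", path)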