"llm/vscode:/vscode.git/clone" did not exist on "c3d321d405df2076768de49cf999a3542224eabd"
Unverified commit 77abd1e7, authored by Lysandre Debut and committed by GitHub

Centralize logging (#6434)



* Logging

* Style

* hf_logging > utils.logging

* Address @thomwolf's comments

* Update test

* Update src/transformers/benchmark/benchmark_utils.py
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>

* Revert bad change
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
parent 461ae868
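Every file touched below follows the same pattern: the per-script stdlib logging setup is removed in favour of the new centralized transformers logging wrapper. The sketch below shows that before/after pattern from an external caller's point of view; the in-tree files themselves use relative imports (from .utils import logging or from ...utils import logging, depending on package depth), and only set_verbosity_info() and get_logger() are taken from the hunks in this diff.

# Before (removed in these diffs): each script configured Python's stdlib logging on its own.
#   import logging
#   logging.basicConfig(level=logging.INFO)
#   logger = logging.getLogger(__name__)

# After (added in these diffs): the shared wrapper controls verbosity for the whole library.
from transformers.utils import logging

logging.set_verbosity_info()           # library-wide INFO verbosity, replaces basicConfig
logger = logging.get_logger(__name__)  # logger registered under the shared "transformers" namespace

logger.info("checkpoint conversion started")  # illustrative message only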
@@ -8,7 +8,6 @@ The script re-maps the TF2.x Bert weight names to the original names, so the mod
You may adapt this script to include classification/MLM/NSP/etc. heads.
"""
import argparse
-import logging
import os
import re
@@ -17,9 +16,11 @@ import torch
from transformers import BertConfig, BertModel
+from .utils import logging
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
......
@@ -16,14 +16,15 @@
import argparse
-import logging
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
+from .utils import logging
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
......
@@ -16,14 +16,15 @@
import argparse
-import logging
import torch
from transformers import ElectraConfig, ElectraForMaskedLM, ElectraForPreTraining, load_tf_weights_in_electra
+from .utils import logging
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, discriminator_or_generator):
......
@@ -16,14 +16,15 @@
import argparse
-import logging
import torch
from transformers import CONFIG_NAME, WEIGHTS_NAME, GPT2Config, GPT2Model, load_tf_weights_in_gpt2
+from .utils import logging
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
......
import argparse
-import logging
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
+from .utils import logging
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
......
@@ -16,14 +16,15 @@
import argparse
-import logging
import torch
from transformers import CONFIG_NAME, WEIGHTS_NAME, OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
+from .utils import logging
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
......
@@ -16,7 +16,6 @@
import argparse
-import logging
import os
from transformers import (
@@ -76,6 +75,8 @@ from transformers import (
)
from transformers.file_utils import hf_bucket_url
+from .utils import logging
if is_torch_available():
    import numpy as np
@@ -104,7 +105,7 @@ if is_torch_available():
)
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
MODEL_CLASSES = {
"bert": (BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,),
......
@@ -16,7 +16,6 @@
import argparse
-import logging
import pickle
import numpy as np
@@ -24,8 +23,10 @@ import torch
from transformers import ReformerConfig, ReformerModelWithLMHead
+from .utils import logging
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
......
@@ -16,7 +16,6 @@
import argparse
-import logging
import pathlib
import fairseq
@@ -28,13 +27,15 @@ from packaging import version
from transformers.modeling_bert import BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput
from transformers.modeling_roberta import RobertaConfig, RobertaForMaskedLM, RobertaForSequenceClassification
+from .utils import logging
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
SAMPLE_TEXT = "Hello world! cécé herlolip"
......
@@ -16,14 +16,15 @@
import argparse
-import logging
import torch
from transformers import T5Config, T5Model, load_tf_weights_in_t5
+from .utils import logging
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
......
@@ -16,7 +16,6 @@
import argparse
-import logging
import os
import pickle
import sys
@@ -33,8 +32,10 @@ from transformers import (
)
from transformers.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
+from .utils import logging
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
......
@@ -17,7 +17,6 @@
import argparse
import json
-import logging
import numpy
import torch
@@ -25,8 +24,10 @@ import torch
from transformers import CONFIG_NAME, WEIGHTS_NAME
from transformers.tokenization_xlm import VOCAB_FILES_NAMES
+from .utils import logging
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
......
@@ -16,7 +16,6 @@
import argparse
-import logging
import os
import torch
@@ -31,6 +30,8 @@ from transformers import (
    load_tf_weights_in_xlnet,
)
+from .utils import logging
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
@@ -45,7 +46,7 @@ GLUE_TASKS_NUM_LABELS = {
}
-logging.basicConfig(level=logging.INFO)
+logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
......
-import logging
import os
import time
from dataclasses import dataclass, field
@@ -14,11 +13,12 @@ from ...tokenization_bart import BartTokenizer, BartTokenizerFast
from ...tokenization_roberta import RobertaTokenizer, RobertaTokenizerFast
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_xlm_roberta import XLMRobertaTokenizer
+from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
@dataclass
......
-import logging
import os
import pickle
import time
@@ -9,9 +8,10 @@ from torch.utils.data.dataset import Dataset
from filelock import FileLock
from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
class TextDataset(Dataset):
......
-import logging
import os
import time
from dataclasses import dataclass, field
@@ -12,10 +11,11 @@ from filelock import FileLock
from ...modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
......
@@ -10,15 +10,16 @@ that a question is unanswerable.
import collections
import json
-import logging
import math
import re
import string
from transformers.tokenization_bert import BasicTokenizer
+from ...utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
def normalize_answer(s):
......
@@ -15,7 +15,6 @@
# limitations under the License.
""" GLUE processors and helpers """
-import logging
import os
from dataclasses import asdict
from enum import Enum
@@ -23,13 +22,14 @@ from typing import List, Optional, Union
from ...file_utils import is_tf_available
from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
from .utils import DataProcessor, InputExample, InputFeatures
if is_tf_available():
    import tensorflow as tf
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
def glue_convert_examples_to_features(
......
import json
-import logging
import os
from functools import partial
from multiprocessing import Pool, cpu_count
@@ -10,6 +9,7 @@ from tqdm import tqdm
from ...file_utils import is_tf_available, is_torch_available
from ...tokenization_bert import whitespace_tokenize
from ...tokenization_utils_base import TruncationStrategy
+from ...utils import logging
from .utils import DataProcessor
@@ -24,7 +24,7 @@ if is_torch_available():
if is_tf_available():
    import tensorflow as tf
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
......
@@ -17,14 +17,14 @@
import csv
import dataclasses
import json
-import logging
from dataclasses import dataclass
from typing import List, Optional, Union
from ...file_utils import is_tf_available, is_torch_available
+from ...utils import logging
-logger = logging.getLogger(__name__)
+logger = logging.get_logger(__name__)
@dataclass
......
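For code outside the library, the centralized module becomes the single knob for transformers log output. A short usage sketch follows; only set_verbosity_info() and get_logger() appear in the hunks above, so the extra helper named here (set_verbosity_error) is an assumption about the wrapper's surface rather than something this diff shows.

from transformers.utils import logging

logging.set_verbosity_info()                 # shown in the hunks above: verbose, library-wide
logger = logging.get_logger("transformers")  # root logger of the shared namespace

logger.info("verbose output enabled while converting or debugging")

# Assumption: the wrapper also exposes coarser levels so the library can be
# silenced again afterwards. Not shown in this diff.
logging.set_verbosity_error()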