"git@developer.sourcefind.cn:chenpangpang/transformers.git" did not exist on "6e195eb9de4eee95c75a0225dd88a46d1f670692"
Unverified Commit acc3bd9d authored by Sylvain Gugger, committed by GitHub

Enforce string-formatting with f-strings (#10980)



* First third

* Styling and fix mistake

* Quality

* All the rest

* Treat %s and %d

* typo

* Missing )

* Apply suggestions from code review
Co-authored-by: Lysandre Debut <lysandre@huggingface.co>
parent d0b3797a
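The whole diff applies one mechanical change: `%`-style formatting and `str.format()` calls are rewritten as f-strings. As a minimal sketch of the before/after pattern (the variable names and values below are hypothetical stand-ins, not copied from any single file in this commit):

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

task_name = "mrpc"       # hypothetical stand-in for data_args.task_name
max_seq_length = 128     # hypothetical stand-in

# Before: percent-formatting and str.format()
logger.info("Training/evaluation parameters %s", task_name)
cache_name = "cached_{}_{}".format(task_name, str(max_seq_length))
error_msg = "Task not found: %s" % (task_name)

# After: f-strings interpolate the expressions in place
logger.info(f"Training/evaluation parameters {task_name}")
cache_name = f"cached_{task_name}_{max_seq_length}"
error_msg = f"Task not found: {task_name}"

Note that `logger.info("... %s", x)` defers the interpolation until the record is actually emitted, while an f-string is evaluated eagerly at the call site; the commit trades that deferred formatting for readability, which is why the logging calls change along with the exception messages.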
@@ -213,7 +213,7 @@ def main():
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
-logger.info("Training/evaluation parameters %s", training_args)
+logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
...
@@ -223,7 +223,7 @@ def main():
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
-logger.info("Training/evaluation parameters %s", training_args)
+logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
...
@@ -307,7 +307,7 @@ def create_learning_rate_scheduler(
progress = jnp.maximum(0.0, (step - warmup_steps) / float(steps_per_cycle))
ret *= jnp.maximum(0.0, 0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))
else:
-raise ValueError("Unknown factor %s." % name)
+raise ValueError(f"Unknown factor {name}.")
return jnp.asarray(ret, dtype=jnp.float32)
return step_fn
@@ -332,9 +332,7 @@ def accuracy(logits, targets, weights=None):
Tuple of scalar loss and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
-raise ValueError(
-    "Incorrect shapes. Got shape %s logits and %s targets" % (str(logits.shape), str(targets.shape))
-)
+raise ValueError(f"Incorrect shapes. Got shape {logits.shape} logits and {targets.shape} targets")
loss = jnp.equal(jnp.argmax(logits, axis=-1), targets)
loss *= weights
@@ -353,9 +351,7 @@ def cross_entropy(logits, targets, weights=None, label_smoothing=0.0):
Tuple of scalar loss and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
-raise ValueError(
-    "Incorrect shapes. Got shape %s logits and %s targets" % (str(logits.shape), str(targets.shape))
-)
+raise ValueError(f"Incorrect shapes. Got shape {logits.shape} logits and {targets.shape} targets")
vocab_size = logits.shape[-1]
confidence = 1.0 - label_smoothing
@@ -463,7 +459,7 @@ if __name__ == "__main__":
)
# Set the verbosity to info of the Transformers logger (on main process only):
-logger.info("Training/evaluation parameters %s", training_args)
+logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
...
@@ -220,7 +220,7 @@ def main():
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
-logger.info("Training/evaluation parameters %s", training_args)
+logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
...
@@ -247,7 +247,7 @@ def main():
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
-logger.info("Training/evaluation parameters %s", training_args)
+logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
...
@@ -116,12 +116,10 @@ def main():
level=logging.INFO,
)
logger.warning(
-    "device: %s, n_replicas: %s, 16-bits training: %s",
-    training_args.device,
-    training_args.n_replicas,
-    training_args.fp16,
+    f"device: {training_args.device}, n_replicas: {training_args.n_replicas}, "
+    f"16-bits training: {training_args.fp16}"
)
-logger.info("Training/evaluation parameters %s", training_args)
+logger.info(f"Training/evaluation parameters {training_args}")
# Set seed
set_seed(training_args.seed)
@@ -131,7 +129,7 @@ def main():
label_list = processor.get_labels()
num_labels = len(label_list)
except KeyError:
-raise ValueError("Task not found: %s" % (data_args.task_name))
+raise ValueError(f"Task not found: {data_args.task_name}")
# Load pretrained model and tokenizer
#
@@ -210,8 +208,8 @@ def main():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key, value in result.items():
-logger.info(" %s = %s", key, value)
+logger.info(f" {key} = {value}")
-writer.write("%s = %s\n" % (key, value))
+writer.write(f"{key} = {value}\n")
results.update(result)
...
@@ -99,13 +99,7 @@ if is_torch_available():
processor = processors[task]()
cached_features_file = os.path.join(
-    data_dir,
-    "cached_{}_{}_{}_{}".format(
-        mode.value,
-        tokenizer.__class__.__name__,
-        str(max_seq_length),
-        task,
-    ),
+    data_dir, f"cached_{mode.value}_{tokenizer.__class__.__name__}_{max_seq_length}_{task}"
)
# Make sure only the first process in distributed training processes the dataset,
@@ -125,14 +119,14 @@ if is_torch_available():
examples = processor.get_test_examples(data_dir)
else:
examples = processor.get_train_examples(data_dir)
-logger.info("Training examples: %s", len(examples))
+logger.info(f"Training examples: {len(examples)}")
self.features = convert_examples_to_features(
    examples,
    label_list,
    max_seq_length,
    tokenizer,
)
-logger.info("Saving features into cached file %s", cached_features_file)
+logger.info(f"Saving features into cached file {cached_features_file}")
torch.save(self.features, cached_features_file)
def __len__(self):
@@ -172,7 +166,7 @@ if is_tf_available():
examples = processor.get_test_examples(data_dir)
else:
examples = processor.get_train_examples(data_dir)
-logger.info("Training examples: %s", len(examples))
+logger.info(f"Training examples: {len(examples)}")
self.features = convert_examples_to_features(
    examples,
@@ -184,7 +178,7 @@ if is_tf_available():
def gen():
for (ex_index, ex) in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
if ex_index % 10000 == 0:
-logger.info("Writing example %d of %d" % (ex_index, len(examples)))
+logger.info(f"Writing example {ex_index} of {len(examples)}")
yield (
    {
@@ -255,7 +249,7 @@ class RaceProcessor(DataProcessor):
def get_train_examples(self, data_dir):
"""See base class."""
-logger.info("LOOKING AT {} train".format(data_dir))
+logger.info(f"LOOKING AT {data_dir} train")
high = os.path.join(data_dir, "train/high")
middle = os.path.join(data_dir, "train/middle")
high = self._read_txt(high)
@@ -264,7 +258,7 @@ class RaceProcessor(DataProcessor):
def get_dev_examples(self, data_dir):
"""See base class."""
-logger.info("LOOKING AT {} dev".format(data_dir))
+logger.info(f"LOOKING AT {data_dir} dev")
high = os.path.join(data_dir, "dev/high")
middle = os.path.join(data_dir, "dev/middle")
high = self._read_txt(high)
@@ -273,7 +267,7 @@ class RaceProcessor(DataProcessor):
def get_test_examples(self, data_dir):
"""See base class."""
-logger.info("LOOKING AT {} test".format(data_dir))
+logger.info(f"LOOKING AT {data_dir} test")
high = os.path.join(data_dir, "test/high")
middle = os.path.join(data_dir, "test/middle")
high = self._read_txt(high)
@@ -298,7 +292,7 @@ class RaceProcessor(DataProcessor):
"""Creates examples for the training and dev sets."""
examples = []
for (_, data_raw) in enumerate(lines):
-race_id = "%s-%s" % (set_type, data_raw["race_id"])
+race_id = f"{set_type}-{data_raw['race_id']}"
article = data_raw["article"]
for i in range(len(data_raw["answers"])):
truth = str(ord(data_raw["answers"][i]) - ord("A"))
@@ -322,17 +316,17 @@ class SynonymProcessor(DataProcessor):
def get_train_examples(self, data_dir):
"""See base class."""
-logger.info("LOOKING AT {} train".format(data_dir))
+logger.info(f"LOOKING AT {data_dir} train")
return self._create_examples(self._read_csv(os.path.join(data_dir, "mctrain.csv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
-logger.info("LOOKING AT {} dev".format(data_dir))
+logger.info(f"LOOKING AT {data_dir} dev")
return self._create_examples(self._read_csv(os.path.join(data_dir, "mchp.csv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
-logger.info("LOOKING AT {} dev".format(data_dir))
+logger.info(f"LOOKING AT {data_dir} dev")
return self._create_examples(self._read_csv(os.path.join(data_dir, "mctest.csv")), "test")
@@ -368,17 +362,17 @@ class SwagProcessor(DataProcessor):
def get_train_examples(self, data_dir):
"""See base class."""
-logger.info("LOOKING AT {} train".format(data_dir))
+logger.info(f"LOOKING AT {data_dir} train")
return self._create_examples(self._read_csv(os.path.join(data_dir, "train.csv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
-logger.info("LOOKING AT {} dev".format(data_dir))
+logger.info(f"LOOKING AT {data_dir} dev")
return self._create_examples(self._read_csv(os.path.join(data_dir, "val.csv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
-logger.info("LOOKING AT {} dev".format(data_dir))
+logger.info(f"LOOKING AT {data_dir} dev")
raise ValueError(
    "For swag testing, the input file does not contain a label column. It can not be tested in current code"
    "setting!"
@@ -419,16 +413,16 @@ class ArcProcessor(DataProcessor):
def get_train_examples(self, data_dir):
"""See base class."""
-logger.info("LOOKING AT {} train".format(data_dir))
+logger.info(f"LOOKING AT {data_dir} train")
return self._create_examples(self._read_json(os.path.join(data_dir, "train.jsonl")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
-logger.info("LOOKING AT {} dev".format(data_dir))
+logger.info(f"LOOKING AT {data_dir} dev")
return self._create_examples(self._read_json(os.path.join(data_dir, "dev.jsonl")), "dev")
def get_test_examples(self, data_dir):
-logger.info("LOOKING AT {} test".format(data_dir))
+logger.info(f"LOOKING AT {data_dir} test")
return self._create_examples(self._read_json(os.path.join(data_dir, "test.jsonl")), "test")
def get_labels(self):
@@ -450,7 +444,7 @@ class ArcProcessor(DataProcessor):
elif truth in "1234":
return int(truth) - 1
else:
-logger.info("truth ERROR! %s", str(truth))
+logger.info(f"truth ERROR! {truth}")
return None
examples = []
@@ -496,11 +490,11 @@ class ArcProcessor(DataProcessor):
if type == "train":
assert len(examples) > 1
assert examples[0].label is not None
-logger.info("len examples: %s}", str(len(examples)))
+logger.info(f"len examples: {len(examples)}")
-logger.info("Three choices: %s", str(three_choice))
+logger.info(f"Three choices: {three_choice}")
-logger.info("Five choices: %s", str(five_choice))
+logger.info(f"Five choices: {five_choice}")
-logger.info("Other choices: %s", str(other_choices))
+logger.info(f"Other choices: {other_choices}")
-logger.info("four choices: %s", str(four_choice))
+logger.info(f"four choices: {four_choice}")
return examples
@@ -520,7 +514,7 @@ def convert_examples_to_features(
features = []
for (ex_index, example) in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
if ex_index % 10000 == 0:
-logger.info("Writing example %d of %d" % (ex_index, len(examples)))
+logger.info(f"Writing example {ex_index} of {len(examples)}")
choices_inputs = []
for ending_idx, (context, ending) in enumerate(zip(example.contexts, example.endings)):
text_a = context
@@ -570,7 +564,7 @@ def convert_examples_to_features(
for f in features[:2]:
logger.info("*** Example ***")
-logger.info("feature: %s" % f)
+logger.info(f"feature: {f}")
return features
...
@@ -240,7 +240,7 @@ def main():
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
-logger.info("Training/evaluation parameters %s", training_args)
+logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
...
@@ -239,7 +239,7 @@ def main():
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
-logger.info("Training/evaluation parameters %s", training_args)
+logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
...
@@ -148,12 +148,10 @@ def main():
level=logging.INFO,
)
logger.info(
-    "n_replicas: %s, distributed training: %s, 16-bits training: %s",
-    training_args.n_replicas,
-    bool(training_args.n_replicas > 1),
-    training_args.fp16,
+    f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
+    f"16-bits training: {training_args.fp16}"
)
-logger.info("Training/evaluation parameters %s", training_args)
+logger.info(f"Training/evaluation parameters {training_args}")
# Prepare Question-Answering task
# Load pretrained model and tokenizer
...
@@ -294,7 +294,7 @@ def main():
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
-logger.info("Training/evaluation parameters %s", training_args)
+logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
...
@@ -264,7 +264,7 @@ def main():
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
-logger.info("Training/evaluation parameters %s", training_args)
+logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
...
@@ -160,18 +160,16 @@ def main():
level=logging.INFO,
)
logger.info(
-    "n_replicas: %s, distributed training: %s, 16-bits training: %s",
-    training_args.n_replicas,
-    bool(training_args.n_replicas > 1),
-    training_args.fp16,
+    f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
+    f"16-bits training: {training_args.fp16}",
)
-logger.info("Training/evaluation parameters %s", training_args)
+logger.info(f"Training/evaluation parameters {training_args}")
try:
num_labels = glue_tasks_num_labels["mnli" if data_args.task_name == "mnli-mm" else data_args.task_name]
output_mode = glue_output_modes[data_args.task_name]
except KeyError:
-raise ValueError("Task not found: %s" % (data_args.task_name))
+raise ValueError(f"Task not found: {data_args.task_name}")
# Load pretrained model and tokenizer
#
@@ -255,8 +253,8 @@ def main():
logger.info("***** Eval results *****")
for key, value in result.items():
-logger.info(" %s = %s", key, value)
+logger.info(f" {key} = {value}")
-writer.write("%s = %s\n" % (key, value))
+writer.write(f"{key} = {value}\n")
results.update(result)
...
@@ -225,12 +225,10 @@ def main():
level=logging.INFO,
)
logger.info(
-    "n_replicas: %s, distributed training: %s, 16-bits training: %s",
-    training_args.n_replicas,
-    bool(training_args.n_replicas > 1),
-    training_args.fp16,
+    f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
+    f"16-bits training: {training_args.fp16}"
)
-logger.info("Training/evaluation parameters %s", training_args)
+logger.info(f"Training/evaluation parameters {training_args}")
# Load pretrained model and tokenizer
#
@@ -300,8 +298,8 @@ def main():
logger.info("***** Eval results *****")
for key, value in result.items():
-logger.info(" %s = %s", key, value)
+logger.info(f" {key} = {value}")
-writer.write("%s = %s\n" % (key, value))
+writer.write(f"{key} = {value}\n")
results.update(result)
...
@@ -201,12 +201,7 @@ def main():
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
-logger.warning(
-    "device: %s, n_gpu: %s, 16-bits training: %s",
-    args.device,
-    args.n_gpu,
-    args.fp16,
-)
+logger.warning(f"device: {args.device}, n_gpu: {args.n_gpu}, 16-bits training: {args.fp16}")
set_seed(args)
@@ -271,7 +266,7 @@ def main():
generated_sequences = []
for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
-print("=== GENERATED SEQUENCE {} ===".format(generated_sequence_idx + 1))
+print(f"=== GENERATED SEQUENCE {generated_sequence_idx + 1} ===")
generated_sequence = generated_sequence.tolist()
# Decode text
...
@@ -213,7 +213,7 @@ def main():
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
-logger.info("Training/evaluation parameters %s", training_args)
+logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
...
@@ -95,4 +95,4 @@ def get_activation(activation_string):
if activation_string in ACT2FN:
return ACT2FN[activation_string]
else:
-raise KeyError("function {} not found in ACT2FN mapping {}".format(activation_string, list(ACT2FN.keys())))
+raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
@@ -91,4 +91,4 @@ def get_tf_activation(activation_string):
if activation_string in ACT2FN:
return ACT2FN[activation_string]
else:
-raise KeyError("function {} not found in ACT2FN mapping {}".format(activation_string, list(ACT2FN.keys())))
+raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
@@ -218,7 +218,7 @@ class PyTorchBenchmark(Benchmark):
return min(runtimes) / 10.0
except RuntimeError as e:
-self.print_fn("Doesn't fit on GPU. {}".format(e))
+self.print_fn(f"Doesn't fit on GPU. {e}")
return "N/A"
def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
@@ -263,5 +263,5 @@ class PyTorchBenchmark(Benchmark):
return memory, summary
except RuntimeError as e:
-self.print_fn("Doesn't fit on GPU. {}".format(e))
+self.print_fn(f"Doesn't fit on GPU. {e}")
return "N/A", None
@@ -227,7 +227,7 @@ class TensorFlowBenchmark(Benchmark):
return min(runtimes) / 10.0
except ResourceExhaustedError as e:
-self.print_fn("Doesn't fit on GPU. {}".format(e))
+self.print_fn(f"Doesn't fit on GPU. {e}")
def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
logger.info(
@@ -290,5 +290,5 @@ class TensorFlowBenchmark(Benchmark):
return memory, summary
except ResourceExhaustedError as e:
-self.print_fn("Doesn't fit on GPU. {}".format(e))
+self.print_fn(f"Doesn't fit on GPU. {e}")
return "N/A", None