Commit ef28df05 authored by Sylvain

Fix quality due to ruff release

parent 73fdc8c5
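Every hunk below applies the same mechanical rewrite: the newer ruff release flags dict comprehensions that only unpack and repack an iterable of key/value pairs (the flake8-comprehensions check, C416 if I recall the rule code correctly), and the fix is to pass the iterable straight to dict(). A minimal sketch of the before/after forms, using made-up label data:

# Illustrative values only; the real label lists come from each script's dataset.
labels = ["O", "B-PER", "I-PER"]
ids = [7, 8, 9]

# Before: comprehensions that merely re-emit each (key, value) pair.
id2label_old = {i: label for i, label in enumerate(labels)}
pair_map_old = {k: v for k, v in zip(labels, ids)}

# After: hand the pair iterable straight to dict(); the result is identical.
id2label_new = dict(enumerate(labels))
pair_map_new = dict(zip(labels, ids))

assert id2label_old == id2label_new
assert pair_map_old == pair_map_new

One caveat worth noting: dict(results.items()), as in the lightning examples below, is still a shallow copy of results, exactly like the comprehension it replaces, so behaviour is unchanged.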
@@ -319,15 +319,13 @@ class FlaxDataCollatorForBartDenoisingLM:
         sentence_ends = np.argwhere(end_sentence_mask)
         sentence_ends[:, 1] += 1
         example_has_multiple_sentences, num_sentences = np.unique(sentence_ends[:, 0], return_counts=True)
-        num_sentences_map = {sent_idx: count for sent_idx, count in zip(example_has_multiple_sentences, num_sentences)}
+        num_sentences_map = dict(zip(example_has_multiple_sentences, num_sentences))
         num_to_permute = np.ceil(num_sentences * self.permute_sentence_ratio).astype(int)
-        num_to_permute_map = {
-            sent_idx: count for sent_idx, count in zip(example_has_multiple_sentences, num_to_permute)
-        }
+        num_to_permute_map = dict(zip(example_has_multiple_sentences, num_to_permute))
         sentence_ends = np.split(sentence_ends[:, 1], np.unique(sentence_ends[:, 0], return_index=True)[1][1:])
-        sentence_ends_map = {sent_idx: count for sent_idx, count in zip(example_has_multiple_sentences, sentence_ends)}
+        sentence_ends_map = dict(zip(example_has_multiple_sentences, sentence_ends))
         for i in range(input_ids.shape[0]):
             if i not in example_has_multiple_sentences:
...
@@ -124,7 +124,7 @@ class GLUETransformer(BaseTransformer):
         results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
-        ret = {k: v for k, v in results.items()}
+        ret = dict(results.items())
         ret["log"] = results
         return ret, preds_list, out_label_list
...
@@ -122,7 +122,7 @@ class NERTransformer(BaseTransformer):
         preds = np.argmax(preds, axis=2)
         out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
-        label_map = {i: label for i, label in enumerate(self.labels)}
+        label_map = dict(enumerate(self.labels))
         out_label_list = [[] for _ in range(out_label_ids.shape[0])]
         preds_list = [[] for _ in range(out_label_ids.shape[0])]
@@ -140,7 +140,7 @@ class NERTransformer(BaseTransformer):
             "f1": f1_score(out_label_list, preds_list),
         }
-        ret = {k: v for k, v in results.items()}
+        ret = dict(results.items())
         ret["log"] = results
         return ret, preds_list, out_label_list
...
@@ -34,7 +34,7 @@ task_score_names = {
 def parse_search_arg(search):
     groups = search.split()
-    entries = {k: vs for k, vs in (g.split("=") for g in groups)}
+    entries = dict((g.split("=") for g in groups))
     entry_names = list(entries.keys())
     sets = [[f"--{k} {v}" for v in vs.split(":")] for k, vs in entries.items()]
     matrix = [list(x) for x in itertools.product(*sets)]
@@ -105,7 +105,7 @@ def run_search():
     col_widths = {col: len(str(col)) for col in col_names}
     results = []
     for r in matrix:
-        hparams = {k: v for k, v in (x.replace("--", "").split() for x in r)}
+        hparams = dict((x.replace("--", "").split() for x in r))
         args_exp = " ".join(r).split()
         args_exp.extend(["--bs", str(args.bs)])  # in case we need to reduce its size due to CUDA OOM
         sys.argv = args_normal + args_exp
...
@@ -158,7 +158,7 @@ def main():
     # Prepare CONLL-2003 task
     labels = token_classification_task.get_labels(data_args.labels)
-    label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}
+    label_map: Dict[int, str] = dict(enumerate(labels))
     num_labels = len(labels)
     # Load pretrained model and tokenizer
...
@@ -144,7 +144,7 @@ def main():
     # Prepare Token Classification task
     labels = token_classification_task.get_labels(data_args.labels)
-    label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}
+    label_map: Dict[int, str] = dict(enumerate(labels))
     num_labels = len(labels)
     # Load pretrained model and tokenizer
...
@@ -407,7 +407,7 @@ def main():
     # Set the correspondences label/ID inside the model config
     model.config.label2id = {l: i for i, l in enumerate(label_list)}
-    model.config.id2label = {i: l for i, l in enumerate(label_list)}
+    model.config.id2label = dict(enumerate(label_list))
     # Map that sends B-Xxx label to its I-Xxx counterpart
     b_to_i_label = []
...
@@ -442,7 +442,7 @@ def main():
     # Set the correspondences label/ID inside the model config
     model.config.label2id = {l: i for i, l in enumerate(label_list)}
-    model.config.id2label = {i: l for i, l in enumerate(label_list)}
+    model.config.id2label = dict(enumerate(label_list))
     # Map that sends B-Xxx label to its I-Xxx counterpart
     b_to_i_label = []
...
@@ -294,11 +294,11 @@ def main():
     if isinstance(features[label_column_name].feature, ClassLabel):
         label_list = features[label_column_name].feature.names
         # No need to convert the labels since they are already ints.
-        id2label = {k: v for k, v in enumerate(label_list)}
+        id2label = dict(enumerate(label_list))
         label2id = {v: k for k, v in enumerate(label_list)}
     else:
         label_list = get_label_list(datasets["train"][label_column_name])
-        id2label = {k: v for k, v in enumerate(label_list)}
+        id2label = dict(enumerate(label_list))
         label2id = {v: k for k, v in enumerate(label_list)}
     num_labels = len(label_list)
...
@@ -360,7 +360,7 @@ class GenerativeQAModule(BaseTransformer):
         loss_tensors = self._step(batch)
-        logs = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
+        logs = dict(zip(self.loss_names, loss_tensors))
         # tokens per batch
         tgt_pad_token_id = (
             self.tokenizer.generator.pad_token_id
@@ -434,7 +434,7 @@ class GenerativeQAModule(BaseTransformer):
         target: List[str] = self.ids_to_clean_text(batch["decoder_input_ids"])
         # print(preds,target)
         loss_tensors = self._step(batch)
-        base_metrics = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
+        base_metrics = dict(zip(self.loss_names, loss_tensors))
         gen_metrics: Dict = self.calc_generative_metrics(preds, target)
         summ_len = np.mean(lmap(len, generated_ids))
...
@@ -321,7 +321,7 @@ class GenerativeQAModule(BaseTransformer):
         preds: List[str] = self.ids_to_clean_text(generated_ids)
         target: List[str] = self.ids_to_clean_text(batch["decoder_input_ids"])
         loss_tensors = self._step(batch)
-        base_metrics = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
+        base_metrics = dict(zip(self.loss_names, loss_tensors))
         gen_metrics: Dict = self.calc_generative_metrics(preds, target)
         summ_len = np.mean(lmap(len, generated_ids))
...
@@ -170,7 +170,7 @@ class SummarizationModule(BaseTransformer):
     def training_step(self, batch, batch_idx) -> Dict:
         loss_tensors = self._step(batch)
-        logs = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
+        logs = dict(zip(self.loss_names, loss_tensors))
         # tokens per batch
         logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
         logs["bs"] = batch["input_ids"].shape[0]
@@ -225,7 +225,7 @@ class SummarizationModule(BaseTransformer):
         preds: List[str] = self.ids_to_clean_text(generated_ids)
         target: List[str] = self.ids_to_clean_text(batch["labels"])
         loss_tensors = self._step(batch)
-        base_metrics = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
+        base_metrics = dict(zip(self.loss_names, loss_tensors))
         rouge: Dict = self.calc_generative_metrics(preds, target)
         summ_len = np.mean(lmap(len, generated_ids))
         base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
...
@@ -303,7 +303,7 @@ def main():
         student_args.student_name_or_path, num_labels=len(class_names)
     )
     tokenizer = AutoTokenizer.from_pretrained(student_args.student_name_or_path, use_fast=data_args.use_fast_tokenizer)
-    model.config.id2label = {i: label for i, label in enumerate(class_names)}
+    model.config.id2label = dict(enumerate(class_names))
     model.config.label2id = {label: i for i, label in enumerate(class_names)}
     # 4. train student on teacher predictions
...
@@ -610,7 +610,7 @@ class Benchmark(ABC):
                 model_name: AutoConfig.from_pretrained(model_name) for model_name in self.args.model_names
             }
         else:
-            self.config_dict = {model_name: config for model_name, config in zip(self.args.model_names, configs)}
+            self.config_dict = dict(zip(self.args.model_names, configs))
         warnings.warn(
             f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
...
@@ -399,9 +399,9 @@ class TrainingSummary:
         dataset_metadata = _listify(self.dataset_metadata)
         if len(dataset_args) < len(dataset_tags):
             dataset_args = dataset_args + [None] * (len(dataset_tags) - len(dataset_args))
-        dataset_mapping = {tag: name for tag, name in zip(dataset_tags, dataset_names)}
-        dataset_arg_mapping = {tag: arg for tag, arg in zip(dataset_tags, dataset_args)}
-        dataset_metadata_mapping = {tag: metadata for tag, metadata in zip(dataset_tags, dataset_metadata)}
+        dataset_mapping = dict(zip(dataset_tags, dataset_names))
+        dataset_arg_mapping = dict(zip(dataset_tags, dataset_args))
+        dataset_metadata_mapping = dict(zip(dataset_tags, dataset_metadata))
         task_mapping = {
             task: TASK_TAG_TO_NAME_MAPPING[task] for task in _listify(self.tasks) if task in TASK_TAG_TO_NAME_MAPPING
...
@@ -57,7 +57,7 @@ class EsmTokenizer(PreTrainedTokenizer):
     def __init__(self, vocab_file, **kwargs):
         super().__init__(**kwargs)
         self.all_tokens = load_vocab_file(vocab_file)
-        self._id_to_token = {ind: tok for ind, tok in enumerate(self.all_tokens)}
+        self._id_to_token = dict(enumerate(self.all_tokens))
         self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
         self.unk_token = "<unk>"
         self.cls_token = "<cls>"
...
@@ -111,7 +111,7 @@ class OriginalMaskFormerConfigToOursConverter:
         swin = model.SWIN
         dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST[0])
-        id2label = {idx: label for idx, label in enumerate(dataset_catalog.stuff_classes)}
+        id2label = dict(enumerate(dataset_catalog.stuff_classes))
         label2id = {label: idx for idx, label in id2label.items()}
         config: MaskFormerConfig = MaskFormerConfig(
...
@@ -122,7 +122,7 @@ class OriginalOneFormerConfigToOursConverter:
         model = original_config.MODEL
         dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST_PANOPTIC[0])
-        id2label = {idx: label for idx, label in enumerate(dataset_catalog.stuff_classes)}
+        id2label = dict(enumerate(dataset_catalog.stuff_classes))
         label2id = {label: idx for idx, label in id2label.items()}
         if is_swin:
...
@@ -207,7 +207,7 @@ def create_vocab_dict(dict_path):
         "<unk>": 3,
     }
-    vocab_dict.update({k: v for k, v in zip(words, range(4, num_words + 4))})
+    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
     return vocab_dict
...
@@ -179,9 +179,7 @@ def export_pytorch(
             f=output.as_posix(),
             input_names=list(config.inputs.keys()),
             output_names=onnx_outputs,
-            dynamic_axes={
-                name: axes for name, axes in chain(config.inputs.items(), config.outputs.items())
-            },
+            dynamic_axes=dict(chain(config.inputs.items(), config.outputs.items())),
             do_constant_folding=True,
             use_external_data_format=config.use_external_data_format(model.num_parameters()),
             enable_onnx_checker=True,
@@ -208,7 +206,7 @@ def export_pytorch(
             f=output.as_posix(),
             input_names=list(config.inputs.keys()),
             output_names=onnx_outputs,
-            dynamic_axes={name: axes for name, axes in chain(config.inputs.items(), config.outputs.items())},
+            dynamic_axes=dict(chain(config.inputs.items(), config.outputs.items())),
             do_constant_folding=True,
             opset_version=opset,
         )
...