"test/git@developer.sourcefind.cn:OpenDAS/nni.git" did not exist on "b183c3d8321a258bb9e3a45ec7277c9083a9e391"
Unverified commit 910faa3e authored by Phuc Van Phan, committed by GitHub

feat: add num_proc to load_dataset (#26326)

* feat: add num_proc to load_dataset

* feat: add num_proc for run_mlm_flax

* feat: add num_proc for bart and t5

* chore: remove
parent 576cd45a
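For context, every hunk below applies the same one-line change: the scripts' existing `preprocessing_num_workers` argument is forwarded to `datasets.load_dataset` as `num_proc`, parallelizing dataset download and preparation. A minimal sketch of the call, with an assumed dataset name and worker count that are illustrative rather than taken from the diff:

```python
from datasets import load_dataset

# Illustrative stand-ins; the example scripts read these from data_args.
dataset_name = "wikitext"   # assumed dataset, not from the diff
num_workers = 4             # mirrors --preprocessing_num_workers

# num_proc spreads download/preparation across worker processes
# (supported in recent `datasets` releases).
raw_datasets = load_dataset(
    dataset_name,
    "wikitext-2-raw-v1",
    num_proc=num_workers,
)
```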
@@ -531,6 +531,7 @@ def main():
             data_args.dataset_config_name,
             cache_dir=model_args.cache_dir,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
         if "validation" not in datasets.keys():
@@ -540,6 +541,7 @@ def main():
             split=f"train[:{data_args.validation_split_percentage}%]",
             cache_dir=model_args.cache_dir,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
         datasets["train"] = load_dataset(
             data_args.dataset_name,
@@ -547,6 +549,7 @@ def main():
             split=f"train[{data_args.validation_split_percentage}%:]",
             cache_dir=model_args.cache_dir,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
     else:
         data_files = {}
@@ -562,6 +565,7 @@ def main():
             data_files=data_files,
             cache_dir=model_args.cache_dir,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
         if "validation" not in datasets.keys():
@@ -571,6 +575,7 @@ def main():
             split=f"train[:{data_args.validation_split_percentage}%]",
             cache_dir=model_args.cache_dir,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
         datasets["train"] = load_dataset(
             extension,
@@ -578,6 +583,7 @@ def main():
             split=f"train[{data_args.validation_split_percentage}%:]",
             cache_dir=model_args.cache_dir,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
...
@@ -421,6 +421,7 @@ def main():
             cache_dir=model_args.cache_dir,
             keep_in_memory=False,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
         if "validation" not in dataset.keys():
@@ -430,6 +431,7 @@ def main():
             split=f"train[:{data_args.validation_split_percentage}%]",
             cache_dir=model_args.cache_dir,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
         dataset["train"] = load_dataset(
             data_args.dataset_name,
@@ -437,6 +439,7 @@ def main():
             split=f"train[{data_args.validation_split_percentage}%:]",
             cache_dir=model_args.cache_dir,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
     else:
         data_files = {}
@@ -455,6 +458,7 @@ def main():
             cache_dir=model_args.cache_dir,
             **dataset_args,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
         if "validation" not in dataset.keys():
@@ -465,6 +469,7 @@ def main():
             cache_dir=model_args.cache_dir,
             **dataset_args,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
         dataset["train"] = load_dataset(
             extension,
@@ -473,6 +478,7 @@ def main():
             cache_dir=model_args.cache_dir,
             **dataset_args,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
...
@@ -458,6 +458,7 @@ def main():
             data_args.dataset_config_name,
             cache_dir=model_args.cache_dir,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
         if "validation" not in datasets.keys():
@@ -467,6 +468,7 @@ def main():
             split=f"train[:{data_args.validation_split_percentage}%]",
             cache_dir=model_args.cache_dir,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
         datasets["train"] = load_dataset(
             data_args.dataset_name,
@@ -474,6 +476,7 @@ def main():
             split=f"train[{data_args.validation_split_percentage}%:]",
             cache_dir=model_args.cache_dir,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
     else:
         data_files = {}
@@ -489,6 +492,7 @@ def main():
             data_files=data_files,
             cache_dir=model_args.cache_dir,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
         if "validation" not in datasets.keys():
@@ -498,6 +502,7 @@ def main():
             split=f"train[:{data_args.validation_split_percentage}%]",
             cache_dir=model_args.cache_dir,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
         datasets["train"] = load_dataset(
             extension,
@@ -505,6 +510,7 @@ def main():
             split=f"train[{data_args.validation_split_percentage}%:]",
             cache_dir=model_args.cache_dir,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
...
@@ -572,6 +572,7 @@ def main():
             data_args.dataset_config_name,
             cache_dir=model_args.cache_dir,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
         if "validation" not in datasets.keys():
@@ -581,6 +582,7 @@ def main():
             split=f"train[:{data_args.validation_split_percentage}%]",
             cache_dir=model_args.cache_dir,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
         datasets["train"] = load_dataset(
             data_args.dataset_name,
@@ -588,6 +590,7 @@ def main():
             split=f"train[{data_args.validation_split_percentage}%:]",
             cache_dir=model_args.cache_dir,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
     else:
         data_files = {}
@@ -603,6 +606,7 @@ def main():
             data_files=data_files,
             cache_dir=model_args.cache_dir,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
         if "validation" not in datasets.keys():
@@ -612,6 +616,7 @@ def main():
             split=f"train[:{data_args.validation_split_percentage}%]",
             cache_dir=model_args.cache_dir,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
         )
         datasets["train"] = load_dataset(
             extension,
@@ -619,6 +624,7 @@ def main():
             split=f"train[{data_args.validation_split_percentage}%:]",
             cache_dir=model_args.cache_dir,
             token=model_args.token,
+            num_proc=data_args.preprocessing_num_workers,
        )
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
...
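Taken together, each of the four files follows the same fallback: when the loaded dataset ships no "validation" split, one is carved out of "train" with percentage slicing, and `num_proc` is threaded through every `load_dataset` call. A self-contained sketch of that pattern, using illustrative values in place of the `data_args` fields:

```python
from datasets import load_dataset

# Illustrative stand-ins for the data_args fields seen in the hunks above.
dataset_name = "wikitext"           # assumed dataset, not from the diff
config_name = "wikitext-2-raw-v1"   # assumed config, not from the diff
validation_split_percentage = 5     # mirrors data_args.validation_split_percentage
num_workers = 4                     # mirrors data_args.preprocessing_num_workers

raw_datasets = load_dataset(dataset_name, config_name, num_proc=num_workers)
if "validation" not in raw_datasets.keys():
    # No validation split shipped: take the first N% of train as validation...
    raw_datasets["validation"] = load_dataset(
        dataset_name,
        config_name,
        split=f"train[:{validation_split_percentage}%]",
        num_proc=num_workers,
    )
    # ...and keep the remaining (100 - N)% as the training split.
    raw_datasets["train"] = load_dataset(
        dataset_name,
        config_name,
        split=f"train[{validation_split_percentage}%:]",
        num_proc=num_workers,
    )
```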