Unverified Commit 319d840b authored by Stefan Schweter's avatar Stefan Schweter Committed by GitHub
Browse files

examples: add keep_linebreaks option to CLM examples (#13150)

* examples: add keep_linebreaks option to text dataset loader for all CLM examples

* examples: introduce new keep_linebreaks option as data argument in CLM examples
parent 45a8eb66
...@@ -156,6 +156,9 @@ class DataTrainingArguments: ...@@ -156,6 +156,9 @@ class DataTrainingArguments:
default=None, default=None,
metadata={"help": "The number of processes to use for the preprocessing."}, metadata={"help": "The number of processes to use for the preprocessing."},
) )
keep_linebreaks: bool = field(
default=True, metadata={"help": "Whether to keep line breaks when using CSV/JSON/TXT files or not."}
)
def __post_init__(self): def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None: if self.dataset_name is None and self.train_file is None and self.validation_file is None:
...@@ -314,12 +317,14 @@ def main(): ...@@ -314,12 +317,14 @@ def main():
if "validation" not in dataset.keys(): if "validation" not in dataset.keys():
dataset["validation"] = load_dataset( dataset["validation"] = load_dataset(
extension, extension,
keep_linebreaks=data_args.keep_linebreaks,
data_files=data_files, data_files=data_files,
split=f"train[:{data_args.validation_split_percentage}%]", split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir, cache_dir=model_args.cache_dir,
) )
dataset["train"] = load_dataset( dataset["train"] = load_dataset(
extension, extension,
keep_linebreaks=data_args.keep_linebreaks,
data_files=data_files, data_files=data_files,
split=f"train[{data_args.validation_split_percentage}%:]", split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir, cache_dir=model_args.cache_dir,
......
...@@ -172,6 +172,9 @@ class DataTrainingArguments: ...@@ -172,6 +172,9 @@ class DataTrainingArguments:
default=None, default=None,
metadata={"help": "The number of processes to use for the preprocessing."}, metadata={"help": "The number of processes to use for the preprocessing."},
) )
keep_linebreaks: bool = field(
default=True, metadata={"help": "Whether to keep line breaks when using CSV/JSON/TXT files or not."}
)
def __post_init__(self): def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None: if self.dataset_name is None and self.train_file is None and self.validation_file is None:
...@@ -282,12 +285,14 @@ def main(): ...@@ -282,12 +285,14 @@ def main():
if "validation" not in raw_datasets.keys(): if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset( raw_datasets["validation"] = load_dataset(
extension, extension,
keep_linebreaks=data_args.keep_linebreaks,
data_files=data_files, data_files=data_files,
split=f"train[:{data_args.validation_split_percentage}%]", split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir, cache_dir=model_args.cache_dir,
) )
raw_datasets["train"] = load_dataset( raw_datasets["train"] = load_dataset(
extension, extension,
keep_linebreaks=data_args.keep_linebreaks,
data_files=data_files, data_files=data_files,
split=f"train[{data_args.validation_split_percentage}%:]", split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir, cache_dir=model_args.cache_dir,
......
...@@ -173,6 +173,9 @@ def parse_args(): ...@@ -173,6 +173,9 @@ def parse_args():
parser.add_argument( parser.add_argument(
"--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets" "--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets"
) )
parser.add_argument(
"--no_keep_linebreaks", action="store_true", help="Do not keep line breaks when using CSV/JSON/TXT files."
)
args = parser.parse_args() args = parser.parse_args()
...@@ -257,11 +260,13 @@ def main(): ...@@ -257,11 +260,13 @@ def main():
if "validation" not in raw_datasets.keys(): if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset( raw_datasets["validation"] = load_dataset(
extension, extension,
keep_linebreaks=not args.no_keep_linebreaks,
data_files=data_files, data_files=data_files,
split=f"train[:{args.validation_split_percentage}%]", split=f"train[:{args.validation_split_percentage}%]",
) )
raw_datasets["train"] = load_dataset( raw_datasets["train"] = load_dataset(
extension, extension,
keep_linebreaks=not args.no_keep_linebreaks,
data_files=data_files, data_files=data_files,
split=f"train[{args.validation_split_percentage}%:]", split=f"train[{args.validation_split_percentage}%:]",
) )
......
...@@ -186,6 +186,9 @@ class DataTrainingArguments: ...@@ -186,6 +186,9 @@ class DataTrainingArguments:
"value if set." "value if set."
}, },
) )
keep_linebreaks: bool = field(
default=True, metadata={"help": "Whether to keep line breaks when using CSV/JSON/TXT files or not."}
)
def __post_init__(self): def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None: if self.dataset_name is None and self.train_file is None and self.validation_file is None:
...@@ -325,7 +328,7 @@ def main(): ...@@ -325,7 +328,7 @@ def main():
extension = data_args.train_file.split(".")[-1] extension = data_args.train_file.split(".")[-1]
if extension == "txt": if extension == "txt":
extension = "text" extension = "text"
raw_datasets = load_dataset(extension, data_files=data_files) raw_datasets = load_dataset(extension, keep_linebreaks=data_args.keep_linebreaks, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html. # https://huggingface.co/docs/datasets/loading_datasets.html.
# endregion # endregion
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment