Unverified Commit 24a85cca authored by Karim Foda, committed by GitHub

Add use_auth to load_datasets for private datasets to PT and TF examples (#16521)

* fix formatting and remove use_auth

* Add use_auth_token to Flax examples
parent b9a768b3
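
The recurring change in the hunks below is a single extra keyword argument on `load_dataset`: when a script is run with `--use_auth_token`, the locally cached Hub token is forwarded so private dataset repositories can be loaded; otherwise `None` is passed and behaviour is unchanged. A minimal sketch of the same pattern outside the example scripts follows (the dataset repository name is hypothetical, and a prior `huggingface-cli login` is assumed):

from datasets import load_dataset

use_auth_token = True  # stand-in for model_args.use_auth_token in the example scripts

# Forward the cached Hub token only when the flag is set; public datasets
# continue to load exactly as before when this resolves to None.
raw_datasets = load_dataset(
    "username/my-private-dataset",  # hypothetical private dataset repo on the Hub
    split="train",
    use_auth_token=True if use_auth_token else None,
)
print(raw_datasets)
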
@@ -403,7 +403,10 @@ def main():
     for dataset_config_name, train_split_name in zip(args.dataset_config_names, args.dataset_split_names):
         # load dataset
         dataset_split = load_dataset(
-            args.dataset_name, dataset_config_name, split=train_split_name, cache_dir=args.cache_dir
+            args.dataset_name,
+            dataset_config_name,
+            split=train_split_name,
+            cache_dir=args.cache_dir,
         )
         datasets_splits.append(dataset_split)
...
@@ -278,12 +278,18 @@ def main():
     if training_args.do_train:
         raw_datasets["train"] = load_dataset(
-            data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name
+            data_args.dataset_name,
+            data_args.dataset_config_name,
+            split=data_args.train_split_name,
+            use_auth_token=True if model_args.use_auth_token else None,
         )
     if training_args.do_eval:
         raw_datasets["eval"] = load_dataset(
-            data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name
+            data_args.dataset_name,
+            data_args.dataset_config_name,
+            split=data_args.eval_split_name,
+            use_auth_token=True if model_args.use_auth_token else None,
         )
     if data_args.audio_column_name not in next(iter(raw_datasets.values())).column_names:
...
@@ -341,7 +341,10 @@ def main():
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
         raw_datasets = load_dataset(
-            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
+            data_args.dataset_name,
+            data_args.dataset_config_name,
+            cache_dir=model_args.cache_dir,
+            use_auth_token=True if model_args.use_auth_token else None,
         )
     else:
         data_files = {}
@@ -354,7 +357,12 @@ def main():
         if data_args.test_file is not None:
             data_files["test"] = data_args.test_file
             extension = data_args.test_file.split(".")[-1]
-        raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
+        raw_datasets = load_dataset(
+            extension,
+            data_files=data_files,
+            cache_dir=model_args.cache_dir,
+            use_auth_token=True if model_args.use_auth_token else None,
+        )
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
...
@@ -252,11 +252,19 @@ def main():
     # download the dataset.
     if data_args.task_name is not None:
         # Downloading and loading a dataset from the hub.
-        raw_datasets = load_dataset("glue", data_args.task_name, cache_dir=model_args.cache_dir)
+        raw_datasets = load_dataset(
+            "glue",
+            data_args.task_name,
+            cache_dir=model_args.cache_dir,
+            use_auth_token=True if model_args.use_auth_token else None,
+        )
     elif data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
         raw_datasets = load_dataset(
-            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
+            data_args.dataset_name,
+            data_args.dataset_config_name,
+            cache_dir=model_args.cache_dir,
+            use_auth_token=True if model_args.use_auth_token else None,
         )
     else:
         # Loading a dataset from your local files.
@@ -281,10 +289,20 @@ def main():
         if data_args.train_file.endswith(".csv"):
             # Loading a dataset from local csv files
-            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
+            raw_datasets = load_dataset(
+                "csv",
+                data_files=data_files,
+                cache_dir=model_args.cache_dir,
+                use_auth_token=True if model_args.use_auth_token else None,
+            )
         else:
             # Loading a dataset from local json files
-            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
+            raw_datasets = load_dataset(
+                "json",
+                data_files=data_files,
+                cache_dir=model_args.cache_dir,
+                use_auth_token=True if model_args.use_auth_token else None,
+            )
     # See more about loading any type of standard or custom dataset at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
...
@@ -213,19 +213,41 @@ def main():
     # Downloading and loading xnli dataset from the hub.
     if training_args.do_train:
         if model_args.train_language is None:
-            train_dataset = load_dataset("xnli", model_args.language, split="train", cache_dir=model_args.cache_dir)
+            train_dataset = load_dataset(
+                "xnli",
+                model_args.language,
+                split="train",
+                cache_dir=model_args.cache_dir,
+                use_auth_token=True if model_args.use_auth_token else None,
+            )
         else:
             train_dataset = load_dataset(
-                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir
+                "xnli",
+                model_args.train_language,
+                split="train",
+                cache_dir=model_args.cache_dir,
+                use_auth_token=True if model_args.use_auth_token else None,
             )
         label_list = train_dataset.features["label"].names
     if training_args.do_eval:
-        eval_dataset = load_dataset("xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir)
+        eval_dataset = load_dataset(
+            "xnli",
+            model_args.language,
+            split="validation",
+            cache_dir=model_args.cache_dir,
+            use_auth_token=True if model_args.use_auth_token else None,
+        )
         label_list = eval_dataset.features["label"].names
     if training_args.do_predict:
-        predict_dataset = load_dataset("xnli", model_args.language, split="test", cache_dir=model_args.cache_dir)
+        predict_dataset = load_dataset(
+            "xnli",
+            model_args.language,
+            split="test",
+            cache_dir=model_args.cache_dir,
+            use_auth_token=True if model_args.use_auth_token else None,
+        )
         label_list = predict_dataset.features["label"].names
     # Labels
...
@@ -249,7 +249,10 @@ def main():
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
         raw_datasets = load_dataset(
-            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
+            data_args.dataset_name,
+            data_args.dataset_config_name,
+            cache_dir=model_args.cache_dir,
+            use_auth_token=True if model_args.use_auth_token else None,
         )
     else:
         data_files = {}
...
@@ -306,7 +306,10 @@ def main():
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
         raw_datasets = load_dataset(
-            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
+            data_args.dataset_name,
+            data_args.dataset_config_name,
+            cache_dir=model_args.cache_dir,
+            use_auth_token=True if model_args.use_auth_token else None,
         )
     else:
         data_files = {}
@@ -319,7 +322,12 @@ def main():
         if data_args.test_file is not None:
             data_files["test"] = data_args.test_file
             extension = data_args.test_file.split(".")[-1]
-        raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
+        raw_datasets = load_dataset(
+            extension,
+            data_files=data_files,
+            cache_dir=model_args.cache_dir,
+            use_auth_token=True if model_args.use_auth_token else None,
+        )
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
...
@@ -280,17 +280,23 @@ def main():
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+        raw_datasets = load_dataset(
+            data_args.dataset_name,
+            data_args.dataset_config_name,
+            use_auth_token=True if model_args.use_auth_token else None,
+        )
         if "validation" not in raw_datasets.keys():
             raw_datasets["validation"] = load_dataset(
                 data_args.dataset_name,
                 data_args.dataset_config_name,
                 split=f"train[:{data_args.validation_split_percentage}%]",
+                use_auth_token=True if model_args.use_auth_token else None,
             )
             raw_datasets["train"] = load_dataset(
                 data_args.dataset_name,
                 data_args.dataset_config_name,
                 split=f"train[{data_args.validation_split_percentage}%:]",
+                use_auth_token=True if model_args.use_auth_token else None,
             )
     else:
         data_files = {}
@@ -303,7 +309,12 @@ def main():
         if extension == "txt":
             extension = "text"
             dataset_args["keep_linebreaks"] = data_args.keep_linebreaks
-        raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args)
+        raw_datasets = load_dataset(
+            extension,
+            data_files=data_files,
+            use_auth_token=True if model_args.use_auth_token else None,
+            **dataset_args,
+        )
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
     # endregion
...
@@ -292,17 +292,23 @@ def main():
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+        raw_datasets = load_dataset(
+            data_args.dataset_name,
+            data_args.dataset_config_name,
+            use_auth_token=True if model_args.use_auth_token else None,
+        )
         if "validation" not in raw_datasets.keys():
             raw_datasets["validation"] = load_dataset(
                 data_args.dataset_name,
                 data_args.dataset_config_name,
                 split=f"train[:{data_args.validation_split_percentage}%]",
+                use_auth_token=True if model_args.use_auth_token else None,
             )
             raw_datasets["train"] = load_dataset(
                 data_args.dataset_name,
                 data_args.dataset_config_name,
                 split=f"train[{data_args.validation_split_percentage}%:]",
+                use_auth_token=True if model_args.use_auth_token else None,
             )
     else:
         data_files = {}
@@ -313,7 +319,11 @@ def main():
         extension = data_args.train_file.split(".")[-1]
         if extension == "txt":
             extension = "text"
-        raw_datasets = load_dataset(extension, data_files=data_files)
+        raw_datasets = load_dataset(
+            extension,
+            data_files=data_files,
+            use_auth_token=True if model_args.use_auth_token else None,
+        )
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
...
@@ -290,10 +290,20 @@ def main():
         if data_args.validation_file is not None:
             data_files["validation"] = data_args.validation_file
         extension = data_args.train_file.split(".")[-1]
-        raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
+        raw_datasets = load_dataset(
+            extension,
+            data_files=data_files,
+            cache_dir=model_args.cache_dir,
+            use_auth_token=True if model_args.use_auth_token else None,
+        )
     else:
         # Downloading and loading the swag dataset from the hub.
-        raw_datasets = load_dataset("swag", "regular", cache_dir=model_args.cache_dir)
+        raw_datasets = load_dataset(
+            "swag",
+            "regular",
+            cache_dir=model_args.cache_dir,
+            use_auth_token=True if model_args.use_auth_token else None,
+        )
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
...
@@ -278,7 +278,12 @@ def main():
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
+        datasets = load_dataset(
+            data_args.dataset_name,
+            data_args.dataset_config_name,
+            cache_dir=model_args.cache_dir,
+            use_auth_token=True if model_args.use_auth_token else None,
+        )
     else:
         data_files = {}
         if data_args.train_file is not None:
@@ -291,7 +296,13 @@ def main():
         if data_args.test_file is not None:
             data_files["test"] = data_args.test_file
             extension = data_args.test_file.split(".")[-1]
-        datasets = load_dataset(extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir)
+        datasets = load_dataset(
+            extension,
+            data_files=data_files,
+            field="data",
+            cache_dir=model_args.cache_dir,
+            use_auth_token=True if model_args.use_auth_token else None,
+        )
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
     # endregion
...
@@ -391,7 +391,10 @@ def main():
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
         raw_datasets = load_dataset(
-            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
+            data_args.dataset_name,
+            data_args.dataset_config_name,
+            cache_dir=model_args.cache_dir,
+            use_auth_token=True if model_args.use_auth_token else None,
         )
     else:
         data_files = {}
@@ -404,7 +407,12 @@ def main():
         if data_args.test_file is not None:
             data_files["test"] = data_args.test_file
             extension = data_args.test_file.split(".")[-1]
-        raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
+        raw_datasets = load_dataset(
+            extension,
+            data_files=data_files,
+            cache_dir=model_args.cache_dir,
+            use_auth_token=True if model_args.use_auth_token else None,
+        )
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
     # endregion
...
@@ -236,7 +236,12 @@ def main():
     # Downloading and loading a dataset from the hub. In distributed training, the load_dataset function guarantee
     # that only one local process can concurrently download the dataset.
-    datasets = load_dataset("glue", data_args.task_name, cache_dir=model_args.cache_dir)
+    datasets = load_dataset(
+        "glue",
+        data_args.task_name,
+        cache_dir=model_args.cache_dir,
+        use_auth_token=True if model_args.use_auth_token else None,
+    )
     # See more about loading any type of standard or custom dataset at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
...
@@ -236,7 +236,12 @@ def main():
         if data_args.input_file_extension == "csv":
             # Loading a dataset from local csv files
-            datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
+            datasets = load_dataset(
+                "csv",
+                data_files=data_files,
+                cache_dir=model_args.cache_dir,
+                use_auth_token=True if model_args.use_auth_token else None,
+            )
         else:
             # Loading a dataset from local json files
             datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
...
@@ -266,7 +266,11 @@ def main():
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+        raw_datasets = load_dataset(
+            data_args.dataset_name,
+            data_args.dataset_config_name,
+            use_auth_token=True if model_args.use_auth_token else None,
+        )
     else:
         data_files = {}
         if data_args.train_file is not None:
@@ -274,7 +278,11 @@ def main():
         if data_args.validation_file is not None:
             data_files["validation"] = data_args.validation_file
         extension = data_args.train_file.split(".")[-1]
-        raw_datasets = load_dataset(extension, data_files=data_files)
+        raw_datasets = load_dataset(
+            extension,
+            data_files=data_files,
+            use_auth_token=True if model_args.use_auth_token else None,
+        )
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
...
@@ -347,7 +347,10 @@ def main():
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
         raw_datasets = load_dataset(
-            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
+            data_args.dataset_name,
+            data_args.dataset_config_name,
+            cache_dir=model_args.cache_dir,
+            use_auth_token=True if model_args.use_auth_token else None,
         )
     else:
         data_files = {}
@@ -357,7 +360,12 @@ def main():
         if data_args.validation_file is not None:
             data_files["validation"] = data_args.validation_file
         extension = data_args.validation_file.split(".")[-1]
-        raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
+        raw_datasets = load_dataset(
+            extension,
+            data_files=data_files,
+            cache_dir=model_args.cache_dir,
+            use_auth_token=True if model_args.use_auth_token else None,
+        )
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
     # endregion
...