"git@developer.sourcefind.cn:chenpangpang/transformers.git" did not exist on "87677fcc4dfda7ee9e0b5609344b46d6e3ccd227"
Unverified Commit 24a85cca authored by Karim Foda, committed by GitHub

Add use_auth to load_datasets for private datasets to PT and TF examples (#16521)

* fix formatting and remove use_auth

* Add use_auth_token to Flax examples
parent b9a768b3
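
Every hunk below follows the same pattern: each example script gains a `use_auth_token` field on its `ModelArguments` dataclass, and that flag is forwarded as `use_auth_token=True if model_args.use_auth_token else None` to the `load_dataset` and `from_pretrained` calls so the scripts can read private datasets and models from the Hub. The sketch below shows that pattern in isolation; the `HfArgumentParser` wiring and the placeholder dataset/model names are illustrative assumptions for this note, not part of the commit itself.

```python
# Minimal sketch of the pattern applied by this commit (illustrative only).
from dataclasses import dataclass, field
from typing import Optional

from datasets import load_dataset
from transformers import AutoConfig, AutoTokenizer, HfArgumentParser


@dataclass
class ModelArguments:
    model_name_or_path: str = field(default="gpt2")  # placeholder model
    cache_dir: Optional[str] = field(default=None)
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` "
            "(necessary to use this script with private models)."
        },
    )


def main():
    (model_args,) = HfArgumentParser(ModelArguments).parse_args_into_dataclasses()

    # Pass None rather than False when the flag is unset, so the libraries keep
    # their default token-resolution behaviour.
    token = True if model_args.use_auth_token else None

    dataset = load_dataset(
        "my-org/private-dataset",  # hypothetical private dataset on the Hub
        cache_dir=model_args.cache_dir,
        use_auth_token=token,
    )
    config = AutoConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_auth_token=token,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_auth_token=token,
    )
    return dataset, config, tokenizer


if __name__ == "__main__":
    main()
```

Forwarding `None` instead of `False` when the flag is unset leaves token handling to the libraries' defaults, which is why the scripts use the ternary rather than passing the boolean directly.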
...@@ -178,6 +178,13 @@ class ModelArguments:
"help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
...@@ -418,6 +425,7 @@ def main():
cache_dir=model_args.cache_dir,
keep_in_memory=False,
data_dir=data_args.data_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
data_files = {}
...@@ -430,7 +438,12 @@ def main():
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
dataset = load_dataset(
extension,
data_files=data_files,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
...@@ -439,12 +452,18 @@ def main():
model_args.model_name_or_path,
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
use_auth_token=True if model_args.use_auth_token else None,
)
feature_extractor = AutoFeatureExtractor.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)
...
...@@ -165,6 +165,13 @@ class ModelArguments:
"help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
...@@ -363,7 +370,11 @@ def main():
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
dataset = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
keep_in_memory=False,
use_auth_token=True if model_args.use_auth_token else None,
)
if "validation" not in dataset.keys():
...@@ -372,12 +383,14 @@ def main():
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
dataset["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
data_files = {}
...@@ -390,7 +403,13 @@ def main():
if extension == "txt":
extension = "text"
dataset_args["keep_linebreaks"] = data_args.keep_linebreaks
dataset = load_dataset(
extension,
data_files=data_files,
cache_dir=model_args.cache_dir,
**dataset_args,
use_auth_token=True if model_args.use_auth_token else None,
)
if "validation" not in dataset.keys():
dataset["validation"] = load_dataset(
...@@ -399,6 +418,7 @@ def main():
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
**dataset_args,
use_auth_token=True if model_args.use_auth_token else None,
)
dataset["train"] = load_dataset(
extension,
...@@ -406,6 +426,7 @@ def main():
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
**dataset_args,
use_auth_token=True if model_args.use_auth_token else None,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
...@@ -416,20 +437,34 @@ def main():
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
config = AutoConfig.from_pretrained(
model_args.config_name,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
use_auth_token=True if model_args.use_auth_token else None,
)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
raise ValueError(
...@@ -439,11 +474,18 @@ def main():
if model_args.model_name_or_path:
model = FlaxAutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
config=config,
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
use_auth_token=True if model_args.use_auth_token else None,
)
else:
model = FlaxAutoModelForCausalLM.from_config(
config,
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
use_auth_token=True if model_args.use_auth_token else None,
)
# Preprocessing the datasets.
...
...@@ -163,6 +163,13 @@ class ModelArguments:
"help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
...@@ -396,7 +403,12 @@ def main():
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
if "validation" not in datasets.keys():
datasets["validation"] = load_dataset(
...@@ -404,12 +416,14 @@ def main():
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
data_files = {}
...@@ -420,7 +434,12 @@ def main():
extension = data_args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
datasets = load_dataset(
extension,
data_files=data_files,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
if "validation" not in datasets.keys():
datasets["validation"] = load_dataset(
...@@ -428,12 +447,14 @@ def main():
data_files=data_files,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
datasets["train"] = load_dataset(
extension,
data_files=data_files,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
...@@ -444,20 +465,34 @@ def main():
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
config = AutoConfig.from_pretrained(
model_args.config_name,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
use_auth_token=True if model_args.use_auth_token else None,
)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
raise ValueError(
...@@ -572,11 +607,18 @@ def main():
if model_args.model_name_or_path:
model = FlaxAutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path,
config=config,
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
use_auth_token=True if model_args.use_auth_token else None,
)
else:
model = FlaxAutoModelForMaskedLM.from_config(
config,
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
use_auth_token=True if model_args.use_auth_token else None,
)
# Store some constant
...
...@@ -162,6 +162,13 @@ class ModelArguments:
"help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
...@@ -525,7 +532,12 @@ def main():
# 'text' is found. You can easily tweak this behavior (see below).
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
if "validation" not in datasets.keys():
datasets["validation"] = load_dataset(
...@@ -533,12 +545,14 @@ def main():
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
data_files = {}
...@@ -549,7 +563,12 @@ def main():
extension = data_args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
datasets = load_dataset(
extension,
data_files=data_files,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
if "validation" not in datasets.keys():
datasets["validation"] = load_dataset(
...@@ -557,12 +576,14 @@ def main():
data_files=data_files,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
datasets["train"] = load_dataset(
extension,
data_files=data_files,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
...@@ -571,11 +592,17 @@ def main():
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
use_auth_token=True if model_args.use_auth_token else None,
)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
raise ValueError(
...@@ -585,10 +612,17 @@ def main():
if model_args.config_name:
config = T5Config.from_pretrained(
model_args.config_name,
cache_dir=model_args.cache_dir,
vocab_size=len(tokenizer),
use_auth_token=True if model_args.use_auth_token else None,
)
elif model_args.model_name_or_path:
config = T5Config.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
...@@ -678,11 +712,20 @@ def main():
if model_args.model_name_or_path:
model = FlaxT5ForConditionalGeneration.from_pretrained(
model_args.model_name_or_path,
config=config,
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
use_auth_token=True if model_args.use_auth_token else None,
)
else:
config.vocab_size = len(tokenizer)
model = FlaxT5ForConditionalGeneration(
config,
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
use_auth_token=True if model_args.use_auth_token else None,
)
# Data collator
# This one will take care of randomly masking the tokens.
...
...@@ -448,7 +448,10 @@ def main():
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
# Loading the dataset from local csv or json file.
...@@ -463,7 +466,13 @@ def main():
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
raw_datasets = load_dataset(
extension,
data_files=data_files,
field="data",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# endregion
...
...@@ -176,6 +176,13 @@ class ModelArguments:
"help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
...@@ -421,7 +428,11 @@ def main():
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
dataset = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
keep_in_memory=False,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
data_files = {}
...@@ -434,27 +445,46 @@ def main():
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
dataset = load_dataset(
extension,
data_files=data_files,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
if model_args.config_name:
config = AutoConfig.from_pretrained(
model_args.config_name,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
use_auth_token=True if model_args.use_auth_token else None,
)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
raise ValueError(
...@@ -464,11 +494,18 @@ def main():
if model_args.model_name_or_path:
model = FlaxAutoModelForSeq2SeqLM.from_pretrained(
model_args.model_name_or_path,
config=config,
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
use_auth_token=True if model_args.use_auth_token else None,
)
else:
model = FlaxAutoModelForSeq2SeqLM.from_config(
config,
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
use_auth_token=True if model_args.use_auth_token else None,
)
if model.config.decoder_start_token_id is None:
...
...@@ -337,7 +337,11 @@ def main():
# download the dataset.
if data_args.task_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
"glue",
data_args.task_name,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
# Loading the dataset from local csv or json file.
data_files = {}
...@@ -346,7 +350,11 @@ def main():
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = (data_args.train_file if data_args.train_file is not None else data_args.valid_file).split(".")[-1]
raw_datasets = load_dataset(
extension,
data_files=data_files,
use_auth_token=True if model_args.use_auth_token else None,
)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
...@@ -372,12 +380,21 @@ def main():
# Load pretrained model and tokenizer
config = AutoConfig.from_pretrained(
model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
use_fast=not model_args.use_slow_tokenizer,
use_auth_token=True if model_args.use_auth_token else None,
)
model = FlaxAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
config=config,
use_auth_token=True if model_args.use_auth_token else None,
)
# Preprocessing the datasets
if data_args.task_name is not None:
...
...@@ -391,7 +391,10 @@ def main():
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
# Loading the dataset from local csv or json file.
...@@ -401,7 +404,12 @@ def main():
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = (data_args.train_file if data_args.train_file is not None else data_args.valid_file).split(".")[-1]
raw_datasets = load_dataset(
extension,
data_files=data_files,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
...
...@@ -154,6 +154,13 @@ class ModelArguments:
"help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
...@@ -315,6 +322,7 @@ def main():
num_labels=len(train_dataset.classes),
image_size=data_args.image_size,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(
...@@ -322,6 +330,7 @@ def main():
num_labels=len(train_dataset.classes),
image_size=data_args.image_size,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
config = CONFIG_MAPPING[model_args.model_type]()
...@@ -329,11 +338,18 @@ def main():
if model_args.model_name_or_path:
model = FlaxAutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path,
config=config,
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
use_auth_token=True if model_args.use_auth_token else None,
)
else:
model = FlaxAutoModelForImageClassification.from_config(
config,
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
use_auth_token=True if model_args.use_auth_token else None,
)
# Store some constant
...
...@@ -227,10 +227,16 @@ def main():
# Initialize our dataset and prepare it for the audio classification task.
raw_datasets = DatasetDict()
raw_datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=data_args.train_split_name,
use_auth_token=True if model_args.use_auth_token else None,
)
raw_datasets["eval"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=data_args.eval_split_name,
use_auth_token=True if model_args.use_auth_token else None,
)
if data_args.audio_column_name not in raw_datasets["train"].column_names:
...
...@@ -276,6 +276,7 @@ def main():
cache_dir=model_args.cache_dir,
keep_in_memory=False,
data_dir=data_args.data_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
data_files = {}
...@@ -288,7 +289,12 @@ def main():
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
dataset = load_dataset(
extension,
data_files=data_files,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
...
...@@ -207,6 +207,7 @@ def main():
data_files=data_args.data_files,
cache_dir=model_args.cache_dir,
task="image-classification",
use_auth_token=True if model_args.use_auth_token else None,
)
# If we don't have a validation split, split off a percentage of train as validation.
...
...@@ -207,6 +207,7 @@ def main():
data_args.dataset_config_name,
data_files=data_args.data_files,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
# If we don't have a validation split, split off a percentage of train as validation.
...
...@@ -266,6 +266,7 @@ def main():
data_args.dataset_config_name,
data_files=data_args.data_files,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
# If we don't have a validation split, split off a percentage of train as validation.
...
...@@ -254,7 +254,10 @@ def main():
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
...@@ -262,12 +265,14 @@ def main():
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
raw_datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
data_files = {}
...@@ -284,7 +289,13 @@ def main():
if extension == "txt":
extension = "text"
dataset_args["keep_linebreaks"] = data_args.keep_linebreaks
raw_datasets = load_dataset(
extension,
data_files=data_files,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
**dataset_args,
)
# If no validation data is there, validation_split_percentage will be used to divide the dataset.
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
...@@ -292,6 +303,7 @@ def main():
data_files=data_files,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
**dataset_args,
)
raw_datasets["train"] = load_dataset(
...@@ -299,6 +311,7 @@ def main():
data_files=data_files,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
**dataset_args,
)
...
...@@ -263,7 +263,10 @@ def main():
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
...@@ -271,12 +274,14 @@ def main():
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
raw_datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
data_files = {}
...@@ -288,7 +293,12 @@ def main():
extension = data_args.validation_file.split(".")[-1]
if extension == "txt":
extension = "text"
raw_datasets = load_dataset(
extension,
data_files=data_files,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
# If no validation data is there, validation_split_percentage will be used to divide the dataset.
if "validation" not in raw_datasets.keys():
...@@ -297,12 +307,14 @@ def main():
data_files=data_files,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
raw_datasets["train"] = load_dataset(
extension,
data_files=data_files,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
...
...@@ -256,7 +256,10 @@ def main():
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
...@@ -264,12 +267,14 @@ def main():
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
raw_datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
data_files = {}
...@@ -288,12 +293,14 @@ def main():
data_files=data_files,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
raw_datasets["train"] = load_dataset(
extension,
data_files=data_files,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
...
...@@ -269,10 +269,20 @@ def main():
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
raw_datasets = load_dataset(
extension,
data_files=data_files,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
# Downloading and loading the swag dataset from the hub.
raw_datasets = load_dataset(
"swag",
"regular",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
...
...@@ -262,7 +262,10 @@ def main():
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
data_files = {}
...@@ -276,7 +279,13 @@ def main():
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
raw_datasets = load_dataset(
extension,
data_files=data_files,
field="data",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
...
...@@ -260,7 +260,10 @@ def main():
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
data_files = {}
...@@ -273,7 +276,13 @@ def main():
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
raw_datasets = load_dataset(
extension,
data_files=data_files,
field="data",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
...