Unverified commit d53b8ad7, authored by Yih-Dar, committed by GitHub

Update `use_auth_token` -> `token` in example scripts (#25167)



* pytorch examples

* tensorflow examples

* flax examples

---------
Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
parent 3cbc560d
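
The change applied throughout the diff below is a single keyword rename: the deprecated `use_auth_token` argument of the `from_pretrained` loaders becomes `token`, while the `True if model_args.use_auth_token else None` forwarding is left untouched. A minimal sketch of the new call shape, assuming a `model_args`-style object like the ones these scripts build (the `ModelArguments` dataclass here is a hypothetical stand-in, not the scripts' own definition):

```python
from dataclasses import dataclass
from typing import Optional

from transformers import AutoTokenizer


@dataclass
class ModelArguments:
    # Hypothetical stand-in for the argument dataclasses the example scripts define.
    model_name_or_path: str = "bert-base-uncased"
    cache_dir: Optional[str] = None
    use_fast_tokenizer: bool = True
    use_auth_token: bool = False  # the CLI flag stays; only the kwarg it feeds is renamed


model_args = ModelArguments()

# Before this commit the scripts passed the deprecated keyword:
#     use_auth_token=True if model_args.use_auth_token else None,
# After this commit the same value is forwarded through `token` instead:
tokenizer = AutoTokenizer.from_pretrained(
    model_args.model_name_or_path,
    cache_dir=model_args.cache_dir,
    use_fast=model_args.use_fast_tokenizer,
    token=True if model_args.use_auth_token else None,
)
```
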
@@ -475,18 +475,18 @@ def main():
         model_args.model_name_or_path,
         seed=training_args.seed,
         dtype=getattr(jnp, model_args.dtype),
-        use_auth_token=True if model_args.use_auth_token else None,
+        token=True if model_args.use_auth_token else None,
     )
     image_processor = AutoImageProcessor.from_pretrained(
         model_args.model_name_or_path,
         cache_dir=model_args.cache_dir,
-        use_auth_token=True if model_args.use_auth_token else None,
+        token=True if model_args.use_auth_token else None,
     )
     tokenizer = AutoTokenizer.from_pretrained(
         model_args.model_name_or_path,
         cache_dir=model_args.cache_dir,
         use_fast=model_args.use_fast_tokenizer,
-        use_auth_token=True if model_args.use_auth_token else None,
+        token=True if model_args.use_auth_token else None,
     )
     tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

@@ -576,14 +576,14 @@ def main():
             model_args.tokenizer_name,
             cache_dir=model_args.cache_dir,
             use_fast=model_args.use_fast_tokenizer,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     elif model_args.model_name_or_path:
         tokenizer = AutoTokenizer.from_pretrained(
             model_args.model_name_or_path,
             cache_dir=model_args.cache_dir,
             use_fast=model_args.use_fast_tokenizer,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     else:
         raise ValueError(

@@ -596,13 +596,13 @@ def main():
             model_args.config_name,
             cache_dir=model_args.cache_dir,
             vocab_size=len(tokenizer),
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     elif model_args.model_name_or_path:
         config = BartConfig.from_pretrained(
             model_args.model_name_or_path,
             cache_dir=model_args.cache_dir,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     else:
         config = CONFIG_MAPPING[model_args.model_type]()

@@ -707,7 +707,7 @@ def main():
             config=config,
             seed=training_args.seed,
             dtype=getattr(jnp, model_args.dtype),
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     else:
         config.vocab_size = len(tokenizer)

@@ -463,13 +463,13 @@ def main():
         config = AutoConfig.from_pretrained(
             model_args.config_name,
             cache_dir=model_args.cache_dir,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     elif model_args.model_name_or_path:
         config = AutoConfig.from_pretrained(
             model_args.model_name_or_path,
             cache_dir=model_args.cache_dir,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     else:
         config = CONFIG_MAPPING[model_args.model_type]()

@@ -480,14 +480,14 @@ def main():
             model_args.tokenizer_name,
             cache_dir=model_args.cache_dir,
             use_fast=model_args.use_fast_tokenizer,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     elif model_args.model_name_or_path:
         tokenizer = AutoTokenizer.from_pretrained(
             model_args.model_name_or_path,
             cache_dir=model_args.cache_dir,
             use_fast=model_args.use_fast_tokenizer,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     else:
         raise ValueError(

@@ -501,7 +501,7 @@ def main():
             config=config,
             seed=training_args.seed,
             dtype=getattr(jnp, model_args.dtype),
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     else:
         model = FlaxAutoModelForCausalLM.from_config(

@@ -495,13 +495,13 @@ def main():
         config = AutoConfig.from_pretrained(
             model_args.config_name,
             cache_dir=model_args.cache_dir,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     elif model_args.model_name_or_path:
         config = AutoConfig.from_pretrained(
             model_args.model_name_or_path,
             cache_dir=model_args.cache_dir,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     else:
         config = CONFIG_MAPPING[model_args.model_type]()

@@ -512,14 +512,14 @@ def main():
             model_args.tokenizer_name,
             cache_dir=model_args.cache_dir,
             use_fast=model_args.use_fast_tokenizer,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     elif model_args.model_name_or_path:
         tokenizer = AutoTokenizer.from_pretrained(
             model_args.model_name_or_path,
             cache_dir=model_args.cache_dir,
             use_fast=model_args.use_fast_tokenizer,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     else:
         raise ValueError(

@@ -638,7 +638,7 @@ def main():
             config=config,
             seed=training_args.seed,
             dtype=getattr(jnp, model_args.dtype),
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     else:
         model = FlaxAutoModelForMaskedLM.from_config(

@@ -617,14 +617,14 @@ def main():
             model_args.tokenizer_name,
             cache_dir=model_args.cache_dir,
             use_fast=model_args.use_fast_tokenizer,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     elif model_args.model_name_or_path:
         tokenizer = AutoTokenizer.from_pretrained(
             model_args.model_name_or_path,
             cache_dir=model_args.cache_dir,
             use_fast=model_args.use_fast_tokenizer,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     else:
         raise ValueError(

@@ -637,13 +637,13 @@ def main():
             model_args.config_name,
             cache_dir=model_args.cache_dir,
             vocab_size=len(tokenizer),
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     elif model_args.model_name_or_path:
         config = T5Config.from_pretrained(
             model_args.model_name_or_path,
             cache_dir=model_args.cache_dir,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     else:
         config = CONFIG_MAPPING[model_args.model_type]()

@@ -738,7 +738,7 @@ def main():
             config=config,
             seed=training_args.seed,
             dtype=getattr(jnp, model_args.dtype),
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     else:
         config.vocab_size = len(tokenizer)

@@ -520,14 +520,14 @@ def main():
         model_args.config_name if model_args.config_name else model_args.model_name_or_path,
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
-        use_auth_token=True if model_args.use_auth_token else None,
+        token=True if model_args.use_auth_token else None,
     )
     tokenizer = AutoTokenizer.from_pretrained(
         model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
         cache_dir=model_args.cache_dir,
         use_fast=True,
         revision=model_args.model_revision,
-        use_auth_token=True if model_args.use_auth_token else None,
+        token=True if model_args.use_auth_token else None,
     )
     # endregion

@@ -874,7 +874,7 @@ def main():
         config=config,
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
-        use_auth_token=True if model_args.use_auth_token else None,
+        token=True if model_args.use_auth_token else None,
         seed=training_args.seed,
         dtype=getattr(jnp, model_args.dtype),
     )

@@ -503,13 +503,13 @@ def main():
         config = AutoConfig.from_pretrained(
             model_args.config_name,
             cache_dir=model_args.cache_dir,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     elif model_args.model_name_or_path:
         config = AutoConfig.from_pretrained(
             model_args.model_name_or_path,
             cache_dir=model_args.cache_dir,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     else:
         config = CONFIG_MAPPING[model_args.model_type]()

@@ -520,14 +520,14 @@ def main():
             model_args.tokenizer_name,
             cache_dir=model_args.cache_dir,
             use_fast=model_args.use_fast_tokenizer,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     elif model_args.model_name_or_path:
         tokenizer = AutoTokenizer.from_pretrained(
             model_args.model_name_or_path,
             cache_dir=model_args.cache_dir,
             use_fast=model_args.use_fast_tokenizer,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     else:
         raise ValueError(

@@ -541,7 +541,7 @@ def main():
             config=config,
             seed=training_args.seed,
             dtype=getattr(jnp, model_args.dtype),
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     else:
         model = FlaxAutoModelForSeq2SeqLM.from_config(

@@ -411,17 +411,17 @@ def main():
         model_args.model_name_or_path,
         num_labels=num_labels,
         finetuning_task=data_args.task_name,
-        use_auth_token=True if model_args.use_auth_token else None,
+        token=True if model_args.use_auth_token else None,
     )
     tokenizer = AutoTokenizer.from_pretrained(
         model_args.model_name_or_path,
         use_fast=not model_args.use_slow_tokenizer,
-        use_auth_token=True if model_args.use_auth_token else None,
+        token=True if model_args.use_auth_token else None,
     )
     model = FlaxAutoModelForSequenceClassification.from_pretrained(
         model_args.model_name_or_path,
         config=config,
-        use_auth_token=True if model_args.use_auth_token else None,
+        token=True if model_args.use_auth_token else None,
     )
     # Preprocessing the datasets

@@ -490,7 +490,7 @@ def main():
         finetuning_task=data_args.task_name,
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
-        use_auth_token=True if model_args.use_auth_token else None,
+        token=True if model_args.use_auth_token else None,
     )
     tokenizer_name_or_path = model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path
     if config.model_type in {"gpt2", "roberta"}:

@@ -498,7 +498,7 @@ def main():
             tokenizer_name_or_path,
             cache_dir=model_args.cache_dir,
             revision=model_args.model_revision,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
             add_prefix_space=True,
         )
     else:

@@ -506,14 +506,14 @@ def main():
             tokenizer_name_or_path,
             cache_dir=model_args.cache_dir,
             revision=model_args.model_revision,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     model = FlaxAutoModelForTokenClassification.from_pretrained(
         model_args.model_name_or_path,
         config=config,
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
-        use_auth_token=True if model_args.use_auth_token else None,
+        token=True if model_args.use_auth_token else None,
     )
     # Preprocessing the datasets

@@ -338,7 +338,7 @@ def main():
             num_labels=len(train_dataset.classes),
             image_size=data_args.image_size,
             cache_dir=model_args.cache_dir,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     elif model_args.model_name_or_path:
         config = AutoConfig.from_pretrained(

@@ -346,7 +346,7 @@ def main():
             num_labels=len(train_dataset.classes),
             image_size=data_args.image_size,
             cache_dir=model_args.cache_dir,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     else:
         config = CONFIG_MAPPING[model_args.model_type]()

@@ -358,7 +358,7 @@ def main():
             config=config,
             seed=training_args.seed,
             dtype=getattr(jnp, model_args.dtype),
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     else:
         model = FlaxAutoModelForImageClassification.from_config(

@@ -280,7 +280,7 @@ def main():
         return_attention_mask=model_args.attention_mask,
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
-        use_auth_token=True if model_args.use_auth_token else None,
+        token=True if model_args.use_auth_token else None,
     )
     # `datasets` takes care of automatically loading and resampling the audio,

@@ -340,7 +340,7 @@ def main():
         finetuning_task="audio-classification",
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
-        use_auth_token=True if model_args.use_auth_token else None,
+        token=True if model_args.use_auth_token else None,
     )
     model = AutoModelForAudioClassification.from_pretrained(
         model_args.model_name_or_path,

@@ -348,7 +348,7 @@ def main():
         config=config,
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
-        use_auth_token=True if model_args.use_auth_token else None,
+        token=True if model_args.use_auth_token else None,
         ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
     )

@@ -336,14 +336,14 @@ def main():
         model_args.image_processor_name or model_args.model_name_or_path,
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
-        use_auth_token=True if model_args.use_auth_token else None,
+        token=True if model_args.use_auth_token else None,
     )
     model = AutoModel.from_pretrained(
         model_args.model_name_or_path,
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
-        use_auth_token=True if model_args.use_auth_token else None,
+        token=True if model_args.use_auth_token else None,
     )
     config = model.config

@@ -276,7 +276,7 @@ def main():
         finetuning_task="image-classification",
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
-        use_auth_token=True if model_args.use_auth_token else None,
+        token=True if model_args.use_auth_token else None,
     )
     model = AutoModelForImageClassification.from_pretrained(
         model_args.model_name_or_path,

@@ -284,14 +284,14 @@ def main():
         config=config,
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
-        use_auth_token=True if model_args.use_auth_token else None,
+        token=True if model_args.use_auth_token else None,
         ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
     )
     image_processor = AutoImageProcessor.from_pretrained(
         model_args.image_processor_name or model_args.model_name_or_path,
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
-        use_auth_token=True if model_args.use_auth_token else None,
+        token=True if model_args.use_auth_token else None,
     )
     # Define torchvision transforms to be applied to each image.

@@ -280,7 +280,7 @@ def main():
             config=config,
             cache_dir=model_args.cache_dir,
             revision=model_args.model_revision,
-            use_auth_token=True if model_args.use_auth_token else None,
+            token=True if model_args.use_auth_token else None,
         )
     else:
         logger.info("Training new model from scratch")

@@ -357,7 +357,7 @@ def main():
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
-           use_auth_token=True if model_args.use_auth_token else None,
+           token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")

@@ -497,7 +497,7 @@ def main():
            config=config,
            cache_dir=args.cache_dir,
            revision=args.model_revision,
-           use_auth_token=True if args.use_auth_token else None,
+           token=True if args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")

@@ -415,7 +415,7 @@ def main():
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
-           use_auth_token=True if model_args.use_auth_token else None,
+           token=True if model_args.use_auth_token else None,
            torch_dtype=torch_dtype,
            low_cpu_mem_usage=model_args.low_cpu_mem_usage,
        )

@@ -403,7 +403,7 @@ def main():
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
-           use_auth_token=True if model_args.use_auth_token else None,
+           token=True if model_args.use_auth_token else None,
            low_cpu_mem_usage=model_args.low_cpu_mem_usage,
        )
    else:

@@ -383,7 +383,7 @@ def main():
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
-           use_auth_token=True if model_args.use_auth_token else None,
+           token=True if model_args.use_auth_token else None,
            low_cpu_mem_usage=model_args.low_cpu_mem_usage,
        )
    else:

@@ -314,14 +314,14 @@ def main():
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
-       use_auth_token=True if model_args.use_auth_token else None,
+       token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
-       use_auth_token=True if model_args.use_auth_token else None,
+       token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,

@@ -329,7 +329,7 @@ def main():
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
-       use_auth_token=True if model_args.use_auth_token else None,
+       token=True if model_args.use_auth_token else None,
    )
    # When using your own dataset or a different dataset from swag, you will probably need to change this.