"vscode:/vscode.git/clone" did not exist on "fe34486f129d47abc0dddb39a22b24cdbe52dec8"
Unverified Commit 14510938 authored by Jackmin801, committed by GitHub

Allow `trust_remote_code` in example scripts (#25248)

* pytorch examples

* pytorch mim no trainer

* cookiecutter

* flax examples

* missed line in pytorch run_glue

* tensorflow examples

* tensorflow run_clip

* tensorflow run_mlm

* tensorflow run_ner

* tensorflow run_clm

* pytorch example from_configs

* pytorch no trainer examples

* Revert "tensorflow run_clip"

This reverts commit 261f86ac1f1c9e05dd3fd0291e1a1f8e573781d5.

* fix: duplicated argument
parent 65001cb1
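
For context, a minimal sketch (not part of the diff) of how the new dataclass field surfaces on the command line in the Trainer-based scripts: `HfArgumentParser` turns each `ModelArguments` field into a CLI flag, so the hunks below add a `--trust_remote_code` option. The parsing call shown is the standard `HfArgumentParser` API; the field mirrors the one added in this commit.

from dataclasses import dataclass, field

from transformers import HfArgumentParser

@dataclass
class ModelArguments:
    # Same field this commit adds to each example script.
    trust_remote_code: bool = field(
        default=False,
        metadata={"help": "Allow custom models defined on the Hub to run their own modeling code."},
    )

parser = HfArgumentParser(ModelArguments)
# Bool fields become flags; passing the flag (optionally with a truthy value)
# flips the False default to True.
(model_args,) = parser.parse_args_into_dataclasses(args=["--trust_remote_code", "True"])
assert model_args.trust_remote_code is True
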
@@ -198,6 +198,16 @@ class ModelArguments:
"help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
)
},
)
@dataclass
@@ -489,17 +499,20 @@ def main():
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
image_processor = AutoImageProcessor.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)
...
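
What the forwarded kwarg changes at load time, as a hedged sketch ("some-org/custom-model" is a placeholder repo id, not a real checkpoint): with the default `trust_remote_code=False`, loading a repository whose config maps to custom modeling code fails rather than executing it; opting in downloads and runs that code locally, which is why the help text warns to read the code first.

from transformers import AutoModel

# Placeholder repo id for illustration; assumes the repo ships custom modeling code.
model = AutoModel.from_pretrained("some-org/custom-model", trust_remote_code=True)
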
@@ -185,6 +185,16 @@ class ModelArguments:
"help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
)
},
)
@dataclass
@@ -477,12 +487,14 @@ def main():
model_args.config_name,
cache_dir=model_args.cache_dir,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
else:
config = CONFIG_MAPPING[model_args.model_type]()
@@ -494,6 +506,7 @@ def main():
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
@@ -501,6 +514,7 @@ def main():
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
else:
raise ValueError(
@@ -515,12 +529,14 @@ def main():
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
else:
model = FlaxAutoModelForCausalLM.from_config(
config,
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
trust_remote_code=model_args.trust_remote_code,
)
# Preprocessing the datasets.
...
@@ -190,6 +190,16 @@ class ModelArguments:
"help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
)
},
)
@dataclass
@@ -509,12 +519,14 @@ def main():
model_args.config_name,
cache_dir=model_args.cache_dir,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
else:
config = CONFIG_MAPPING[model_args.model_type]()
@@ -526,6 +538,7 @@ def main():
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
@@ -533,6 +546,7 @@ def main():
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
else:
raise ValueError(
@@ -652,12 +666,14 @@ def main():
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
else:
model = FlaxAutoModelForMaskedLM.from_config(
config,
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
trust_remote_code=model_args.trust_remote_code,
)
if training_args.gradient_checkpointing:
...
@@ -171,6 +171,16 @@ class ModelArguments:
"help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
)
},
)
dtype: Optional[str] = field(
default="float32",
metadata={
@@ -534,6 +544,7 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
@@ -541,6 +552,7 @@ def main():
use_fast=True,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
# endregion
@@ -888,6 +900,7 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
)
...
@@ -204,6 +204,16 @@ class ModelArguments:
"help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
)
},
)
@dataclass
@@ -517,12 +527,14 @@ def main():
model_args.config_name,
cache_dir=model_args.cache_dir,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
else:
config = CONFIG_MAPPING[model_args.model_type]()
@@ -534,6 +546,7 @@ def main():
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
@@ -541,6 +554,7 @@ def main():
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
else:
raise ValueError(
@@ -555,12 +569,14 @@ def main():
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
else:
model = FlaxAutoModelForSeq2SeqLM.from_config(
config,
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
trust_remote_code=model_args.trust_remote_code,
)
if training_args.gradient_checkpointing:
...
@@ -117,6 +117,16 @@ class ModelArguments:
"help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
)
},
)
@dataclass
@@ -425,16 +435,19 @@ def main():
num_labels=num_labels,
finetuning_task=data_args.task_name,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
use_fast=not model_args.use_slow_tokenizer,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
model = FlaxAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
config=config,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
# Preprocessing the datasets
...
@@ -165,6 +165,16 @@ class ModelArguments:
"help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
)
},
)
@dataclass
@@ -504,6 +514,7 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
tokenizer_name_or_path = model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path
if config.model_type in {"gpt2", "roberta"}:
@@ -512,6 +523,7 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
add_prefix_space=True,
)
else:
@@ -520,6 +532,7 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
model = FlaxAutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path,
@@ -527,6 +540,7 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
# Preprocessing the datasets
...
@@ -175,6 +175,16 @@ class ModelArguments:
"help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
)
},
)
@dataclass
@@ -352,6 +362,7 @@ def main():
image_size=data_args.image_size,
cache_dir=model_args.cache_dir,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(
@@ -360,6 +371,7 @@ def main():
image_size=data_args.image_size,
cache_dir=model_args.cache_dir,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
else:
config = CONFIG_MAPPING[model_args.model_type]()
@@ -372,12 +384,14 @@ def main():
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
else:
model = FlaxAutoModelForImageClassification.from_config(
config,
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
trust_remote_code=model_args.trust_remote_code,
)
# Store some constant
...
@@ -167,6 +167,16 @@ class ModelArguments:
"help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
)
},
)
freeze_feature_extractor: Optional[bool] = field(
default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
)
@@ -293,6 +303,7 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
# `datasets` takes care of automatically loading and resampling the audio,
@@ -353,6 +364,7 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
model = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path,
@@ -361,6 +373,7 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
)
...
@@ -102,6 +102,16 @@ class ModelArguments:
"help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
)
},
)
freeze_vision_model: bool = field(
default=False, metadata={"help": "Whether to freeze the vision model parameters or not."}
)
@@ -350,6 +360,7 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
model = AutoModel.from_pretrained(
@@ -357,6 +368,7 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
config = model.config
...
@@ -158,6 +158,16 @@ class ModelArguments:
"help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
)
},
)
ignore_mismatched_sizes: bool = field(
default=False,
metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
@@ -290,6 +300,7 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
model = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path,
@@ -298,6 +309,7 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
)
image_processor = AutoImageProcessor.from_pretrained(
@@ -305,6 +317,7 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
# Define torchvision transforms to be applied to each image.
...
@@ -146,6 +146,16 @@ def parse_args():
"--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
)
parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--trust_remote_code",
type=bool,
default=False,
help=(
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
),
)
parser.add_argument(
"--checkpointing_steps",
type=str,
@@ -300,13 +310,18 @@ def main():
i2label=id2label,
label2id=label2id,
finetuning_task="image-classification",
trust_remote_code=args.trust_remote_code,
)
image_processor = AutoImageProcessor.from_pretrained(
args.model_name_or_path,
trust_remote_code=args.trust_remote_code,
) )
model = AutoModelForImageClassification.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
ignore_mismatched_sizes=args.ignore_mismatched_sizes,
trust_remote_code=args.trust_remote_code,
)
# Preprocessing the datasets
...
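
One caveat on the `no_trainer` variants, which register the option through argparse as above (a property of stdlib argparse, not something this commit changes): `type=bool` applies Python truthiness to the raw string, so any non-empty value parses as True, including the literal string "False". A quick demonstration:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--trust_remote_code", type=bool, default=False)

print(parser.parse_args([]).trust_remote_code)  # False (the default)
print(parser.parse_args(["--trust_remote_code", "True"]).trust_remote_code)  # True
print(parser.parse_args(["--trust_remote_code", "False"]).trust_remote_code)  # also True: bool("False") is truthy
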
@@ -169,6 +169,16 @@ class ModelArguments:
"help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
)
},
)
image_size: Optional[int] = field(
default=None,
metadata={
@@ -319,6 +329,7 @@ def main():
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"token": model_args.token,
"trust_remote_code": model_args.trust_remote_code,
}
if model_args.config_name_or_path:
config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
@@ -371,10 +382,11 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForMaskedImageModeling.from_config(config, trust_remote_code=model_args.trust_remote_code)
if training_args.do_train:
column_names = ds["train"].column_names
...
@@ -195,6 +195,16 @@ def parse_args():
"with private models)."
),
)
parser.add_argument(
"--trust_remote_code",
type=bool,
default=False,
help=(
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
),
)
parser.add_argument(
"--image_size",
type=int,
@@ -448,6 +458,7 @@ def main():
"cache_dir": args.cache_dir,
"revision": args.model_revision,
"use_auth_token": True if args.use_auth_token else None,
"trust_remote_code": args.trust_remote_code,
}
if args.config_name_or_path:
config = AutoConfig.from_pretrained(args.config_name_or_path, **config_kwargs)
@@ -498,10 +509,14 @@ def main():
cache_dir=args.cache_dir,
revision=args.model_revision,
token=True if args.use_auth_token else None,
trust_remote_code=args.trust_remote_code,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForMaskedImageModeling.from_config(
config,
trust_remote_code=args.trust_remote_code,
)
column_names = ds["train"].column_names column_names = ds["train"].column_names
......
@@ -127,6 +127,16 @@ class ModelArguments:
"help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
)
},
)
torch_dtype: Optional[str] = field(
default=None,
metadata={
@@ -387,6 +397,7 @@ def main():
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"token": model_args.token,
"trust_remote_code": model_args.trust_remote_code,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
@@ -405,6 +416,7 @@ def main():
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"token": model_args.token,
"trust_remote_code": model_args.trust_remote_code,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
@@ -429,11 +441,12 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
torch_dtype=torch_dtype,
low_cpu_mem_usage=model_args.low_cpu_mem_usage,
)
else:
model = AutoModelForCausalLM.from_config(config, trust_remote_code=model_args.trust_remote_code)
n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values())
logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params")
...
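
Note that the from-scratch branch forwards the flag as well: a config fetched from the Hub can point at a custom model class through its `auto_map`, so `from_config` needs `trust_remote_code` even though no pretrained weights are loaded. A sketch ("some-org/custom-clm" is a placeholder repo id):

from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("some-org/custom-clm", trust_remote_code=True)
# Weights are randomly initialized, but the custom class must still be imported.
model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)
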
@@ -193,6 +193,16 @@ def parse_args():
"--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
)
parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--trust_remote_code",
type=bool,
default=False,
help=(
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
),
)
parser.add_argument(
"--checkpointing_steps",
type=str,
@@ -362,17 +372,27 @@ def main():
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if args.config_name:
config = AutoConfig.from_pretrained(
args.config_name,
trust_remote_code=args.trust_remote_code,
)
elif args.model_name_or_path:
config = AutoConfig.from_pretrained(
args.model_name_or_path,
trust_remote_code=args.trust_remote_code,
)
else:
config = CONFIG_MAPPING[args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code
)
elif args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
args.model_name_or_path, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code
)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
@@ -385,10 +405,11 @@ def main():
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
low_cpu_mem_usage=args.low_cpu_mem_usage,
trust_remote_code=args.trust_remote_code,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForCausalLM.from_config(config, trust_remote_code=args.trust_remote_code)
# We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
# on a small vocab and want a smaller embedding size, remove this test.
...
@@ -123,6 +123,16 @@ class ModelArguments:
"help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
)
},
)
low_cpu_mem_usage: bool = field(
default=False,
metadata={
@@ -380,6 +390,7 @@ def main():
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"token": model_args.token,
"trust_remote_code": model_args.trust_remote_code,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
@@ -398,6 +409,7 @@ def main():
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"token": model_args.token,
"trust_remote_code": model_args.trust_remote_code,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
@@ -417,11 +429,12 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
low_cpu_mem_usage=model_args.low_cpu_mem_usage,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForMaskedLM.from_config(config, trust_remote_code=model_args.trust_remote_code)
# We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
# on a small vocab and want a smaller embedding size, remove this test.
...
@@ -200,6 +200,16 @@ def parse_args():
"--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
)
parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--trust_remote_code",
type=bool,
default=False,
help=(
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
),
)
parser.add_argument(
"--checkpointing_steps",
type=str,
@@ -367,17 +377,21 @@ def main():
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if args.config_name:
config = AutoConfig.from_pretrained(args.config_name, trust_remote_code=args.trust_remote_code)
elif args.model_name_or_path:
config = AutoConfig.from_pretrained(args.model_name_or_path, trust_remote_code=args.trust_remote_code)
else:
config = CONFIG_MAPPING[args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code
)
elif args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
args.model_name_or_path, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code
)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
@@ -390,10 +404,11 @@ def main():
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
low_cpu_mem_usage=args.low_cpu_mem_usage,
trust_remote_code=args.trust_remote_code,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForMaskedLM.from_config(config, trust_remote_code=args.trust_remote_code)
# We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
# on a small vocab and want a smaller embedding size, remove this test.
...
@@ -95,6 +95,16 @@ class ModelArguments:
"help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
)
},
)
@dataclass
@@ -328,6 +338,7 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
@@ -335,6 +346,7 @@ def main():
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
model = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path,
@@ -343,6 +355,7 @@ def main():
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
# When using your own dataset or a different dataset from swag, you will probably need to change this.
...
@@ -182,6 +182,16 @@ def parse_args():
"--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
)
parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--trust_remote_code",
type=bool,
default=False,
help=(
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
),
)
parser.add_argument(
"--checkpointing_steps",
type=str,
@@ -374,17 +384,21 @@ def main():
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if args.config_name:
config = AutoConfig.from_pretrained(args.model_name_or_path, trust_remote_code=args.trust_remote_code)
elif args.model_name_or_path:
config = AutoConfig.from_pretrained(args.model_name_or_path, trust_remote_code=args.trust_remote_code)
else:
config = CONFIG_MAPPING[args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code
)
elif args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
args.model_name_or_path, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code
)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
@@ -396,10 +410,11 @@ def main():
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
trust_remote_code=args.trust_remote_code,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForMultipleChoice.from_config(config, trust_remote_code=args.trust_remote_code)
# We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
# on a small vocab and want a smaller embedding size, remove this test.
...