Unverified commit 14510938, authored by Jackmin801, committed by GitHub

Allow `trust_remote_code` in example scripts (#25248)

* pytorch examples

* pytorch mim no trainer

* cookiecutter

* flax examples

* missed line in pytorch run_glue

* tensorflow examples

* tensorflow run_clip

* tensorflow run_mlm

* tensorflow run_ner

* tensorflow run_clm

* pytorch example from_configs

* pytorch no trainer examples

* Revert "tensorflow run_clip"

This reverts commit 261f86ac1f1c9e05dd3fd0291e1a1f8e573781d5.

* fix: duplicated argument
parent 65001cb1
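
For context: `trust_remote_code` is the switch that lets the Auto classes load model or tokenizer code shipped inside a Hub repository instead of code bundled with transformers, and the diff below simply threads it through every `from_pretrained`/`from_config` call in the example scripts. A minimal sketch of what the flag gates, using a hypothetical repository id:

    from transformers import AutoModel

    # Hypothetical repo id. Any Hub repository whose config declares an
    # `auto_map` to its own modeling files requires trust_remote_code=True;
    # without it, from_pretrained refuses to execute the repo's Python code
    # on the local machine.
    model = AutoModel.from_pretrained("some-user/model-with-custom-code", trust_remote_code=True)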
@@ -95,6 +95,16 @@ class ModelArguments:
             "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
         },
     )
+    trust_remote_code: bool = field(
+        default=False,
+        metadata={
+            "help": (
+                "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+                "execute code present on the Hub on your local machine."
+            )
+        },
+    )
 
 
 @dataclass
@@ -336,6 +346,7 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
     tokenizer = AutoTokenizer.from_pretrained(
         model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
@@ -343,6 +354,7 @@ def main():
         use_fast=True,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
     model = AutoModelForQuestionAnswering.from_pretrained(
         model_args.model_name_or_path,
@@ -351,6 +363,7 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
 
     # Tokenizer check: this script requires a fast tokenizer.
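
The Trainer-based scripts turn a dataclass field like the one added above into a CLI option through `HfArgumentParser`. A minimal sketch of that mechanism, with a trimmed stand-in dataclass rather than the full `ModelArguments` from the script:

    from dataclasses import dataclass, field

    from transformers import HfArgumentParser

    @dataclass
    class ModelArguments:
        trust_remote_code: bool = field(default=False, metadata={"help": "Allow custom Hub code."})

    parser = HfArgumentParser(ModelArguments)
    # HfArgumentParser turns bool fields into flags that take an optional
    # explicit value, so a bare `--trust_remote_code` means True.
    (model_args,) = parser.parse_args_into_dataclasses(["--trust_remote_code"])
    assert model_args.trust_remote_code is True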
......
@@ -273,6 +273,16 @@ def parse_args():
         "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
     )
     parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
+    parser.add_argument(
+        "--trust_remote_code",
+        type=bool,
+        default=False,
+        help=(
+            "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+            "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+            "execute code present on the Hub on your local machine."
+        ),
+    )
     parser.add_argument(
         "--checkpointing_steps",
         type=str,
@@ -415,17 +425,21 @@ def main():
     # download model & vocab.
     if args.config_name:
-        config = AutoConfig.from_pretrained(args.config_name)
+        config = AutoConfig.from_pretrained(args.config_name, trust_remote_code=args.trust_remote_code)
     elif args.model_name_or_path:
-        config = AutoConfig.from_pretrained(args.model_name_or_path)
+        config = AutoConfig.from_pretrained(args.model_name_or_path, trust_remote_code=args.trust_remote_code)
     else:
         config = CONFIG_MAPPING[args.model_type]()
         logger.warning("You are instantiating a new config instance from scratch.")
 
     if args.tokenizer_name:
-        tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
+        tokenizer = AutoTokenizer.from_pretrained(
+            args.tokenizer_name, use_fast=True, trust_remote_code=args.trust_remote_code
+        )
     elif args.model_name_or_path:
-        tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=True)
+        tokenizer = AutoTokenizer.from_pretrained(
+            args.model_name_or_path, use_fast=True, trust_remote_code=args.trust_remote_code
+        )
     else:
         raise ValueError(
             "You are instantiating a new tokenizer from scratch. This is not supported by this script."
@@ -437,10 +451,11 @@ def main():
             args.model_name_or_path,
             from_tf=bool(".ckpt" in args.model_name_or_path),
             config=config,
+            trust_remote_code=args.trust_remote_code,
         )
     else:
         logger.info("Training new model from scratch")
-        model = AutoModelForQuestionAnswering.from_config(config)
+        model = AutoModelForQuestionAnswering.from_config(config, trust_remote_code=args.trust_remote_code)
 
     # Preprocessing the datasets.
     # Preprocessing is slightly different for training and evaluation.
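
A caveat on the `no_trainer` variant above: registering the flag with argparse's `type=bool` does not parse values the way the help text suggests, because `bool("False")` is `True` (any non-empty string is truthy), so `--trust_remote_code False` still enables the option; only omitting the flag keeps it off. A sketch of a more defensive parser, should that matter (the helper below is illustrative, not from this PR):

    import argparse

    def str_to_bool(value: str) -> bool:
        # Parse textual booleans explicitly instead of relying on Python's
        # truthiness, which treats every non-empty string as True.
        lowered = value.lower()
        if lowered in {"true", "t", "yes", "y", "1"}:
            return True
        if lowered in {"false", "f", "no", "n", "0"}:
            return False
        raise argparse.ArgumentTypeError(f"expected a boolean, got {value!r}")

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--trust_remote_code",
        type=str_to_bool,
        nargs="?",   # allow a bare --trust_remote_code
        const=True,  # a bare flag means True
        default=False,
    )

    assert parser.parse_args(["--trust_remote_code", "False"]).trust_remote_code is False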
......
@@ -96,6 +96,16 @@ class ModelArguments:
             "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
         },
     )
+    trust_remote_code: bool = field(
+        default=False,
+        metadata={
+            "help": (
+                "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+                "execute code present on the Hub on your local machine."
+            )
+        },
+    )
 
 
 @dataclass
@@ -381,6 +391,7 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
     tokenizer = AutoTokenizer.from_pretrained(
         model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
@@ -388,6 +399,7 @@ def main():
         use_fast=model_args.use_fast_tokenizer,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
     model = AutoModelForSeq2SeqLM.from_pretrained(
         model_args.model_name_or_path,
@@ -396,6 +408,7 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
 
     # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
......
@@ -257,6 +257,16 @@ class ModelArguments:
             "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
         },
     )
+    trust_remote_code: bool = field(
+        default=False,
+        metadata={
+            "help": (
+                "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+                "execute code present on the Hub on your local machine."
+            )
+        },
+    )
 
 
 def main():
@@ -393,6 +403,7 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
     model = AutoModelForSemanticSegmentation.from_pretrained(
         model_args.model_name_or_path,
@@ -401,12 +412,14 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
     image_processor = AutoImageProcessor.from_pretrained(
         model_args.image_processor_name or model_args.model_name_or_path,
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
 
     # Define torchvision transforms to be applied to each image + target.
......
@@ -273,6 +273,16 @@ def parse_args():
         "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
     )
     parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
+    parser.add_argument(
+        "--trust_remote_code",
+        type=bool,
+        default=False,
+        help=(
+            "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+            "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+            "execute code present on the Hub on your local machine."
+        ),
+    )
     parser.add_argument(
         "--checkpointing_steps",
         type=str,
@@ -400,9 +410,15 @@ def main():
     label2id = {v: k for k, v in id2label.items()}
 
     # Load pretrained model and image processor
-    config = AutoConfig.from_pretrained(args.model_name_or_path, id2label=id2label, label2id=label2id)
-    image_processor = AutoImageProcessor.from_pretrained(args.model_name_or_path)
-    model = AutoModelForSemanticSegmentation.from_pretrained(args.model_name_or_path, config=config)
+    config = AutoConfig.from_pretrained(
+        args.model_name_or_path, id2label=id2label, label2id=label2id, trust_remote_code=args.trust_remote_code
+    )
+    image_processor = AutoImageProcessor.from_pretrained(
+        args.model_name_or_path, trust_remote_code=args.trust_remote_code
+    )
+    model = AutoModelForSemanticSegmentation.from_pretrained(
+        args.model_name_or_path, config=config, trust_remote_code=args.trust_remote_code
+    )
 
     # Preprocessing the datasets
     # Define torchvision transforms to be applied to each image + target.
......
@@ -244,6 +244,16 @@ class DataTrainingArguments:
             "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
         },
     )
+    trust_remote_code: bool = field(
+        default=False,
+        metadata={
+            "help": (
+                "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+                "execute code present on the Hub on your local machine."
+            )
+        },
+    )
     unk_token: str = field(
         default="[UNK]",
         metadata={"help": "The unk token for the tokenizer"},
@@ -505,6 +515,7 @@ def main():
         model_args.model_name_or_path,
         cache_dir=model_args.cache_dir,
         token=data_args.token,
+        trust_remote_code=data_args.trust_remote_code,
     )
 
     # 4. Next, if no tokenizer file is defined,
@@ -561,12 +572,14 @@ def main():
     tokenizer = AutoTokenizer.from_pretrained(
         tokenizer_name_or_path,
         token=data_args.token,
+        trust_remote_code=data_args.trust_remote_code,
        **tokenizer_kwargs,
     )
     feature_extractor = AutoFeatureExtractor.from_pretrained(
         model_args.model_name_or_path,
         cache_dir=model_args.cache_dir,
         token=data_args.token,
+        trust_remote_code=data_args.trust_remote_code,
     )
 
     # adapt config
@@ -595,6 +608,7 @@ def main():
         cache_dir=model_args.cache_dir,
         config=config,
         token=data_args.token,
+        trust_remote_code=data_args.trust_remote_code,
     )
 
     # freeze encoder
......
@@ -247,6 +247,16 @@ class DataTrainingArguments:
             "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
         },
     )
+    trust_remote_code: bool = field(
+        default=False,
+        metadata={
+            "help": (
+                "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+                "execute code present on the Hub on your local machine."
+            )
+        },
+    )
     unk_token: str = field(
         default="[UNK]",
         metadata={"help": "The unk token for the tokenizer"},
@@ -501,6 +511,7 @@ def main():
         model_args.model_name_or_path,
         cache_dir=model_args.cache_dir,
         token=data_args.token,
+        trust_remote_code=data_args.trust_remote_code,
     )
 
     # 4. Next, if no tokenizer file is defined,
@@ -517,6 +528,7 @@ def main():
     tokenizer = AutoTokenizer.from_pretrained(
         tokenizer_name_or_path,
         token=data_args.token,
+        trust_remote_code=data_args.trust_remote_code,
     )
     vocab_dict = tokenizer.vocab.copy()
     if tokenizer.target_lang is None:
@@ -584,12 +596,14 @@ def main():
     tokenizer = AutoTokenizer.from_pretrained(
         tokenizer_name_or_path,
         token=data_args.token,
+        trust_remote_code=data_args.trust_remote_code,
        **tokenizer_kwargs,
     )
     feature_extractor = AutoFeatureExtractor.from_pretrained(
         model_args.model_name_or_path,
         cache_dir=model_args.cache_dir,
         token=data_args.token,
+        trust_remote_code=data_args.trust_remote_code,
     )
 
     # adapt config
@@ -615,6 +629,7 @@ def main():
         cache_dir=model_args.cache_dir,
         config=config,
         token=data_args.token,
+        trust_remote_code=data_args.trust_remote_code,
         ignore_mismatched_sizes=True,
     )
......
@@ -101,6 +101,16 @@ class ModelArguments:
             "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
         },
     )
+    trust_remote_code: bool = field(
+        default=False,
+        metadata={
+            "help": (
+                "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+                "execute code present on the Hub on your local machine."
+            )
+        },
+    )
     freeze_feature_encoder: bool = field(
         default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
     )
@@ -384,6 +394,7 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
 
     config.update({"forced_decoder_ids": model_args.forced_decoder_ids, "suppress_tokens": model_args.suppress_tokens})
@@ -397,6 +408,7 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
     tokenizer = AutoTokenizer.from_pretrained(
         model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
@@ -404,6 +416,7 @@ def main():
         use_fast=model_args.use_fast_tokenizer,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
     model = AutoModelForSpeechSeq2Seq.from_pretrained(
         model_args.model_name_or_path,
@@ -411,6 +424,7 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
 
     if model.config.decoder_start_token_id is None:
......
@@ -115,6 +115,16 @@ class ModelArguments:
             "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
         },
     )
+    trust_remote_code: bool = field(
+        default=False,
+        metadata={
+            "help": (
+                "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+                "execute code present on the Hub on your local machine."
+            )
+        },
+    )
     resize_position_embeddings: Optional[bool] = field(
         default=None,
         metadata={
@@ -431,6 +441,7 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
     tokenizer = AutoTokenizer.from_pretrained(
         model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
@@ -438,6 +449,7 @@ def main():
         use_fast=model_args.use_fast_tokenizer,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
     model = AutoModelForSeq2SeqLM.from_pretrained(
         model_args.model_name_or_path,
@@ -446,6 +458,7 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
 
     # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
......
@@ -266,6 +266,16 @@ def parse_args():
         "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
     )
     parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
+    parser.add_argument(
+        "--trust_remote_code",
+        type=bool,
+        default=False,
+        help=(
+            "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+            "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+            "execute code present on the Hub on your local machine."
+        ),
+    )
     parser.add_argument(
         "--checkpointing_steps",
         type=str,
@@ -406,17 +416,21 @@ def main():
     # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
     # download model & vocab.
     if args.config_name:
-        config = AutoConfig.from_pretrained(args.config_name)
+        config = AutoConfig.from_pretrained(args.config_name, trust_remote_code=args.trust_remote_code)
     elif args.model_name_or_path:
-        config = AutoConfig.from_pretrained(args.model_name_or_path)
+        config = AutoConfig.from_pretrained(args.model_name_or_path, trust_remote_code=args.trust_remote_code)
     else:
         config = CONFIG_MAPPING[args.model_type]()
         logger.warning("You are instantiating a new config instance from scratch.")
 
     if args.tokenizer_name:
-        tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer)
+        tokenizer = AutoTokenizer.from_pretrained(
+            args.tokenizer_name, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code
+        )
     elif args.model_name_or_path:
-        tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)
+        tokenizer = AutoTokenizer.from_pretrained(
+            args.model_name_or_path, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code
+        )
     else:
         raise ValueError(
             "You are instantiating a new tokenizer from scratch. This is not supported by this script."
@@ -428,10 +442,11 @@ def main():
             args.model_name_or_path,
             from_tf=bool(".ckpt" in args.model_name_or_path),
             config=config,
+            trust_remote_code=args.trust_remote_code,
         )
     else:
         logger.info("Training new model from scratch")
-        model = AutoModelForSeq2SeqLM.from_config(config)
+        model = AutoModelForSeq2SeqLM.from_config(config, trust_remote_code=args.trust_remote_code)
 
     # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
     # on a small vocab and want a smaller embedding size, remove this test.
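
The from-scratch branch above needs the flag as well: even with no weights to download, `from_config` may have to import custom classes when the config's `auto_map` points at code that lives in the repo. A sketch with a hypothetical repository id:

    from transformers import AutoConfig, AutoModelForSeq2SeqLM

    # Hypothetical repo whose config.json maps the architecture to custom code.
    config = AutoConfig.from_pretrained("some-user/custom-seq2seq", trust_remote_code=True)
    # Instantiating the architecture from the config alone still executes that
    # custom modeling code, so the flag is needed here too.
    model = AutoModelForSeq2SeqLM.from_config(config, trust_remote_code=True)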
......
@@ -243,6 +243,16 @@ class ModelArguments:
             "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
         },
     )
+    trust_remote_code: bool = field(
+        default=False,
+        metadata={
+            "help": (
+                "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+                "execute code present on the Hub on your local machine."
+            )
+        },
+    )
     ignore_mismatched_sizes: bool = field(
         default=False,
         metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
@@ -482,6 +492,7 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
 
     if is_regression:
@@ -500,6 +511,7 @@ def main():
         use_fast=model_args.use_fast_tokenizer,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
     model = AutoModelForSequenceClassification.from_pretrained(
         model_args.model_name_or_path,
@@ -508,6 +520,7 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
         ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
     )
......
@@ -204,6 +204,16 @@ class ModelArguments:
             "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
         },
     )
+    trust_remote_code: bool = field(
+        default=False,
+        metadata={
+            "help": (
+                "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+                "execute code present on the Hub on your local machine."
+            )
+        },
+    )
     ignore_mismatched_sizes: bool = field(
         default=False,
         metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
@@ -375,6 +385,7 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
     tokenizer = AutoTokenizer.from_pretrained(
         model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
@@ -382,6 +393,7 @@ def main():
         use_fast=model_args.use_fast_tokenizer,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
     model = AutoModelForSequenceClassification.from_pretrained(
         model_args.model_name_or_path,
@@ -390,6 +402,7 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
         ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
     )
......
@@ -156,6 +156,16 @@ def parse_args():
         "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
     )
     parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
+    parser.add_argument(
+        "--trust_remote_code",
+        type=bool,
+        default=False,
+        help=(
+            "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+            "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+            "execute code present on the Hub on your local machine."
+        ),
+    )
     parser.add_argument(
         "--checkpointing_steps",
         type=str,
@@ -309,13 +319,21 @@ def main():
     #
     # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
     # download model & vocab.
-    config = AutoConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
-    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)
+    config = AutoConfig.from_pretrained(
+        args.model_name_or_path,
+        num_labels=num_labels,
+        finetuning_task=args.task_name,
+        trust_remote_code=args.trust_remote_code,
+    )
+    tokenizer = AutoTokenizer.from_pretrained(
+        args.model_name_or_path, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code
+    )
     model = AutoModelForSequenceClassification.from_pretrained(
         args.model_name_or_path,
         from_tf=bool(".ckpt" in args.model_name_or_path),
         config=config,
         ignore_mismatched_sizes=args.ignore_mismatched_sizes,
+        trust_remote_code=args.trust_remote_code,
     )
 
     # Preprocessing the datasets
......
@@ -168,6 +168,16 @@ class ModelArguments:
             "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
         },
     )
+    trust_remote_code: bool = field(
+        default=False,
+        metadata={
+            "help": (
+                "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+                "execute code present on the Hub on your local machine."
+            )
+        },
+    )
     ignore_mismatched_sizes: bool = field(
         default=False,
         metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
@@ -292,6 +302,7 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
     tokenizer = AutoTokenizer.from_pretrained(
         model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
@@ -300,6 +311,7 @@ def main():
         use_fast=model_args.use_fast_tokenizer,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
     model = AutoModelForSequenceClassification.from_pretrained(
         model_args.model_name_or_path,
@@ -308,6 +320,7 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
         ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
     )
......
@@ -95,6 +95,16 @@ class ModelArguments:
             "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
         },
     )
+    trust_remote_code: bool = field(
+        default=False,
+        metadata={
+            "help": (
+                "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+                "execute code present on the Hub on your local machine."
+            )
+        },
+    )
     ignore_mismatched_sizes: bool = field(
         default=False,
         metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
@@ -362,6 +372,7 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
 
     tokenizer_name_or_path = model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path
@@ -372,6 +383,7 @@ def main():
             use_fast=True,
             revision=model_args.model_revision,
             token=model_args.token,
+            trust_remote_code=model_args.trust_remote_code,
             add_prefix_space=True,
         )
     else:
@@ -381,6 +393,7 @@ def main():
             use_fast=True,
             revision=model_args.model_revision,
             token=model_args.token,
+            trust_remote_code=model_args.trust_remote_code,
         )
 
     model = AutoModelForTokenClassification.from_pretrained(
@@ -390,6 +403,7 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
         ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
     )
......
@@ -210,6 +210,16 @@ def parse_args():
         "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
     )
     parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
+    parser.add_argument(
+        "--trust_remote_code",
+        type=bool,
+        default=False,
+        help=(
+            "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+            "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+            "execute code present on the Hub on your local machine."
+        ),
+    )
     parser.add_argument(
         "--checkpointing_steps",
         type=str,
@@ -388,9 +398,13 @@ def main():
     # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
     # download model & vocab.
     if args.config_name:
-        config = AutoConfig.from_pretrained(args.config_name, num_labels=num_labels)
+        config = AutoConfig.from_pretrained(
+            args.config_name, num_labels=num_labels, trust_remote_code=args.trust_remote_code
+        )
     elif args.model_name_or_path:
-        config = AutoConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels)
+        config = AutoConfig.from_pretrained(
+            args.model_name_or_path, num_labels=num_labels, trust_remote_code=args.trust_remote_code
+        )
     else:
         config = CONFIG_MAPPING[args.model_type]()
         logger.warning("You are instantiating a new config instance from scratch.")
@@ -403,9 +417,13 @@ def main():
     )
 
     if config.model_type in {"bloom", "gpt2", "roberta"}:
-        tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, use_fast=True, add_prefix_space=True)
+        tokenizer = AutoTokenizer.from_pretrained(
+            tokenizer_name_or_path, use_fast=True, add_prefix_space=True, trust_remote_code=args.trust_remote_code
+        )
     else:
-        tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, use_fast=True)
+        tokenizer = AutoTokenizer.from_pretrained(
+            tokenizer_name_or_path, use_fast=True, trust_remote_code=args.trust_remote_code
+        )
 
     if args.model_name_or_path:
         model = AutoModelForTokenClassification.from_pretrained(
@@ -413,10 +431,11 @@ def main():
             from_tf=bool(".ckpt" in args.model_name_or_path),
             config=config,
             ignore_mismatched_sizes=args.ignore_mismatched_sizes,
+            trust_remote_code=args.trust_remote_code,
         )
     else:
         logger.info("Training new model from scratch")
-        model = AutoModelForTokenClassification.from_config(config)
+        model = AutoModelForTokenClassification.from_config(config, trust_remote_code=args.trust_remote_code)
 
     # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
     # on a small vocab and want a smaller embedding size, remove this test.
......
@@ -105,6 +105,16 @@ class ModelArguments:
             "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
         },
     )
+    trust_remote_code: bool = field(
+        default=False,
+        metadata={
+            "help": (
+                "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+                "execute code present on the Hub on your local machine."
+            )
+        },
+    )
 
 
 @dataclass
@@ -380,6 +390,7 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
     tokenizer = AutoTokenizer.from_pretrained(
         model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
@@ -387,6 +398,7 @@ def main():
         use_fast=model_args.use_fast_tokenizer,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
     model = AutoModelForSeq2SeqLM.from_pretrained(
         model_args.model_name_or_path,
@@ -395,6 +407,7 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
 
     # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
......
@@ -257,6 +257,16 @@ def parse_args():
         "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
     )
     parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
+    parser.add_argument(
+        "--trust_remote_code",
+        type=bool,
+        default=False,
+        help=(
+            "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+            "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+            "execute code present on the Hub on your local machine."
+        ),
+    )
     parser.add_argument(
         "--checkpointing_steps",
         type=str,
@@ -386,17 +396,21 @@ def main():
     # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
     # download model & vocab.
     if args.config_name:
-        config = AutoConfig.from_pretrained(args.config_name)
+        config = AutoConfig.from_pretrained(args.config_name, trust_remote_code=args.trust_remote_code)
     elif args.model_name_or_path:
-        config = AutoConfig.from_pretrained(args.model_name_or_path)
+        config = AutoConfig.from_pretrained(args.model_name_or_path, trust_remote_code=args.trust_remote_code)
     else:
         config = CONFIG_MAPPING[args.model_type]()
         logger.warning("You are instantiating a new config instance from scratch.")
 
     if args.tokenizer_name:
-        tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer)
+        tokenizer = AutoTokenizer.from_pretrained(
+            args.tokenizer_name, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code
+        )
     elif args.model_name_or_path:
-        tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)
+        tokenizer = AutoTokenizer.from_pretrained(
+            args.model_name_or_path, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code
+        )
     else:
         raise ValueError(
             "You are instantiating a new tokenizer from scratch. This is not supported by this script."
@@ -408,10 +422,11 @@ def main():
             args.model_name_or_path,
             from_tf=bool(".ckpt" in args.model_name_or_path),
             config=config,
+            trust_remote_code=args.trust_remote_code,
        )
     else:
         logger.info("Training new model from scratch")
-        model = AutoModelForSeq2SeqLM.from_config(config)
+        model = AutoModelForSeq2SeqLM.from_config(config, trust_remote_code=args.trust_remote_code)
 
     # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
     # on a small vocab and want a smaller embedding size, remove this test.
......
@@ -173,6 +173,16 @@ class ModelArguments:
             "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
         },
     )
+    trust_remote_code: bool = field(
+        default=False,
+        metadata={
+            "help": (
+                "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+                "execute code present on the Hub on your local machine."
+            )
+        },
+    )
     ignore_mismatched_sizes: bool = field(
         default=False,
         metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
@@ -323,12 +333,14 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
     image_processor = AutoImageProcessor.from_pretrained(
         model_args.image_processor_name or model_args.model_name_or_path,
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
     )
 
     # If we don't have a validation split, split off a percentage of train as validation.
@@ -449,6 +461,7 @@ def main():
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
         token=model_args.token,
+        trust_remote_code=model_args.trust_remote_code,
         ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
     )
     num_replicas = training_args.strategy.num_replicas_in_sync
......
@@ -128,6 +128,16 @@ class ModelArguments:
             "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
         },
     )
+    trust_remote_code: bool = field(
+        default=False,
+        metadata={
+            "help": (
+                "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+                "execute code present on the Hub on your local machine."
+            )
+        },
+    )
 
     def __post_init__(self):
         if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
@@ -366,17 +376,26 @@ def main():
     # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
     # download model & vocab.
     if model_args.config_name:
-        config = AutoConfig.from_pretrained(model_args.config_name)
+        config = AutoConfig.from_pretrained(
+            model_args.config_name,
+            trust_remote_code=model_args.trust_remote_code,
+        )
     elif model_args.model_name_or_path:
-        config = AutoConfig.from_pretrained(model_args.model_name_or_path)
+        config = AutoConfig.from_pretrained(
+            model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
+        )
     else:
         config = CONFIG_MAPPING[model_args.model_type]()
         logger.warning("You are instantiating a new config instance from scratch.")
 
     if model_args.tokenizer_name:
-        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name)
+        tokenizer = AutoTokenizer.from_pretrained(
+            model_args.tokenizer_name, trust_remote_code=model_args.trust_remote_code
+        )
     elif model_args.model_name_or_path:
-        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path)
+        tokenizer = AutoTokenizer.from_pretrained(
+            model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
+        )
     else:
         raise ValueError(
             "You are instantiating a new tokenizer from scratch. This is not supported by this script."
@@ -479,12 +498,16 @@ def main():
     with training_args.strategy.scope():
         # region Prepare model
         if checkpoint is not None:
-            model = TFAutoModelForCausalLM.from_pretrained(checkpoint, config=config)
+            model = TFAutoModelForCausalLM.from_pretrained(
+                checkpoint, config=config, trust_remote_code=model_args.trust_remote_code
+            )
         elif model_args.model_name_or_path:
-            model = TFAutoModelForCausalLM.from_pretrained(model_args.model_name_or_path, config=config)
+            model = TFAutoModelForCausalLM.from_pretrained(
+                model_args.model_name_or_path, config=config, trust_remote_code=model_args.trust_remote_code
+            )
         else:
             logger.info("Training new model from scratch")
-            model = TFAutoModelForCausalLM.from_config(config)
+            model = TFAutoModelForCausalLM.from_config(config, trust_remote_code=model_args.trust_remote_code)
 
         # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
         # on a small vocab and want a smaller embedding size, remove this test.
......
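
Finally, because every help string in this diff warns that the option executes Hub code locally, a common mitigation (not part of this PR) is to pin `revision` to an exact commit so the audited custom code cannot change underneath you:

    from transformers import AutoModelForCausalLM

    # Hypothetical repo and commit hash. Pinning `revision` guarantees the
    # custom modeling code that runs is exactly the code that was reviewed.
    model = AutoModelForCausalLM.from_pretrained(
        "some-user/custom-causal-lm",
        trust_remote_code=True,
        revision="0123456789abcdef0123456789abcdef01234567",
    )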