chenpangpang / transformers / Commits

Commit 40ea9ab2 (unverified)
Authored Oct 12, 2023 by Tom Aarsen; committed by GitHub on Oct 12, 2023
Add many missing spaces in adjacent strings (#26751)
Add missing spaces in adjacent strings
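Why the missing spaces matter (illustrative note, not part of the commit): Python implicitly concatenates adjacent string literals at parse time with no separator, so when the first literal lacks a trailing space the rendered help text or log message runs two words together. A minimal sketch of the behavior, using a shortened, assumed version of one of the messages touched by this diff:

# Minimal sketch (assumed example, not code from the commit): adjacent string
# literals are joined at parse time, with nothing inserted between them.
before = (
    "Connection to Ray cluster failed. Make sure a Ray"   # missing trailing space
    "cluster is running."
)
after = (
    "Connection to Ray cluster failed. Make sure a Ray "  # trailing space added
    "cluster is running."
)
print(before)  # ... Make sure a Raycluster is running.
print(after)   # ... Make sure a Ray cluster is running.

Each hunk below applies the same one-character fix: a space is added to the end of the first literal so the concatenated message reads correctly.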
parent 3bc65505

Changes: 154 files in total; this page shows 20 changed files with 38 additions and 38 deletions (+38 -38).
examples/research_projects/mm-imdb/run_mmimdb.py                                        +1 -1
examples/research_projects/movement-pruning/bertarize.py                                +2 -2
examples/research_projects/movement-pruning/counts_parameters.py                        +2 -2
examples/research_projects/movement-pruning/masked_run_glue.py                          +3 -3
examples/research_projects/movement-pruning/masked_run_squad.py                         +3 -3
examples/research_projects/performer/run_mlm_performer.py                               +2 -2
examples/research_projects/pplm/run_pplm_discrim_train.py                               +2 -2
examples/research_projects/quantization-qdqbert/evaluate-hf-trt-qa.py                   +2 -2
examples/research_projects/quantization-qdqbert/run_quant_qa.py                         +1 -1
examples/research_projects/rag-end2end-retriever/finetune_rag.py                        +2 -2
examples/research_projects/rag-end2end-retriever/lightning_base.py                      +1 -1
examples/research_projects/rag/finetune_rag.py                                          +2 -2
examples/research_projects/rag/lightning_base.py                                        +1 -1
examples/research_projects/robust-speech-event/run_speech_recognition_ctc_bnb.py        +3 -3
examples/research_projects/robust-speech-event/run_speech_recognition_ctc_streaming.py  +3 -3
examples/research_projects/seq2seq-distillation/lightning_base.py                       +1 -1
examples/research_projects/tapex/run_tabfact_with_tapex.py                              +1 -1
examples/research_projects/tapex/run_wikisql_with_tapex.py                              +2 -2
examples/research_projects/tapex/run_wikitablequestions_with_tapex.py                   +2 -2
examples/research_projects/wav2vec2/run_common_voice.py                                 +2 -2
examples/research_projects/mm-imdb/run_mmimdb.py

@@ -426,7 +426,7 @@ def main():
         type=str,
         default="O1",
         help=(
-            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
+            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
             "See details at https://nvidia.github.io/apex/amp.html"
         ),
     )
examples/research_projects/movement-pruning/bertarize.py

@@ -112,8 +112,8 @@ if __name__ == "__main__":
         type=float,
         required=False,
         help=(
-            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
-            "For `sigmoied_threshold`, it is the threshold\tau against which the (sigmoied) scores are compared."
+            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. "
+            "For `sigmoied_threshold`, it is the threshold\tau against which the (sigmoied) scores are compared. "
             "Not needed for `l0`"
         ),
     )
examples/research_projects/movement-pruning/counts_parameters.py

@@ -79,8 +79,8 @@ if __name__ == "__main__":
         type=float,
         required=False,
         help=(
-            "For `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
-            "For `sigmoied_threshold`, it is the threshold\tau against which the (sigmoied) scores are compared."
+            "For `topK`, it is the level of remaining weights (in %) in the fine-pruned model. "
+            "For `sigmoied_threshold`, it is the threshold\tau against which the (sigmoied) scores are compared. "
             "Not needed for `l0`"
         ),
     )
examples/research_projects/movement-pruning/masked_run_glue.py

@@ -671,7 +671,7 @@ def main():
         default=1,
         type=int,
         help=(
-            "Run `initial_warmup` * `warmup_steps` steps of threshold warmup during which threshold stays"
+            "Run `initial_warmup` * `warmup_steps` steps of threshold warmup during which threshold stays "
             "at its `initial_threshold` value (sparsity schedule)."
         ),
     )

@@ -680,7 +680,7 @@ def main():
         default=2,
         type=int,
         help=(
-            "Run `final_warmup` * `warmup_steps` steps of threshold cool-down during which threshold stays"
+            "Run `final_warmup` * `warmup_steps` steps of threshold cool-down during which threshold stays "
             "at its final_threshold value (sparsity schedule)."
         ),
     )

@@ -799,7 +799,7 @@ def main():
         type=str,
         default="O1",
         help=(
-            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
+            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
             "See details at https://nvidia.github.io/apex/amp.html"
         ),
     )
examples/research_projects/movement-pruning/masked_run_squad.py

@@ -789,7 +789,7 @@ def main():
         default=1,
         type=int,
         help=(
-            "Run `initial_warmup` * `warmup_steps` steps of threshold warmup during which threshold stays"
+            "Run `initial_warmup` * `warmup_steps` steps of threshold warmup during which threshold stays "
             "at its `initial_threshold` value (sparsity schedule)."
         ),
     )

@@ -798,7 +798,7 @@ def main():
         default=2,
         type=int,
         help=(
-            "Run `final_warmup` * `warmup_steps` steps of threshold cool-down during which threshold stays"
+            "Run `final_warmup` * `warmup_steps` steps of threshold cool-down during which threshold stays "
             "at its final_threshold value (sparsity schedule)."
         ),
     )

@@ -946,7 +946,7 @@ def main():
         type=str,
         default="O1",
         help=(
-            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
+            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
             "See details at https://nvidia.github.io/apex/amp.html"
         ),
     )
examples/research_projects/performer/run_mlm_performer.py

@@ -466,7 +466,7 @@ if __name__ == "__main__":
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
-            f"Output directory ({training_args.output_dir}) already exists and is not empty."
+            f"Output directory ({training_args.output_dir}) already exists and is not empty. "
            "Use --overwrite_output_dir to overcome."
        )

@@ -558,7 +558,7 @@ if __name__ == "__main__":
        )
    else:
        raise ValueError(
-            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
+            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )
examples/research_projects/pplm/run_pplm_discrim_train.py

@@ -490,8 +490,8 @@ if __name__ == "__main__":
         default="SST",
         choices=("SST", "clickbait", "toxic", "generic"),
         help=(
-            "dataset to train the discriminator on."
-            "In case of generic, the dataset is expected"
+            "dataset to train the discriminator on. "
+            "In case of generic, the dataset is expected "
             "to be a TSBV file with structure: class \\t text"
         ),
     )
examples/research_projects/quantization-qdqbert/evaluate-hf-trt-qa.py

@@ -153,7 +153,7 @@ if args.tokenizer_name:
     tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
 else:
     raise ValueError(
-        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
+        "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
         "You can do it from another script, save it, and load it from here, using --tokenizer_name."
     )

@@ -288,7 +288,7 @@ pad_on_right = tokenizer.padding_side == "right"
 if args.max_seq_length > tokenizer.model_max_length:
     logger.warning(
-        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
+        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the "
         f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
     )
examples/research_projects/quantization-qdqbert/run_quant_qa.py

@@ -365,7 +365,7 @@ def main():
     if data_args.max_seq_length > tokenizer.model_max_length:
         logger.warning(
-            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
+            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
             f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
         )
     max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
examples/research_projects/rag-end2end-retriever/finetune_rag.py

@@ -680,7 +680,7 @@ class GenerativeQAModule(BaseTransformer):
             type=int,
             default=1,
             help=(
-                "The number of retrieval actors to use when Ray is selected"
+                "The number of retrieval actors to use when Ray is selected "
                 "for the distributed retriever. Has no effect when "
                 "distributed_retriever is set to pytorch."
             ),

@@ -719,7 +719,7 @@ def main(args=None, model=None) -> GenerativeQAModule:
             ray.init(address=args.ray_address, namespace="rag")
         except (ConnectionError, ValueError):
             logger.warning(
-                "Connection to Ray cluster failed. Make sure a Ray"
+                "Connection to Ray cluster failed. Make sure a Ray "
                 "cluster is running by either using Ray's cluster "
                 "launcher (`ray up`) or by manually starting Ray on "
                 "each node via `ray start --head` for the head node "
examples/research_projects/rag-end2end-retriever/lightning_base.py

@@ -333,7 +333,7 @@ def add_generic_args(parser, root_dir) -> None:
         type=str,
         default="O2",
         help=(
-            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
+            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
             "See details at https://nvidia.github.io/apex/amp.html"
         ),
     )
examples/research_projects/rag/finetune_rag.py

@@ -525,7 +525,7 @@ class GenerativeQAModule(BaseTransformer):
             type=int,
             default=1,
             help=(
-                "The number of retrieval actors to use when Ray is selected"
+                "The number of retrieval actors to use when Ray is selected "
                 "for the distributed retriever. Has no effect when "
                 "distributed_retriever is set to pytorch."
             ),

@@ -552,7 +552,7 @@ def main(args=None, model=None) -> GenerativeQAModule:
             ray.init(address=args.ray_address, namespace="rag")
         except (ConnectionError, ValueError):
             logger.warning(
-                "Connection to Ray cluster failed. Make sure a Ray"
+                "Connection to Ray cluster failed. Make sure a Ray "
                 "cluster is running by either using Ray's cluster "
                 "launcher (`ray up`) or by manually starting Ray on "
                 "each node via `ray start --head` for the head node "
examples/research_projects/rag/lightning_base.py

@@ -322,7 +322,7 @@ def add_generic_args(parser, root_dir) -> None:
         type=str,
         default="O2",
         help=(
-            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
+            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
             "See details at https://nvidia.github.io/apex/amp.html"
         ),
     )
examples/research_projects/robust-speech-event/run_speech_recognition_ctc_bnb.py

@@ -104,8 +104,8 @@ class ModelArguments:
         default=0.05,
         metadata={
             "help": (
-                "Probability of each feature vector along the time axis to be chosen as the start of the vector"
-                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
+                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
+                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                 "vectors will be masked along the time axis."
             )
         },

@@ -399,7 +399,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
     )
     # Set the verbosity to info of the Transformers logger (on main process only):
examples/research_projects/robust-speech-event/run_speech_recognition_ctc_streaming.py

@@ -103,8 +103,8 @@ class ModelArguments:
         default=0.05,
         metadata={
             "help": (
-                "Probability of each feature vector along the time axis to be chosen as the start of the vector"
-                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
+                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
+                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                 "vectors will be masked along the time axis."
             )
         },

@@ -354,7 +354,7 @@ def main():
     # Log on each process the small summary:
     logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
         f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
     )
     # Set the verbosity to info of the Transformers logger (on main process only):
examples/research_projects/seq2seq-distillation/lightning_base.py

@@ -313,7 +313,7 @@ def add_generic_args(parser, root_dir) -> None:
         type=str,
         default="O2",
         help=(
-            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
+            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
             "See details at https://nvidia.github.io/apex/amp.html"
         ),
     )
examples/research_projects/tapex/run_tabfact_with_tapex.py

@@ -325,7 +325,7 @@ def main():
     if data_args.max_seq_length > tokenizer.model_max_length:
         logger.warning(
-            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
+            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
             f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
         )
     max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
examples/research_projects/tapex/run_wikisql_with_tapex.py

@@ -170,7 +170,7 @@ class DataTrainingArguments:
         metadata={
             "help": (
                 "The maximum total sequence length for validation target text after tokenization. Sequences longer "
-                "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
+                "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. "
                 "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                 "during ``evaluate`` and ``predict``."
             )

@@ -379,7 +379,7 @@ def main():
     if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
         logger.warning(
-            "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for"
+            "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for "
             f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
         )
examples/research_projects/tapex/run_wikitablequestions_with_tapex.py

@@ -168,7 +168,7 @@ class DataTrainingArguments:
         metadata={
             "help": (
                 "The maximum total sequence length for validation target text after tokenization. Sequences longer "
-                "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
+                "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. "
                 "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                 "during ``evaluate`` and ``predict``."
             )

@@ -377,7 +377,7 @@ def main():
     if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
         logger.warning(
-            "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for"
+            "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for "
             f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
         )
examples/research_projects/wav2vec2/run_common_voice.py

@@ -80,8 +80,8 @@ class ModelArguments:
         default=0.05,
         metadata={
             "help": (
-                "Propability of each feature vector along the time axis to be chosen as the start of the vector"
-                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
+                "Propability of each feature vector along the time axis to be chosen as the start of the vector "
+                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                 "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
             )
         },