Commit 49006d06 authored by Tim Rault's avatar Tim Rault
Browse files

Bug fix type=bool -> action='store_true' in argparse

parent beb59080
...@@ -37,7 +37,7 @@ parser.add_argument("--vocab_file", default=None, type=str, required=True, ...@@ -37,7 +37,7 @@ parser.add_argument("--vocab_file", default=None, type=str, required=True,
help="The vocabulary file that the BERT model was trained on.") help="The vocabulary file that the BERT model was trained on.")
## Other parameters ## Other parameters
parser.add_argument("--do_lower_case", default=True, type=bool, parser.add_argument("--do_lower_case", default=True, action='store_true',
help="Whether to lower case the input text. Should be True for uncased " help="Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.") "models and False for cased models.")
parser.add_argument("--max_seq_length", default=128, type=int, help="Maximum sequence length.") parser.add_argument("--max_seq_length", default=128, type=int, help="Maximum sequence length.")
......
...@@ -44,8 +44,8 @@ parser.add_argument("--max_seq_length", default=128, type=int, ...@@ -44,8 +44,8 @@ parser.add_argument("--max_seq_length", default=128, type=int,
"Must match data generation.") "Must match data generation.")
parser.add_argument("--max_predictions_per_seq", default=20, type=int, parser.add_argument("--max_predictions_per_seq", default=20, type=int,
help="Maximum number of masked LM predictions per sequence. Must match data generation.") help="Maximum number of masked LM predictions per sequence. Must match data generation.")
parser.add_argument("--do_train", default=False, type=bool, help="Whether to run training.") parser.add_argument("--do_train", default=False, action='store_true', help="Whether to run training.")
parser.add_argument("--do_eval", default=False, type=bool, help="Whether to run eval on the dev set.") parser.add_argument("--do_eval", default=False, action='store_true', help="Whether to run eval on the dev set.")
parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.")
parser.add_argument("--eval_batch_size", default=8, type=int, help="Total batch size for eval.") parser.add_argument("--eval_batch_size", default=8, type=int, help="Total batch size for eval.")
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
...@@ -56,7 +56,7 @@ parser.add_argument("--iterations_per_loop", default=1000, type=int, ...@@ -56,7 +56,7 @@ parser.add_argument("--iterations_per_loop", default=1000, type=int,
help="How many steps to make in each estimator call.") help="How many steps to make in each estimator call.")
parser.add_argument("--max_eval_steps", default=100, type=int, help="Maximum number of eval steps.") parser.add_argument("--max_eval_steps", default=100, type=int, help="Maximum number of eval steps.")
### BEGIN - TO DELETE EVENTUALLY --> NO SENSE IN PYTORCH ### ### BEGIN - TO DELETE EVENTUALLY --> NO SENSE IN PYTORCH ###
parser.add_argument("--use_tpu", default=False, type=bool, help="Whether to use TPU or GPU/CPU.") parser.add_argument("--use_tpu", default=False, action='store_true', help="Whether to use TPU or GPU/CPU.")
parser.add_argument("--tpu_name", default=None, type=str, parser.add_argument("--tpu_name", default=None, type=str,
help="The Cloud TPU to use for training. This should be either the name used when creating the " help="The Cloud TPU to use for training. This should be either the name used when creating the "
"Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.") "Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.")
......
...@@ -86,7 +86,7 @@ parser.add_argument("--max_answer_length", default=30, type=int, ...@@ -86,7 +86,7 @@ parser.add_argument("--max_answer_length", default=30, type=int,
"and end predictions are not conditioned on one another.") "and end predictions are not conditioned on one another.")
### BEGIN - TO DELETE EVENTUALLY --> NO SENSE IN PYTORCH ### ### BEGIN - TO DELETE EVENTUALLY --> NO SENSE IN PYTORCH ###
# parser.add_argument("--use_tpu", default=False, type=bool, help="Whether to use TPU or GPU/CPU.") # parser.add_argument("--use_tpu", default=False, action='store_true', help="Whether to use TPU or GPU/CPU.")
# parser.add_argument("--tpu_name", default=None, type=str, # parser.add_argument("--tpu_name", default=None, type=str,
# help="The Cloud TPU to use for training. This should be either the name used when creating the " # help="The Cloud TPU to use for training. This should be either the name used when creating the "
# "Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.") # "Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.")
...@@ -101,7 +101,7 @@ parser.add_argument("--max_answer_length", default=30, type=int, ...@@ -101,7 +101,7 @@ parser.add_argument("--max_answer_length", default=30, type=int,
# "Total number of TPU cores to use.") # "Total number of TPU cores to use.")
### END - TO DELETE EVENTUALLY --> NO SENSE IN PYTORCH ### ### END - TO DELETE EVENTUALLY --> NO SENSE IN PYTORCH ###
parser.add_argument("--verbose_logging", default=False, type=bool, parser.add_argument("--verbose_logging", default=False, action='store_true',
help="If true, all of the warnings related to data processing will be printed. " help="If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.") "A number of warnings are expected for a normal SQuAD evaluation.")
parser.add_argument("--no_cuda", parser.add_argument("--no_cuda",
......
...@@ -17,7 +17,6 @@ from __future__ import division ...@@ -17,7 +17,6 @@ from __future__ import division
from __future__ import print_function from __future__ import print_function
import os import os
import tempfile
import unittest import unittest
import tokenization_pytorch as tokenization import tokenization_pytorch as tokenization
...@@ -122,4 +121,4 @@ class TokenizationTest(unittest.TestCase): ...@@ -122,4 +121,4 @@ class TokenizationTest(unittest.TestCase):
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment