Commit 793262e8 authored by Grégory Châtel

Removing trailing whitespaces.

parent 3ba5470e
...
@@ -35,7 +35,7 @@ from pytorch_pretrained_bert.modeling import BertForSequenceClassification
from pytorch_pretrained_bert.optimization import BertAdam
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE

logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)
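For context, the configuration touched in this hunk produces timestamped log records in a fixed format. A minimal standalone reproduction; the sample message and the emitted line are illustrative, not taken from the script:

import logging

# Same logging configuration as the hunk above.
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)

logger.info("loading model")
# Emits a line of the form:
# 01/30/2019 12:00:00 - INFO - __main__ - loading model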
...
@@ -409,14 +409,14 @@ def main():
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of update steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--optimize_on_cpu',
                        default=False,
                        action='store_true',
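The --gradient_accumulation_steps flag declared in this hunk is consumed later in the script's training loop. A self-contained sketch of the usual pattern, with a toy linear model standing in for BERT; every name and number below is illustrative, not the script's own code:

import torch

# Toy stand-ins: the real script uses BertForSequenceClassification and BertAdam.
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
gradient_accumulation_steps = 2  # would come from args.gradient_accumulation_steps

batches = [(torch.randn(8, 4), torch.randint(0, 2, (8,))) for _ in range(4)]
for step, (x, y) in enumerate(batches):
    loss = torch.nn.functional.cross_entropy(model(x), y)
    # Scale the loss so the accumulated gradient matches one large-batch update.
    (loss / gradient_accumulation_steps).backward()
    if (step + 1) % gradient_accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()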
...
@@ -487,7 +487,7 @@ def main():
            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)

    # Prepare model
    model = BertForSequenceClassification.from_pretrained(args.bert_model,
              cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank))
    if args.fp16:
        model.half()
...
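The step count computed in the last hunk is just examples per epoch, divided by the effective batch size (batch size times accumulation steps), times the number of epochs. With illustrative numbers, not taken from the script:

# Illustrative numbers only: 8,000 training examples, batch size 32,
# 2 accumulation steps, 3 epochs.
num_train_steps = int(8000 / 32 / 2 * 3)  # 125 optimizer updates per epoch, 375 total

In the same hunk, model.half() casts the model's parameters and buffers to float16, which is what the --fp16 flag opts into.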