Commit c8cba677 authored by thomwolf

clean up readme and examples

parent 757750d6
@@ -362,11 +362,8 @@ python run_classifier.py \
   --task_name MRPC \
   --do_train \
   --do_eval \
-  --do_lower_case \
   --data_dir $GLUE_DIR/MRPC/ \
-  --vocab_file $BERT_BASE_DIR/vocab.txt \
-  --bert_config_file $BERT_BASE_DIR/bert_config.json \
-  --init_checkpoint $BERT_PYTORCH_DIR/pytorch_model.bin \
+  --bert_model bert-base-uncased \
   --max_seq_length 128 \
   --train_batch_size 32 \
   --learning_rate 2e-5 \
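
In the classifier example, the three separate --vocab_file / --bert_config_file / --init_checkpoint paths collapse into a single --bert_model shortcut name. As a rough guide to what that shortcut stands for, here is a minimal sketch using the library's from_pretrained loaders; the exact calls are an assumption and are not shown in this diff:

    # Minimal sketch (not part of this commit): what a shortcut like
    # `--bert_model bert-base-uncased` is assumed to resolve to via the
    # pytorch_pretrained_bert from_pretrained loaders.
    from pytorch_pretrained_bert import BertTokenizer, BertForSequenceClassification

    bert_model = "bert-base-uncased"  # value previously spread over vocab/config/checkpoint paths

    # Tokenizer download/caching replaces the explicit --vocab_file path.
    tokenizer = BertTokenizer.from_pretrained(bert_model)

    # Model download/caching replaces --bert_config_file and --init_checkpoint.
    model = BertForSequenceClassification.from_pretrained(bert_model)
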
@@ -388,12 +385,9 @@ The data for SQuAD can be downloaded with the following links and should be saved
 export SQUAD_DIR=/path/to/SQUAD

 python run_squad.py \
-  --vocab_file $BERT_BASE_DIR/vocab.txt \
-  --bert_config_file $BERT_BASE_DIR/bert_config.json \
-  --init_checkpoint $BERT_PYTORCH_DIR/pytorch_model.bin \
+  --bert_model bert-base-uncased \
   --do_train \
   --do_predict \
-  --do_lower_case \
   --train_file $SQUAD_DIR/train-v1.1.json \
   --predict_file $SQUAD_DIR/dev-v1.1.json \
   --train_batch_size 12 \
...
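
The SQuAD example is updated the same way. For completeness, a comparable sketch with the question-answering head; again the loader calls are an assumption, not something shown in the hunk:

    # Sketch only: the SQuAD example's equivalent of `--bert_model bert-base-uncased`,
    # assuming the same from_pretrained interface as above.
    from pytorch_pretrained_bert import BertTokenizer, BertForQuestionAnswering

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model = BertForQuestionAnswering.from_pretrained("bert-base-uncased")
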
@@ -351,11 +351,6 @@ def main():
                         type=str,
                         required=True,
                         help="The name of the task to train.")
-    parser.add_argument("--vocab_file",
-                        default=None,
-                        type=str,
-                        required=True,
-                        help="The vocabulary file that the BERT model was trained on.")
     parser.add_argument("--output_dir",
                         default=None,
                         type=str,
...
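
This hunk drops the --vocab_file argument from what appears to be run_classifier.py's argument parser (the --task_name argument above it matches that script). The replacement flag is not shown in this excerpt; a hypothetical sketch of how a --bert_model argument could be declared, with illustrative help text:

    # Hypothetical sketch of the replacement CLI flag; the lines actually added by
    # the commit are not shown in this excerpt, so the help text here is illustrative.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--bert_model",
                        default=None,
                        type=str,
                        required=True,
                        help="Shortcut name of the pre-trained BERT model to load, "
                             "e.g. bert-base-uncased.")
    args = parser.parse_args(["--bert_model", "bert-base-uncased"])
    print(args.bert_model)  # -> bert-base-uncased
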
@@ -2,7 +2,7 @@ from setuptools import find_packages, setup
 setup(
     name="pytorch_pretrained_bert",
-    version="0.1.1",
+    version="0.1.2",
     author="Thomas Wolf, Victor Sanh, Tim Rault, Google AI Language Team Authors",
     author_email="thomas@huggingface.co",
     description="PyTorch version of Google AI BERT model with script to load Google pre-trained models",
...