#!/usr/bin/env bash
## The relevant files are currently hosted on a shared Google Drive folder at
## https://drive.google.com/drive/folders/1kC0I2UGl2ltrluI9NqDjaQJGw5iliw_J
## Monitor it for changes, and eventually migrate to the nlp dataset library.
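
## Download the GermEval splits, drop comment lines, and keep only the token
## and tag columns (2 and 3) as space-separated pairs.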
curl -L 'https://drive.google.com/uc?export=download&id=1Jjhbal535VVz2ap4v4r_rN1UEHTdLK5P' \
    | grep -v "^#" | cut -f 2,3 | tr '\t' ' ' > train.txt.tmp
curl -L 'https://drive.google.com/uc?export=download&id=1ZfRcQThdtAR5PPRjIDtrVP7BtXSCUBbm' \
    | grep -v "^#" | cut -f 2,3 | tr '\t' ' ' > dev.txt.tmp
curl -L 'https://drive.google.com/uc?export=download&id=1u9mb7kNJHWQCWyweMDRMuTFoOHOfeBTH' \
    | grep -v "^#" | cut -f 2,3 | tr '\t' ' ' > test.txt.tmp
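
## Optional guard, not part of the original pipeline: fail fast if any of the
## downloads produced an empty file before spending time on preprocessing.
for f in train.txt.tmp dev.txt.tmp test.txt.tmp; do
    [ -s "$f" ] || { echo "Download failed or empty: $f" >&2; exit 1; }
done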

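## Tokenizer/model settings shared by the preprocessing and training steps.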
export MAX_LENGTH=128
export BERT_MODEL=bert-base-multilingual-cased
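
## scripts/preprocess.py re-segments the data so that no sentence exceeds
## MAX_LENGTH subtokens once tokenized by the $BERT_MODEL tokenizer.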
python3 scripts/preprocess.py train.txt.tmp $BERT_MODEL $MAX_LENGTH > train.txt
python3 scripts/preprocess.py dev.txt.tmp $BERT_MODEL $MAX_LENGTH > dev.txt
python3 scripts/preprocess.py test.txt.tmp $BERT_MODEL $MAX_LENGTH > test.txt
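
## Collect every tag (the second space-separated column) across all splits
## into the label vocabulary that run_ner.py expects.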
cat train.txt dev.txt test.txt | cut -d " " -f 2 | grep -v "^$" | sort | uniq > labels.txt
export OUTPUT_DIR=germeval-model
export BATCH_SIZE=32
export NUM_EPOCHS=3
export SAVE_STEPS=750
export SEED=1

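## Fine-tune $BERT_MODEL on the preprocessed data: --do_train trains,
## --do_eval evaluates on dev.txt, and --do_predict writes test-set predictions.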
python3 run_ner.py \
--task_type NER \
--data_dir . \
--labels ./labels.txt \
--model_name_or_path $BERT_MODEL \
--output_dir $OUTPUT_DIR \
--max_seq_length $MAX_LENGTH \
--num_train_epochs $NUM_EPOCHS \
--per_gpu_train_batch_size $BATCH_SIZE \
--save_steps $SAVE_STEPS \
--seed $SEED \
--do_train \
--do_eval \
--do_predict
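
## Inspect the metrics run_ner.py leaves in $OUTPUT_DIR once it finishes. The
## file names below follow this example's usual conventions; adjust them if
## your version of run_ner.py writes different ones.
for f in "$OUTPUT_DIR"/eval_results.txt "$OUTPUT_DIR"/test_results.txt; do
    [ -f "$f" ] && cat "$f"
done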