chenpangpang / transformers · Commits · d5477baf

Unverified commit d5477baf, authored Jun 16, 2020 by Sylvain Gugger, committed by GitHub on Jun 16, 2020.

Convert hans to Trainer (#5025)

* Convert hans to Trainer
* Tick box

Parent: c852036b
Changes: 4 changed files with 237 additions and 641 deletions (+237 −641)

  examples/README.md                   +1    −1
  examples/adversarial/run_hans.py     +231  −0
  examples/adversarial/test_hans.py    +0    −577
  examples/adversarial/utils_hans.py   +5    −63
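Note: the commit replaces the hand-rolled argparse/apex training loop of test_hans.py with a Trainer-based run_hans.py driven by HfArgumentParser dataclasses, where every dataclass field becomes a command-line flag. A minimal sketch of that argument-parsing pattern follows; the flag values are placeholders chosen for illustration, not taken from this commit.

# Minimal sketch of the HfArgumentParser pattern adopted by run_hans.py (illustrative values only).
from transformers import HfArgumentParser, TrainingArguments

parser = HfArgumentParser(TrainingArguments)
# run_hans.py passes (ModelArguments, DataTrainingArguments, TrainingArguments) and reads sys.argv;
# parse_args_into_dataclasses also accepts an explicit argument list, used here for illustration.
(training_args,) = parser.parse_args_into_dataclasses(
    args=["--output_dir", "./hans-output", "--do_train", "--do_eval"]
)
print(training_args.do_train, training_args.output_dir)  # True ./hans-output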
examples/README.md  (view file @ d5477baf)

@@ -27,7 +27,7 @@ This is still a work-in-progress – in particular documentation is still sparse
 | [**`summarization`**](https://github.com/huggingface/transformers/tree/master/examples/summarization) | CNN/Daily Mail | - | - | - | -
 | [**`translation`**](https://github.com/huggingface/transformers/tree/master/examples/translation) | WMT | - | - | - | -
 | [**`bertology`**](https://github.com/huggingface/transformers/tree/master/examples/bertology) | - | - | - | - | -
-| [**`adversarial`**](https://github.com/huggingface/transformers/tree/master/examples/adversarial) | HANS | - | - | - | -
+| [**`adversarial`**](https://github.com/huggingface/transformers/tree/master/examples/adversarial) | HANS | ✅ | - | - | -
 <br>
examples/adversarial/run_hans.py  (new file, 0 → 100644, view file @ d5477baf)
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on HANS."""

import logging
import os
from dataclasses import dataclass, field
from typing import Dict, List, Optional

import numpy as np
import torch

from transformers import (
    AutoConfig,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from utils_hans import HansDataset, InputFeatures, hans_processors


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(
        metadata={"help": "The name of the task to train selected in the list: " + ", ".join(hans_processors.keys())}
    )
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def hans_data_collator(features: List[InputFeatures]) -> Dict[str, torch.Tensor]:
    """
    Data collator that removes the "pairID" key if present.
    """
    batch = default_data_collator(features)
    _ = batch.pop("pairID", None)
    return batch


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = hans_processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        HansDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        HansDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            evaluate=True,
        )
        if training_args.do_eval
        else None
    )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=hans_data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        output = trainer.predict(eval_dataset)
        preds = output.predictions
        preds = np.argmax(preds, axis=1)

        pair_ids = [ex.pairID for ex in eval_dataset]
        output_eval_file = os.path.join(training_args.output_dir, "hans_predictions.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                for pid, pred in zip(pair_ids, preds):
                    writer.write("ex" + str(pid) + "," + label_list[int(pred)] + "\n")

        trainer._log(output.metrics)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
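A side note on the custom collator defined above: default_data_collator stacks whatever keys the features carry into tensors, so run_hans.py pops the HANS-specific pairID key before batches reach the model's forward(). Below is a minimal, self-contained sketch of that behavior; the toy feature dicts are illustrative, not part of the commit.

# Illustrative only: popping "pairID" after default_data_collator keeps it out of model inputs.
from transformers import default_data_collator

features = [
    {"input_ids": [0, 1, 2], "attention_mask": [1, 1, 1], "label": 0, "pairID": 17},
    {"input_ids": [3, 4, 5], "attention_mask": [1, 1, 1], "label": 1, "pairID": 18},
]
batch = default_data_collator(features)  # dict of stacked tensors, with "label" renamed to "labels"
batch.pop("pairID", None)                # drop the id used only for writing predictions
print(sorted(batch.keys()))              # ['attention_mask', 'input_ids', 'labels']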
examples/adversarial/test_hans.py  (deleted, 100644 → 0, view file @ c852036b)
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa)."""

from __future__ import absolute_import, division, print_function

import argparse
import glob
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange

from transformers import (
    WEIGHTS_NAME,
    AdamW,
    AlbertConfig,
    AlbertForSequenceClassification,
    AlbertTokenizer,
    BertConfig,
    BertForSequenceClassification,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForSequenceClassification,
    DistilBertTokenizer,
    RobertaConfig,
    RobertaForSequenceClassification,
    RobertaTokenizer,
    XLMConfig,
    XLMForSequenceClassification,
    XLMTokenizer,
    XLNetConfig,
    XLNetForSequenceClassification,
    XLNetTokenizer,
    default_data_collator,
    get_linear_schedule_with_warmup,
)
from utils_hans import HansDataset, hans_output_modes, hans_processors


try:
    from torch.utils.tensorboard import SummaryWriter
except ImportError:
    from tensorboardX import SummaryWriter


logger = logging.getLogger(__name__)

MODEL_CLASSES = {
    "bert": (BertConfig, BertForSequenceClassification, BertTokenizer),
    "xlnet": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
    "xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
    "roberta": (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
    "distilbert": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
    "albert": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
}


def set_seed(args):
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)


def train(args, train_dataset, model, tokenizer):
    """ Train the model """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()

    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(
        train_dataset,
        sampler=train_sampler,
        batch_size=args.train_batch_size,
        collate_fn=default_data_collator,
    )

    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )

    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        "  Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproductibility (even between python 2 and 3)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            model.train()
            inputs = {k: t.to(args.device) for k, t in batch.items() if k != "pairID"}
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)

            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1

                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    logs = {}
                    if (
                        args.local_rank == -1 and args.evaluate_during_training
                    ):  # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            eval_key = "eval_{}".format(key)
                            logs[eval_key] = value

                    loss_scalar = (tr_loss - logging_loss) / args.logging_steps
                    learning_rate_scalar = scheduler.get_lr()[0]
                    logs["learning_rate"] = learning_rate_scalar
                    logs["loss"] = loss_scalar
                    logging_loss = tr_loss

                    for key, value in logs.items():
                        tb_writer.add_scalar(key, value, global_step)
                    # print(json.dumps({**logs, **{'step': global_step}}))

                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = (
                        model.module if hasattr(model, "module") else model
                    )  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)

            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break

    if args.local_rank in [-1, 0]:
        tb_writer.close()

    return global_step, tr_loss / global_step


def evaluate(args, model, tokenizer, label_list, prefix=""):
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
    eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)

    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = HansDataset(
            args.data_dir,
            tokenizer,
            args.task_name,
            args.max_seq_length,
            overwrite_cache=args.overwrite_cache,
            evaluate=True,
        )

        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)

        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(
            eval_dataset,
            sampler=eval_sampler,
            batch_size=args.eval_batch_size,
            collate_fn=default_data_collator,
        )

        # multi-gpu eval
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)

        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info("  Num examples = %d", len(eval_dataset))
        logger.info("  Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            inputs = {k: t.to(args.device) for k, t in batch.items() if k != "pairID"}
            pair_ids = batch.pop("pairID", None)

            with torch.no_grad():
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]

                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs["labels"].detach().cpu().numpy()
                pair_ids = pair_ids.detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
                pair_ids = np.append(pair_ids, pair_ids.detach().cpu().numpy(), axis=0)

        eval_loss = eval_loss / nb_eval_steps
        if args.output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif args.output_mode == "regression":
            preds = np.squeeze(preds)

        output_eval_file = os.path.join(eval_output_dir, "hans_predictions.txt")
        with open(output_eval_file, "w") as writer:
            writer.write("pairID,gld_label\n")
            for pid, pred in zip(pair_ids, preds):
                writer.write("ex" + str(pid) + "," + label_list[int(pred)] + "\n")

    return results


def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--task_name",
        default=None,
        type=str,
        required=True,
        help="The name of the task to train selected in the list: " + ", ".join(hans_processors.keys()),
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step."
    )
    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
    )
    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
    parser.add_argument(
        "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform."
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")

    parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.")
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
    args = parser.parse_args()

    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                args.output_dir
            )
        )

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )

    # Set seed
    set_seed(args)

    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in hans_processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = hans_processors[args.task_name]()
    args.output_mode = hans_output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab

    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )

    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab

    model.to(args.device)

    logger.info("Training/evaluation parameters %s", args)

    # Training
    if args.do_train:
        train_dataset = HansDataset(
            args.data_dir, tokenizer, args.task_name, args.max_seq_length, overwrite_cache=args.overwrite_cache
        )
        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)

    # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)

        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = (
            model.module if hasattr(model, "module") else model
        )  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)

        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))

        # Load a trained model and vocabulary that you have fine-tuned
        model = model_class.from_pretrained(args.output_dir)
        tokenizer = tokenizer_class.from_pretrained(args.output_dir)
        model.to(args.device)

    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
            )
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""

            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, label_list, prefix=prefix)
            result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
            results.update(result)

    return results


if __name__ == "__main__":
    main()
examples/adversarial/utils_hans.py  (view file @ d5477baf)

@@ -22,15 +22,7 @@ from typing import List, Optional, Union
 import tqdm
 from filelock import FileLock

-from transformers import (
-    DataProcessor,
-    PreTrainedTokenizer,
-    RobertaTokenizer,
-    RobertaTokenizerFast,
-    XLMRobertaTokenizer,
-    is_tf_available,
-    is_torch_available,
-)
+from transformers import DataProcessor, PreTrainedTokenizer, is_tf_available, is_torch_available

 logger = logging.getLogger(__name__)

@@ -106,7 +98,6 @@ if is_torch_available():
             evaluate: bool = False,
         ):
             processor = hans_processors[task]()
-            output_mode = hans_output_modes[task]

             cached_features_file = os.path.join(
                 data_dir,

@@ -127,22 +118,12 @@ if is_torch_available():
                 logger.info(f"Creating features from dataset file at {data_dir}")
                 label_list = processor.get_labels()
-                if task in ["mnli", "mnli-mm"] and tokenizer.__class__ in (
-                    RobertaTokenizer,
-                    RobertaTokenizerFast,
-                    XLMRobertaTokenizer,
-                ):
-                    # HACK(label indices are swapped in RoBERTa pretrained model)
-                    label_list[1], label_list[2] = label_list[2], label_list[1]
                 examples = (
                     processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                 )
                 logger.info("Training examples: %s", len(examples))
                 # TODO clean up all this to leverage built-in features of tokenizers
-                self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode)
+                self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                 logger.info("Saving features into cached file %s", cached_features_file)
                 torch.save(self.features, cached_features_file)

@@ -174,21 +155,10 @@ if is_tf_available():
             evaluate: bool = False,
         ):
             processor = hans_processors[task]()
-            output_mode = hans_output_modes[task]
             label_list = processor.get_labels()
-            if task in ["mnli", "mnli-mm"] and tokenizer.__class__ in (
-                RobertaTokenizer,
-                RobertaTokenizerFast,
-                XLMRobertaTokenizer,
-            ):
-                # HACK(label indices are swapped in RoBERTa pretrained model)
-                label_list[1], label_list[2] = label_list[2], label_list[1]
             examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)

-            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode)
+            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

             def gen():
                 for (ex_index, ex) in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):

@@ -240,15 +210,6 @@ if is_tf_available():
 class HansProcessor(DataProcessor):
     """Processor for the HANS data set."""

-    def get_example_from_tensor_dict(self, tensor_dict):
-        """See base class."""
-        return InputExample(
-            tensor_dict["idx"].numpy(),
-            tensor_dict["premise"].numpy().decode("utf-8"),
-            tensor_dict["hypothesis"].numpy().decode("utf-8"),
-            str(tensor_dict["label"].numpy()),
-        )
-
     def get_train_examples(self, data_dir):
         """See base class."""
         return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

@@ -277,11 +238,7 @@ class HansProcessor(DataProcessor):
 def hans_convert_examples_to_features(
-    examples: List[InputExample],
-    label_list: List[str],
-    max_length: int,
-    tokenizer: PreTrainedTokenizer,
-    output_mode: str,
+    examples: List[InputExample],
+    label_list: List[str],
+    max_length: int,
+    tokenizer: PreTrainedTokenizer,
 ):
     """
     Loads a data file into a list of ``InputFeatures``

@@ -313,19 +270,8 @@ def hans_convert_examples_to_features(
             pad_to_max_length=True,
             return_overflowing_tokens=True,
         )

-        if "num_truncated_tokens" in inputs and inputs["num_truncated_tokens"] > 0:
-            logger.info(
-                "Attention! you are cropping tokens (swag task is ok). "
-                "If you are training ARC and RACE and you are poping question + options,"
-                "you need to try to use a bigger max seq length!"
-            )
-
-        if output_mode == "classification":
-            label = label_map[example.label] if example.label in label_map else 0
-        elif output_mode == "regression":
-            label = float(example.label)
-        else:
-            raise KeyError(output_mode)
+        label = label_map[example.label] if example.label in label_map else 0

         pairID = int(example.pairID)

@@ -346,7 +292,3 @@ hans_tasks_num_labels = {
 hans_processors = {
     "hans": HansProcessor,
 }
-
-hans_output_modes = {
-    "hans": "classification",
-}
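For context on the removed output_mode plumbing: HANS is classification-only, so the simplified hans_convert_examples_to_features maps gold labels straight through label_map, falling back to index 0 for labels outside the list. Below is a small illustrative sketch; the label set shown is assumed to match the MNLI-style labels returned by HansProcessor.get_labels() and is not quoted from the diff.

# Illustrative only: the label-mapping fallback kept by the simplified conversion code.
label_list = ["contradiction", "entailment", "neutral"]  # assumed MNLI-style label set
label_map = {label: i for i, label in enumerate(label_list)}

example_label = "non-entailment"  # HANS gold labels are two-way, so this one is not in label_map
label = label_map[example_label] if example_label in label_map else 0
print(label)  # 0, the fallback index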