chenpangpang / transformers
Commit 3ddff783, authored Nov 04, 2018 by thomwolf

clean up + mask is long

Parent: 88c10379
Showing 1 changed file with 6 additions and 6 deletions:
run_classifier.py (+6, -6)
run_classifier.py (view file @ 3ddff783)
@@ -22,10 +22,10 @@ import csv
 import os
 import logging
 import argparse
 import random
-import numpy as np
 from tqdm import tqdm, trange
 
+import numpy as np
 import torch
 from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
 from torch.utils.data.distributed import DistributedSampler
@@ -102,7 +102,7 @@ class MrpcProcessor(DataProcessor):
     def get_train_examples(self, data_dir):
         """See base class."""
-        print("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
+        logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
         return self._create_examples(
             self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
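
The processor now reports through the module logger instead of printing to stdout. A minimal sketch of the setup this relies on (the exact format string and level are assumptions, not taken from the diff): the script would define a module-level logger near its imports.

import logging

# Assumed module-level logging configuration; logger.info in the hunk above
# then emits a timestamped INFO record instead of writing to stdout.
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)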
@@ -420,7 +420,7 @@ def main():
         n_gpu = 1
         # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
         torch.distributed.init_process_group(backend='nccl')
-    print("device", device, "n_gpu", n_gpu, "distributed training", bool(args.local_rank != -1))
+    logger.info("device", device, "n_gpu", n_gpu, "distributed training", bool(args.local_rank != -1))
     if args.accumulate_gradients < 1:
         raise ValueError("Invalid accumulate_gradients parameter: {}, should be >= 1".format(
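
Worth noting when reading this hunk: unlike print, Logger.info treats its first argument as a %-style format string and lazily interpolates the remaining positional arguments, so a print-style argument list does not render as a single message. A hypothetical rewrite in the form the logging module expects (illustration only, not part of this commit):

# Hypothetical sketch: printf-style form of the same log message.
logger.info("device: %s, n_gpu: %d, distributed training: %r",
            device, n_gpu, bool(args.local_rank != -1))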
@@ -516,7 +516,7 @@ def main():
             nb_tr_examples, nb_tr_steps = 0, 0
             for step, (input_ids, input_mask, segment_ids, label_ids) in enumerate(tqdm(train_dataloader, desc="Iteration")):
                 input_ids = input_ids.to(device)
-                input_mask = input_mask.float().to(device)
+                input_mask = input_mask.to(device)
                 segment_ids = segment_ids.to(device)
                 label_ids = label_ids.to(device)
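
The commit message's "mask is long" refers to the attention mask keeping its integer dtype on the Python side: the .float() cast before moving the mask to the device is dropped. A minimal sketch of how such a script builds these tensors as torch.long (the InputFeatures stand-in below is hypothetical; the real script has its own feature class):

import collections
import torch
from torch.utils.data import TensorDataset

# Hypothetical stand-in for the script's feature objects.
InputFeatures = collections.namedtuple(
    "InputFeatures", ["input_ids", "input_mask", "segment_ids", "label_id"])
train_features = [InputFeatures([101, 2023, 102, 0], [1, 1, 1, 0], [0, 0, 0, 0], 1)]

all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)  # mask stays long
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)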
@@ -559,7 +559,7 @@ def main():
         nb_eval_steps, nb_eval_examples = 0, 0
         for input_ids, input_mask, segment_ids, label_ids in eval_dataloader:
             input_ids = input_ids.to(device)
-            input_mask = input_mask.float().to(device)
+            input_mask = input_mask.to(device)
             segment_ids = segment_ids.to(device)
             label_ids = label_ids.to(device)
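
Dropping the cast in the eval loop is safe for the same reason as in training: BERT-style models typically expand the long-dtype attention mask and cast it to floating point themselves before turning it into an additive bias on the attention scores. A sketch of that standard pattern (an assumption about the accompanying modeling code, which this diff does not show):

import torch

attention_mask = torch.ones(2, 8, dtype=torch.long)                    # as produced by the DataLoader
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)     # (batch, 1, 1, seq_len)
extended_attention_mask = extended_attention_mask.float()              # the cast happens inside the model
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0   # padding -> large negative bias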