chenpangpang / transformers

Commit 81422c4e
Authored Dec 23, 2019 by Aymeric Augustin
Remove unused variables in examples.
Parent: 072750f4

Showing 4 changed files with 2 additions and 16 deletions
examples/contrib/run_openai_gpt.py          +0  -6
examples/contrib/run_transfo_xl.py          +1  -3
examples/run_multiple_choice.py             +1  -2
examples/summarization/modeling_bertabs.py  +0  -5
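The removed names below are exactly what pyflakes-style checks report: F401 for an import that is never used and F841 for a local variable that is assigned but never read. The commit does not say how they were found; purely as a hedged illustration, and assuming flake8 is installed, a local check along these lines would flag them:

# Illustrative sketch only: run flake8 (if installed) over the examples/
# directory and report unused imports (F401) and unused locals (F841).
import subprocess

result = subprocess.run(
    ["flake8", "--select=F401,F841", "examples/"],
    capture_output=True,
    text=True,
    check=False,  # a non-zero exit code only means findings were reported
)
print(result.stdout)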
examples/contrib/run_openai_gpt.py

@@ -44,13 +44,10 @@ from transformers import (
     AdamW,
     OpenAIGPTDoubleHeadsModel,
     OpenAIGPTTokenizer,
-    cached_path,
     get_linear_schedule_with_warmup,
 )
 
 
-ROCSTORIES_URL = "https://s3.amazonaws.com/datasets.huggingface.co/ROCStories.tar.gz"
-
 logging.basicConfig(
     format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
 )
@@ -182,9 +179,6 @@ def main():
     model.to(device)
 
     # Load and encode the datasets
-    if not args.train_dataset and not args.eval_dataset:
-        roc_stories = cached_path(ROCSTORIES_URL)
-
     def tokenize_and_encode(obj):
         """ Tokenize and encode a nested object """
         if isinstance(obj, str):
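The second hunk is truncated inside tokenize_and_encode. Purely as an illustrative sketch (not the body actually in run_openai_gpt.py), a recursive encoder matching that signature and docstring could look like this:

from transformers import OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")

def tokenize_and_encode(obj):
    """ Tokenize and encode a nested object (a string, an int, or a nested sequence of them). """
    if isinstance(obj, str):
        return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
    if isinstance(obj, int):
        return obj
    return [tokenize_and_encode(o) for o in obj]

# Example: encode a small nested structure of strings.
encoded = tokenize_and_encode([["A short story.", "Its continuation."]])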
examples/contrib/run_transfo_xl.py

@@ -28,7 +28,7 @@ import time
 
 import torch
 
-from transformers import TransfoXLCorpus, TransfoXLLMHeadModel, TransfoXLTokenizer
+from transformers import TransfoXLCorpus, TransfoXLLMHeadModel
 
 
 logging.basicConfig(
@@ -73,9 +73,7 @@ def main():
     # The pre-processing involve computing word frequencies to prepare the Adaptive input and SoftMax
     # and tokenizing the dataset
     # The pre-processed corpus is a convertion (using the conversion script )
-    tokenizer = TransfoXLTokenizer.from_pretrained(args.model_name)
     corpus = TransfoXLCorpus.from_pretrained(args.model_name)
-    ntokens = len(corpus.vocab)
 
     va_iter = corpus.get_iterator("valid", args.batch_size, args.tgt_len, device=device, ext_len=args.ext_len)
     te_iter = corpus.get_iterator("test", args.batch_size, args.tgt_len, device=device, ext_len=args.ext_len)
examples/run_multiple_choice.py

@@ -141,7 +141,7 @@ def train(args, train_dataset, model, tokenizer):
     global_step = 0
     tr_loss, logging_loss = 0.0, 0.0
-    best_dev_acc, best_dev_loss = 0.0, 99999999999.0
+    best_dev_acc = 0.0
     best_steps = 0
     model.zero_grad()
     train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
@@ -193,7 +193,6 @@ def train(args, train_dataset, model, tokenizer):
                         tb_writer.add_scalar("eval_{}".format(key), value, global_step)
                     if results["eval_acc"] > best_dev_acc:
                         best_dev_acc = results["eval_acc"]
-                        best_dev_loss = results["eval_loss"]
                         best_steps = global_step
                         if args.do_test:
                             results_test = evaluate(args, model, tokenizer, test=True)
examples/summarization/modeling_bertabs.py

@@ -446,8 +446,6 @@ class MultiHeadedAttention(nn.Module):
         batch_size = key.size(0)
         dim_per_head = self.dim_per_head
         head_count = self.head_count
-        key_len = key.size(1)
-        query_len = query.size(1)
 
         def shape(x):
             """ projection """
@@ -504,9 +502,6 @@ class MultiHeadedAttention(nn.Module):
             query = shape(query)
 
-        key_len = key.size(2)
-        query_len = query.size(2)
-
         # 2) Calculate and scale scores.
         query = query / math.sqrt(dim_per_head)
         scores = torch.matmul(query, key.transpose(2, 3))
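For context on why key_len and query_len could be deleted outright: the scoring step kept above needs only the tensors themselves, since torch.matmul takes the sequence lengths from the tensor shapes. A minimal, self-contained sketch of the same scaled dot-product scoring, with illustrative shapes not taken from modeling_bertabs.py:

import math

import torch

# Illustrative shapes: (batch, heads, seq_len, dim_per_head).
batch_size, head_count, dim_per_head = 2, 8, 64
query = torch.randn(batch_size, head_count, 5, dim_per_head)
key = torch.randn(batch_size, head_count, 7, dim_per_head)

# Scale the queries, then take dot products against the keys.
query = query / math.sqrt(dim_per_head)
scores = torch.matmul(query, key.transpose(2, 3))
print(scores.shape)  # torch.Size([2, 8, 5, 7]) -> (batch, heads, query_len, key_len)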