chenpangpang / transformers · Commit 631be270
Authored Dec 21, 2019 by Aymeric Augustin
Parent: b0f7db73

Fix E722 flake8 warnings (x26).
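E722 is the flake8 rule against bare `except:` clauses. A bare `except` catches `BaseException`, including `SystemExit` and `KeyboardInterrupt`, so it silences errors the code never meant to handle; every hunk below replaces it with the exception each site actually expects. A minimal sketch of the hazard and the fix (the file name and helpers are illustrative, not from this commit):

```python
def load_config(path):
    try:
        with open(path) as f:
            return f.read()
    except:  # E722: also swallows KeyboardInterrupt, SystemExit, and typos
        return None


def load_config_fixed(path):
    try:
        with open(path) as f:
            return f.read()
    except FileNotFoundError:  # the one failure this code intends to handle
        return None


print(load_config_fixed("does_not_exist.cfg"))  # -> None
```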
Showing 20 changed files with 43 additions and 41 deletions.
examples/contrib/run_swag.py                        +1 −1
examples/distillation/distiller.py                  +1 −1
examples/distillation/run_squad_w_distillation.py   +1 −1
examples/mm-imdb/run_mmimdb.py                      +1 −1
examples/pplm/run_pplm.py                           +2 −2
examples/pplm/run_pplm_discrim_train.py             +4 −4
examples/run_glue.py                                +1 −1
examples/run_lm_finetuning.py                       +1 −1
examples/run_multiple_choice.py                     +1 −1
examples/run_squad.py                               +1 −1
examples/run_xnli.py                                +1 −1
templates/adding_a_new_example_script/run_xxx.py    +1 −1
transformers/__init__.py                            +3 −3
transformers/hf_api.py                              +4 −6
transformers/modeling_utils.py                      +1 −1
transformers/tests/modeling_tf_common_test.py       +3 −3
transformers/tokenization_ctrl.py                   +4 −3
transformers/tokenization_gpt2.py                   +4 −3
transformers/tokenization_openai.py                 +4 −3
transformers/tokenization_xlm.py                    +4 −3
examples/contrib/run_swag.py
@@ -44,7 +44,7 @@ from transformers import (
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter
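This same one-line fix recurs in most of the example scripts below: they all import `SummaryWriter` from PyTorch with a fallback to the standalone `tensorboardX` package. A small self-contained sketch of the idiom (the log directory is a made-up example); narrowing to `ImportError` means only a genuinely missing package triggers the fallback, while unrelated errors raised during import still surface:

```python
try:
    from torch.utils.tensorboard import SummaryWriter  # available in PyTorch >= 1.1
except ImportError:
    from tensorboardX import SummaryWriter  # fallback for older setups

writer = SummaryWriter(log_dir="runs/demo")  # "runs/demo" is an arbitrary example
writer.add_scalar("loss", 0.5, global_step=1)
writer.close()
```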
examples/distillation/distiller.py
@@ -37,7 +37,7 @@ from utils import logger
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter
examples/distillation/run_squad_w_distillation.py
@@ -67,7 +67,7 @@ from ..utils_squad_evaluate import main as evaluate_on_squad
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter
examples/mm-imdb/run_mmimdb.py
@@ -62,7 +62,7 @@ from utils_mmimdb import ImageEncoder, JsonlDataset, collate_fn, get_image_trans
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter
examples/pplm/run_pplm.py
@@ -697,8 +697,8 @@ def run_pplm_example(
             print("= Perturbed generated text {} =".format(i + 1))
             print(pert_gen_text)
             print()
-        except:
-            pass
+        except Exception as exc:
+            print("Ignoring error while generating perturbed text:", exc)

         # keep the prefix, perturbed seq, original seq for each index
         generated_texts.append((tokenized_cond_text, pert_gen_tok_text, unpert_gen_tok_text))
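Unlike the mechanical `except ImportError` fixes, this hunk changes behavior: instead of silently discarding any failure with `pass`, it keeps the broad catch but reports the error before moving on. A sketch of that catch-and-report pattern using the stdlib `logging` module (my variation; the diff itself uses `print`):

```python
import logging

logger = logging.getLogger(__name__)


def generate_or_skip(generate, prompt):
    """Run a flaky generator; report failures instead of hiding them."""
    try:
        return generate(prompt)
    except Exception as exc:  # deliberately broad, but no longer silent
        logger.warning("Ignoring error while generating %r: %s", prompt, exc)
        return None
```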
examples/pplm/run_pplm_discrim_train.py
@@ -285,7 +285,7 @@ def train_discriminator(
         for i, line in enumerate(f):
             try:
                 data.append(eval(line))
-            except:
+            except Exception:
                 print("Error evaluating line {}: {}".format(i, line))
                 continue
         x = []
@@ -303,7 +303,7 @@ def train_discriminator(
                     continue
                 x.append(seq)
                 y.append(d["label"])
-            except:
+            except Exception:
                 print("Error evaluating / tokenizing" " line {}, skipping it".format(i))
                 pass
@@ -343,7 +343,7 @@ def train_discriminator(
                     continue
                 x.append(seq)
                 y.append(int(np.sum(d["label"]) > 0))
-            except:
+            except Exception:
                 print("Error evaluating / tokenizing" " line {}, skipping it".format(i))
                 pass
@@ -402,7 +402,7 @@ def train_discriminator(
                 x.append(seq)
                 y.append(class2idx[label])
-            except:
+            except Exception:
                 print("Error tokenizing line {}, skipping it".format(i))
                 pass
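The four hunks above keep `except Exception` because the loop body parses untrusted dataset lines with `eval()`, which can raise almost anything. As an aside (my substitution, not part of this commit), `ast.literal_eval` parses only Python literals, which both removes the code-execution risk of `eval` and lets the except clause stay narrow:

```python
import ast


def load_rows(path):
    """Parse one Python literal per line, skipping malformed rows."""
    rows = []
    with open(path) as f:
        for i, line in enumerate(f):
            try:
                rows.append(ast.literal_eval(line))
            except (ValueError, SyntaxError):  # what literal_eval raises on bad input
                print("Error evaluating line {}: {}".format(i, line))
                continue
    return rows
```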
examples/run_glue.py
@@ -64,7 +64,7 @@ from transformers import glue_processors as processors
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter
examples/run_lm_finetuning.py
@@ -63,7 +63,7 @@ from transformers import (
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter
examples/run_multiple_choice.py
@@ -48,7 +48,7 @@ from utils_multiple_choice import convert_examples_to_features, processors
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter
examples/run_squad.py
@@ -64,7 +64,7 @@ from transformers.data.processors.squad import SquadResult, SquadV1Processor, Sq
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter
examples/run_xnli.py
@@ -52,7 +52,7 @@ from transformers import xnli_processors as processors
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter
templates/adding_a_new_example_script/run_xxx.py
@@ -63,7 +63,7 @@ from utils_squad_evaluate import main as evaluate_on_squad
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter
transformers/__init__.py
@@ -6,12 +6,12 @@ __version__ = "2.3.0"
 # and: https://github.com/tensorflow/tensorflow/issues/26691#issuecomment-500369493
 try:
     import absl.logging
+except ImportError:
+    pass
+else:
     absl.logging.set_verbosity("info")
     absl.logging.set_stderrthreshold("info")
     absl.logging._warn_preinit_stderr = False
-except:
-    pass

 import logging
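This hunk is the one place the commit restructures rather than just renames: only the `import` stays inside `try`, and the configuration calls move to an `else` block, so a bug in the configuration itself (say, an `AttributeError`) is no longer mistaken for a missing package. The shape of the idiom, with a hypothetical optional dependency and API:

```python
try:
    import fancy_logger  # hypothetical optional dependency
except ImportError:
    fancy_logger = None  # feature disabled; only the import is guarded
else:
    # Runs only when the import succeeded; errors here propagate normally
    # instead of being swallowed as if the package were absent.
    fancy_logger.configure(level="info")  # hypothetical API
```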
transformers/hf_api.py
@@ -205,10 +205,8 @@ class HfFolder:
         try:
             with open(cls.path_token, "r") as f:
                 return f.read()
-        except:
-            # this is too wide. When Py2 is dead use:
-            # `except FileNotFoundError:` instead
-            return None
+        except FileNotFoundError:
+            pass

     @classmethod
     def delete_token(cls):
@@ -218,5 +216,5 @@ class HfFolder:
         """
         try:
             os.remove(cls.path_token)
-        except:
-            return
+        except FileNotFoundError:
+            pass
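The deleted comment explains why the bare except survived until now: `FileNotFoundError` only exists on Python 3 (on Python 2 a missing file raised `IOError`), and this commit landed as Python 2 support was being dropped. Note that the getter still yields `None` when the file is missing, since falling off the end of the function after `pass` returns `None` implicitly. A sketch of both methods' pattern with a made-up token path:

```python
import os

TOKEN_PATH = os.path.expanduser("~/.demo_token")  # hypothetical path


def get_token():
    try:
        with open(TOKEN_PATH, "r") as f:
            return f.read()
    except FileNotFoundError:
        pass  # fall through: the function implicitly returns None


def delete_token():
    try:
        os.remove(TOKEN_PATH)
    except FileNotFoundError:
        pass  # already gone, so deletion is effectively idempotent
```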
transformers/modeling_utils.py
@@ -439,7 +439,7 @@ class PreTrainedModel(nn.Module):
         if state_dict is None and not from_tf:
             try:
                 state_dict = torch.load(resolved_archive_file, map_location="cpu")
-            except:
+            except Exception:
                 raise OSError(
                     "Unable to load weights from pytorch checkpoint file. "
                     "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "
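Here the broad catch is kept deliberately: any failure in `torch.load` is translated into an `OSError` with actionable advice. A sketch of the translate-and-re-raise shape; the `raise ... from exc` chaining is my addition (it preserves the original traceback on Python 3, which this 2019 code, still supporting Python 2, could not use), and `pickle.load` stands in for `torch.load`:

```python
import pickle


def load_checkpoint(path):
    try:
        with open(path, "rb") as f:
            return pickle.load(f)  # stand-in for torch.load(path, map_location="cpu")
    except Exception as exc:
        raise OSError(
            "Unable to load weights from checkpoint file {!r}.".format(path)
        ) from exc
```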
transformers/tests/modeling_tf_common_test.py
@@ -333,13 +333,13 @@ class TFCommonTestCases:
             # We used to fall back to just synthetically creating a dummy tensor of ones:
             try:
                 x = wte(input_ids, mode="embedding")
-            except:
+            except Exception:
                 try:
                     x = wte([input_ids], mode="embedding")
-                except:
+                except Exception:
                     try:
                         x = wte([input_ids, None, None, None], mode="embedding")
-                    except:
+                    except Exception:
                         if hasattr(self.model_tester, "embedding_size"):
                             x = tf.ones(input_ids.shape + [self.model_tester.embedding_size], dtype=tf.dtypes.float32)
                         else:
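The test probes three possible call signatures for the embedding layer, nesting one try per candidate. An equivalent flattened shape (my restructuring, not the commit's) loops over the candidates instead, which keeps the broad `except Exception` but avoids the pyramid:

```python
def embed_with_fallbacks(wte, input_ids):
    """Try each known call signature for the embedding layer in turn."""
    candidates = (
        lambda: wte(input_ids, mode="embedding"),
        lambda: wte([input_ids], mode="embedding"),
        lambda: wte([input_ids, None, None, None], mode="embedding"),
    )
    for attempt in candidates:
        try:
            return attempt()
        except Exception:  # deliberately broad: any signature mismatch
            continue
    return None  # caller falls back to a synthetic tensor of ones
```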
transformers/tokenization_ctrl.py
@@ -168,11 +168,12 @@ class CTRLTokenizer(PreTrainedTokenizer):
         while i < len(word):
             try:
                 j = word.index(first, i)
-                new_word.extend(word[i:j])
-                i = j
-            except:
+            except ValueError:
                 new_word.extend(word[i:])
                 break
+            else:
+                new_word.extend(word[i:j])
+                i = j

             if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                 new_word.append(first + second)
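`list.index` raises `ValueError` when the element is absent, which is exactly the "no further occurrence" signal this loop uses to stop, so that is the right exception to name; the identical fix repeats in the GPT-2, OpenAI GPT, and XLM tokenizers below. A self-contained sketch of this BPE merge step (the helper name and driver line are mine):

```python
def merge_pair(word, first, second):
    """Merge every adjacent (first, second) pair in `word` into one symbol."""
    new_word, i = [], 0
    while i < len(word):
        try:
            j = word.index(first, i)
        except ValueError:  # no more occurrences of `first`: copy tail, stop
            new_word.extend(word[i:])
            break
        else:  # copy everything up to the next candidate position
            new_word.extend(word[i:j])
            i = j
        if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
            new_word.append(first + second)
            i += 2
        else:
            new_word.append(word[i])
            i += 1
    return new_word


print(merge_pair(["h", "e", "l", "l", "o"], "l", "l"))  # ['h', 'e', 'll', 'o']
```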
transformers/tokenization_gpt2.py
@@ -178,11 +178,12 @@ class GPT2Tokenizer(PreTrainedTokenizer):
         while i < len(word):
             try:
                 j = word.index(first, i)
-                new_word.extend(word[i:j])
-                i = j
-            except:
+            except ValueError:
                 new_word.extend(word[i:])
                 break
+            else:
+                new_word.extend(word[i:j])
+                i = j

             if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                 new_word.append(first + second)
transformers/tokenization_openai.py
@@ -136,11 +136,12 @@ class OpenAIGPTTokenizer(PreTrainedTokenizer):
         while i < len(word):
             try:
                 j = word.index(first, i)
-                new_word.extend(word[i:j])
-                i = j
-            except:
+            except ValueError:
                 new_word.extend(word[i:])
                 break
+            else:
+                new_word.extend(word[i:j])
+                i = j

             if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                 new_word.append(first + second)
transformers/tokenization_xlm.py
@@ -683,11 +683,12 @@ class XLMTokenizer(PreTrainedTokenizer):
         while i < len(word):
             try:
                 j = word.index(first, i)
-                new_word.extend(word[i:j])
-                i = j
-            except:
+            except ValueError:
                 new_word.extend(word[i:])
                 break
+            else:
+                new_word.extend(word[i:j])
+                i = j

             if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                 new_word.append(first + second)