chenpangpang / transformers

Commit e5b63fb5
Authored Mar 17, 2019 by Ananya Harsh Jha

Merge branch 'master' of https://github.com/ananyahjha93/pytorch-pretrained-BERT

pull current master to local

Parents: 8a4e90ff, f3e54048
Showing 6 changed files with 13 additions and 6 deletions (+13 -6)
examples/run_classifier.py                       +0 -1
examples/run_squad.py                            +4 -4
examples/run_swag.py                             +3 -1
pytorch_pretrained_bert/modeling_gpt2.py         +2 -0
pytorch_pretrained_bert/modeling_openai.py       +2 -0
pytorch_pretrained_bert/modeling_transfo_xl.py   +2 -0
examples/run_classifier.py
@@ -857,7 +857,6 @@ def main():
                     optimizer.zero_grad()
                     global_step += 1

     if args.do_train:
         # Save a trained model and the associated configuration
         model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
         output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
examples/run_squad.py
@@ -471,7 +471,7 @@ def write_predictions(all_examples, all_features, all_results, n_best_size,
         prelim_predictions = []
         # keep track of the minimum score of null start+end of position 0
         score_null = 1000000  # large and positive
-        min_null_feature_index = 0  # the paragraph slice with min mull score
+        min_null_feature_index = 0  # the paragraph slice with min null score
         null_start_logit = 0  # the start logit at the slice with min null score
         null_end_logit = 0  # the end logit at the slice with min null score
         for (feature_index, feature) in enumerate(features):
@@ -620,7 +620,7 @@ def write_predictions(all_examples, all_features, all_results, n_best_size,
                 all_predictions[example.qas_id] = ""
             else:
                 all_predictions[example.qas_id] = best_non_null_entry.text
-        all_nbest_json[example.qas_id] = nbest_json
+        all_nbest_json[example.qas_id] = nbest_json

     with open(output_prediction_file, "w") as writer:
         writer.write(json.dumps(all_predictions, indent=4) + "\n")
@@ -657,8 +657,8 @@ def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
     #
     # What we really want to return is "Steve Smith".
     #
-    # Therefore, we have to apply a semi-complicated alignment heruistic between
-    # `pred_text` and `orig_text` to get a character-to-charcter alignment. This
+    # Therefore, we have to apply a semi-complicated alignment heuristic between
+    # `pred_text` and `orig_text` to get a character-to-character alignment. This
     # can fail in certain cases in which case we just return `orig_text`.

     def _strip_spaces(text):
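For context on the variables the corrected comment describes: in SQuAD 2.0 mode, write_predictions scores the "no answer" hypothesis of each paragraph slice at token position 0 and keeps the minimum over all slices of an example, which is later compared against the best non-null span. The sketch below paraphrases that bookkeeping from run_squad.py; the helper name track_null_score is ours, and the result objects are the per-feature RawResult entries produced by the evaluation loop, so details may differ slightly by version.

def track_null_score(features, unique_id_to_result):
    """Sketch of the null-score bookkeeping in write_predictions (run_squad.py)."""
    score_null = 1000000  # large and positive
    min_null_feature_index = 0  # the paragraph slice with min null score
    null_start_logit = 0  # the start logit at the slice with min null score
    null_end_logit = 0  # the end logit at the slice with min null score
    for (feature_index, feature) in enumerate(features):
        result = unique_id_to_result[feature.unique_id]
        # The "no answer" hypothesis is scored at position 0: start logit + end logit.
        feature_null_score = result.start_logits[0] + result.end_logits[0]
        if feature_null_score < score_null:
            # Keep the slice with the minimum null score and its logits.
            score_null = feature_null_score
            min_null_feature_index = feature_index
            null_start_logit = result.start_logits[0]
            null_end_logit = result.end_logits[0]
    return score_null, min_null_feature_index, null_start_logit, null_end_logit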
examples/run_swag.py
@@ -15,6 +15,8 @@
 # limitations under the License.
 """BERT finetuning runner."""

+from __future__ import absolute_import
+
 import argparse
 import csv
 import logging
@@ -31,7 +33,7 @@ from torch.utils.data.distributed import DistributedSampler
 from tqdm import tqdm, trange

 from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
-from pytorch_pretrained_bert.modeling import BertForMultipleChoice
+from pytorch_pretrained_bert.modeling import (BertForMultipleChoice, BertConfig, WEIGHTS_NAME, CONFIG_NAME)
 from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear
 from pytorch_pretrained_bert.tokenization import BertTokenizer
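The widened import brings run_swag.py in line with the other example scripts, which serialize a fine-tuned model under WEIGHTS_NAME / CONFIG_NAME and rebuild it from those files for evaluation (run_classifier.py above uses the same model_to_save / WEIGHTS_NAME pattern). A minimal sketch of that pattern with the pytorch_pretrained_bert API; the output directory and num_choices value are illustrative, not taken from this diff:

import os
import torch
from pytorch_pretrained_bert.modeling import (BertForMultipleChoice, BertConfig,
                                               WEIGHTS_NAME, CONFIG_NAME)

output_dir = "/tmp/swag_output"  # illustrative path
os.makedirs(output_dir, exist_ok=True)

# SWAG is a 4-way multiple-choice task.
model = BertForMultipleChoice.from_pretrained("bert-base-uncased", num_choices=4)

# Unwrap DataParallel/DistributedDataParallel if present, then save weights and config.
model_to_save = model.module if hasattr(model, "module") else model
torch.save(model_to_save.state_dict(), os.path.join(output_dir, WEIGHTS_NAME))
with open(os.path.join(output_dir, CONFIG_NAME), "w") as f:
    f.write(model_to_save.config.to_json_string())

# Rebuild the model from the saved config and weights for evaluation.
config = BertConfig(os.path.join(output_dir, CONFIG_NAME))
model = BertForMultipleChoice(config, num_choices=4)
model.load_state_dict(torch.load(os.path.join(output_dir, WEIGHTS_NAME)))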
pytorch_pretrained_bert/modeling_gpt2.py
@@ -15,6 +15,8 @@
 # limitations under the License.
 """PyTorch OpenAI GPT-2 model."""

+from __future__ import absolute_import, division, print_function, unicode_literals
+
 import collections
 import copy
 import json
pytorch_pretrained_bert/modeling_openai.py
@@ -15,6 +15,8 @@
 # limitations under the License.
 """PyTorch OpenAI GPT model."""

+from __future__ import absolute_import, division, print_function, unicode_literals
+
 import collections
 import copy
 import json
pytorch_pretrained_bert/modeling_transfo_xl.py
@@ -18,6 +18,8 @@
     In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py
 """

+from __future__ import absolute_import, division, print_function, unicode_literals
+
 import os
 import copy
 import json
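The three modeling files each gain the same two-line header: a __future__ import plus a blank line, for Python 2/3 compatibility. As a quick illustration of what those flags change on Python 2 (on Python 3 they are no-ops):

from __future__ import absolute_import, division, print_function, unicode_literals

print(7 / 2)    # 3.5: "division" enables true division instead of floor division on Python 2
print("done")   # "print_function" makes print a function on Python 2
text = u"ok"    # "unicode_literals" makes plain string literals unicode on Python 2
# "absolute_import" makes bare imports resolve to top-level packages, not sibling modules.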