Unverified commit 722b5807, authored Sep 03, 2020 by Sylvain Gugger, committed by GitHub on Sep 03, 2020

Template updates (#6914)

parent ea2c6f1a
Showing 3 changed files with 53 additions and 29 deletions.
templates/adding_a_new_model/README.md (+0, -1)
templates/adding_a_new_model/tests/test_modeling_tf_xxx.py (+17, -13)
templates/adding_a_new_model/tests/test_modeling_xxx.py (+36, -15)
templates/adding_a_new_model/README.md
@@ -79,7 +79,6 @@ You can then finish the addition step by adding imports for your classes in the
- [ ] Add your configuration in `configuration_auto.py`.
- [ ] Add your PyTorch and TF 2.0 model respectively in `modeling_auto.py` and `modeling_tf_auto.py`.
- [ ] Add your tokenizer in `tokenization_auto.py`.
- [ ] Add your models and tokenizer to `pipeline.py`.
- [ ] Add a link to your conversion script in the main conversion utility (in `commands/convert.py`)
- [ ] Edit the PyTorch to TF 2.0 conversion script to add your model in the `convert_pytorch_checkpoint_to_tf2.py` file.
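To make the checklist above concrete, here is a minimal sketch of what its first few items amount to for the template's placeholder `Xxx` model. The mapping names used below (`CONFIG_MAPPING`, `MODEL_MAPPING`, `TF_MODEL_MAPPING`, `TOKENIZER_MAPPING`) and the dict-style registration are assumptions about how the auto modules of this period are organised, not excerpts from the files the checklist names.

```python
# Hypothetical sketch only: the registry names are assumed, and the Xxx*
# classes are the placeholders used throughout the adding_a_new_model template.

# configuration_auto.py -- register the config under its model-type string.
CONFIG_MAPPING["xxx"] = XxxConfig

# modeling_auto.py / modeling_tf_auto.py -- map the config class to the
# PyTorch and TF 2.0 base models (task-specific heads follow the same pattern).
MODEL_MAPPING[XxxConfig] = XxxModel
TF_MODEL_MAPPING[XxxConfig] = TFXxxModel

# tokenization_auto.py -- map the config class to its (slow, fast) tokenizers.
TOKENIZER_MAPPING[XxxConfig] = (XxxTokenizer, None)  # no fast tokenizer yet
```

Once these mappings know about the model, `AutoConfig.from_pretrained`, `AutoModel.from_pretrained`, and `AutoTokenizer.from_pretrained` can resolve checkpoints whose config declares the `xxx` model type to the new classes.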
templates/adding_a_new_model/tests/test_modeling_tf_xxx.py
@@ -17,10 +17,10 @@
import unittest

from transformers import XxxConfig, is_tf_available
+from transformers.testing_utils import CACHE_DIR, require_tf, slow

from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
-from .utils import CACHE_DIR, require_tf, slow


if is_tf_available():
@@ -137,7 +137,7 @@ class TFXxxModelTest(TFModelTesterMixin, unittest.TestCase):
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

-    def create_and_check_xxx_model(
+    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFXxxModel(config=config)
@@ -154,7 +154,7 @@ class TFXxxModelTest(TFModelTesterMixin, unittest.TestCase):
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

-    def create_and_check_xxx_for_masked_lm(
+    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFXxxForMaskedLM(config=config)
@@ -162,7 +162,7 @@ class TFXxxModelTest(TFModelTesterMixin, unittest.TestCase):
result
=
model
(
inputs
)
self
.
parent
.
assertEqual
(
result
.
logits
.
shape
,
(
self
.
batch_size
,
self
.
seq_length
,
self
.
vocab_size
))
def
create_and_check_
xxx_
for_sequence_classification
(
def
create_and_check_for_sequence_classification
(
self
,
config
,
input_ids
,
token_type_ids
,
input_mask
,
sequence_labels
,
token_labels
,
choice_labels
):
config
.
num_labels
=
self
.
num_labels
...
...
@@ -171,7 +171,7 @@ class TFXxxModelTest(TFModelTesterMixin, unittest.TestCase):
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

-    def create_and_check_bert_for_multiple_choice(
+    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
@@ -187,7 +187,7 @@ class TFXxxModelTest(TFModelTesterMixin, unittest.TestCase):
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

-    def create_and_check_xxx_for_token_classification(
+    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
@@ -196,7 +196,7 @@ class TFXxxModelTest(TFModelTesterMixin, unittest.TestCase):
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

-    def create_and_check_xxx_for_question_answering(
+    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFXxxForQuestionAnswering(config=config)
@@ -226,25 +226,29 @@ class TFXxxModelTest(TFModelTesterMixin, unittest.TestCase):
    def test_config(self):
        self.config_tester.run_common_tests()

-    def test_xxx_model(self):
+    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
-        self.model_tester.create_and_check_xxx_model(*config_and_inputs)
+        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
-        self.model_tester.create_and_check_xxx_for_masked_lm(*config_and_inputs)
+        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
-        self.model_tester.create_and_check_xxx_for_question_answering(*config_and_inputs)
+        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
-        self.model_tester.create_and_check_xxx_for_sequence_classification(*config_and_inputs)
+        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
-        self.model_tester.create_and_check_xxx_for_token_classification(*config_and_inputs)
+        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

+    def test_for_multiple_choice(self):
+        config_and_inputs = self.model_tester.prepare_config_and_inputs()
+        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
+
    @slow
    def test_model_from_pretrained(self):
templates/adding_a_new_model/tests/test_modeling_xxx.py
@@ -17,10 +17,10 @@
import unittest

from transformers import is_torch_available
+from transformers.testing_utils import require_torch, require_torch_and_cuda, slow, torch_device

from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor
-from .utils import require_torch, require_torch_and_cuda, slow, torch_device


if is_torch_available():
@@ -29,6 +29,7 @@ if is_torch_available():
        AutoTokenizer,
        XxxConfig,
        XxxForMaskedLM,
+        XxxForMultipleChoice,
        XxxForQuestionAnswering,
        XxxForSequenceClassification,
        XxxForTokenClassification,
@@ -126,7 +127,7 @@ class XxxModelTester:
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

-    def create_and_check_xxx_model(
+    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = XxxModel(config=config)
@@ -138,18 +139,16 @@ class XxxModelTester:
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

-    def create_and_check_xxx_for_masked_lm(
+    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = XxxForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
-        result = model(
-            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels
-        )
+        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

-    def create_and_check_xxx_for_question_answering(
+    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = XxxForQuestionAnswering(config=config)
@@ -165,7 +164,7 @@ class XxxModelTester:
self
.
parent
.
assertEqual
(
result
.
start_logits
.
shape
,
(
self
.
batch_size
,
self
.
seq_length
))
self
.
parent
.
assertEqual
(
result
.
end_logits
.
shape
,
(
self
.
batch_size
,
self
.
seq_length
))
def
create_and_check_
xxx_
for_sequence_classification
(
def
create_and_check_for_sequence_classification
(
self
,
config
,
input_ids
,
token_type_ids
,
input_mask
,
sequence_labels
,
token_labels
,
choice_labels
):
config
.
num_labels
=
self
.
num_labels
...
...
@@ -175,7 +174,7 @@ class XxxModelTester:
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

-    def create_and_check_xxx_for_token_classification(
+    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
@@ -185,6 +184,24 @@ class XxxModelTester:
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

+    def create_and_check_for_multiple_choice(
+        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
+    ):
+        config.num_choices = self.num_choices
+        model = XxxForMultipleChoice(config=config)
+        model.to(torch_device)
+        model.eval()
+        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
+        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
+        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
+        result = model(
+            multiple_choice_inputs_ids,
+            attention_mask=multiple_choice_input_mask,
+            token_type_ids=multiple_choice_token_type_ids,
+            labels=choice_labels,
+        )
+        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
+
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
@@ -216,25 +233,29 @@ class XxxModelTest(ModelTesterMixin, unittest.TestCase):
    def test_config(self):
        self.config_tester.run_common_tests()

-    def test_xxx_model(self):
+    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
-        self.model_tester.create_and_check_xxx_model(*config_and_inputs)
+        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
-        self.model_tester.create_and_check_xxx_for_masked_lm(*config_and_inputs)
+        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
-        self.model_tester.create_and_check_xxx_for_question_answering(*config_and_inputs)
+        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
-        self.model_tester.create_and_check_xxx_for_sequence_classification(*config_and_inputs)
+        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
-        self.model_tester.create_and_check_xxx_for_token_classification(*config_and_inputs)
+        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

+    def test_for_multiple_choice(self):
+        config_and_inputs = self.model_tester.prepare_config_and_inputs()
+        self.model_tester.create_and_check_electra_for_multiple_choice(*config_and_inputs)
+
    @slow
    def test_lm_outputs_same_as_reference_model(self):