chenpangpang / transformers · Commit 0fa46524 (unverified)
Authored Mar 24, 2023 by Joao Gante; committed by GitHub on Mar 24, 2023
Generate: Add GPTNeoX integration test (#22346)
Parent: b7960765

Showing 1 changed file with 27 additions and 2 deletions:

tests/models/gpt_neox/test_modeling_gpt_neox.py (+27, -2)
```diff
@@ -17,8 +17,8 @@
 import unittest
 
-from transformers import GPTNeoXConfig, is_torch_available
-from transformers.testing_utils import require_torch, torch_device
+from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available
+from transformers.testing_utils import require_torch, slow, torch_device
 
 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
@@ -232,3 +232,28 @@ class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi
     @unittest.skip(reason="Feed forward chunking is not implemented")
     def test_feed_forward_chunking(self):
         pass
+
+
+@require_torch
+class GPTNeoXLanguageGenerationTest(unittest.TestCase):
+    @slow
+    def test_lm_generate_codegen(self):
+        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
+        for checkpointing in [True, False]:
+            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
+
+            if checkpointing:
+                model.gradient_checkpointing_enable()
+            else:
+                model.gradient_checkpointing_disable()
+            model.to(torch_device)
+
+            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
+            expected_output = (
+                "My favorite food is the chicken and rice.\n\nI love to cook and bake. I love to cook and bake"
+            )
+
+            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
+            output_str = tokenizer.batch_decode(output_ids)[0]
+
+            self.assertEqual(output_str, expected_output)
```
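For reference, here is a minimal standalone sketch of what the new integration test exercises: greedy generation from the EleutherAI/pythia-410m-deduped checkpoint, run once with gradient checkpointing enabled and once without. The checkpoint name, prompt, and generation arguments come from the diff above; the device-selection line is an assumption for running outside the transformers test harness, which supplies torch_device itself.

```python
# Sketch of the integration test as a standalone script (assumes torch and
# transformers are installed; downloads the ~410M-parameter Pythia weights).
import torch
from transformers import AutoTokenizer, GPTNeoXForCausalLM

# Assumption: pick a device manually instead of using the test suite's torch_device.
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
inputs = tokenizer("My favorite food is", return_tensors="pt").to(device)

for checkpointing in (True, False):
    model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
    if checkpointing:
        model.gradient_checkpointing_enable()
    else:
        model.gradient_checkpointing_disable()
    model.to(device)

    # do_sample=False makes generation greedy, hence deterministic, so both
    # checkpointing settings should produce the same continuation.
    output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
    print(checkpointing, repr(tokenizer.batch_decode(output_ids)[0]))
```

Because the test is decorated with @slow, the transformers suite skips it by default; it runs only when the RUN_SLOW=1 environment variable is set, for example: RUN_SLOW=1 python -m pytest tests/models/gpt_neox/test_modeling_gpt_neox.py -k GPTNeoXLanguageGenerationTest.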