chenpangpang / transformers

Commit deb61e5f (Unverified)
Authored Mar 22, 2022 by Dan Tegzes; committed by GitHub on Mar 22, 2022
Add type hints for Pegasus (#16324)
Parent: 7cc2c9c6

Showing 1 changed file with 33 additions and 33 deletions.

src/transformers/models/pegasus/modeling_pegasus.py (+33, -33)
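The change is purely mechanical: each untyped keyword argument of the two forward methods gains an Optional[...] annotation, and each method gains an explicit return annotation. As a minimal, self-contained sketch of that pattern (TinyModel and the stub Seq2SeqModelOutput are hypothetical stand-ins, not from this commit; the real signatures follow in the diff below):

from typing import Optional, Tuple, Union

import torch


class Seq2SeqModelOutput:
    """Stand-in for transformers.modeling_outputs.Seq2SeqModelOutput (sketch only)."""


class TinyModel:
    # Before this commit the signature was untyped:
    #     def forward(self, input_ids=None, return_dict=None):
    # After, every keyword argument carries a type hint and the method
    # declares what it returns:
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, Seq2SeqModelOutput]:
        if not return_dict:
            return (input_ids,)  # tuple form, mirroring the transformers convention
        return Seq2SeqModelOutput()

The annotations rely on Optional, Tuple, and Union from typing, which modeling_pegasus.py already imports at the top of the file (outside the hunks shown here), so the diff touches only the signatures.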
...
@@ -1184,22 +1184,22 @@ class PegasusModel(PegasusPreTrainedModel):
     @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
-        input_ids=None,
-        attention_mask=None,
-        decoder_input_ids=None,
-        decoder_attention_mask=None,
-        head_mask=None,
-        decoder_head_mask=None,
-        cross_attn_head_mask=None,
-        encoder_outputs=None,
-        past_key_values=None,
-        inputs_embeds=None,
-        decoder_inputs_embeds=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        input_ids: Optional[torch.Tensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        decoder_input_ids: Optional[torch.Tensor] = None,
+        decoder_attention_mask: Optional[torch.Tensor] = None,
+        head_mask: Optional[torch.Tensor] = None,
+        decoder_head_mask: Optional[torch.Tensor] = None,
+        cross_attn_head_mask: Optional[torch.Tensor] = None,
+        encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
+        past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        decoder_inputs_embeds: Optional[torch.Tensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, Seq2SeqModelOutput]:
         r"""
         Returns:
...
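With the hints above in place, a type checker or IDE can verify calls against PegasusModel.forward. A hedged usage sketch using the standard transformers API (the checkpoint name "google/pegasus-xsum" is an illustrative assumption, not part of this commit; the diff resumes below with the second hunk):

import torch
from transformers import PegasusModel, PegasusTokenizer

tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
model = PegasusModel.from_pretrained("google/pegasus-xsum")

inputs = tokenizer("PEGASUS is a summarization model.", return_tensors="pt")
decoder_input_ids = torch.tensor([[model.config.decoder_start_token_id]])

# input_ids / attention_mask are Optional[torch.Tensor];
# return_dict is Optional[bool], selecting between the two arms of the
# annotated return type Union[Tuple, Seq2SeqModelOutput].
outputs = model(
    input_ids=inputs.input_ids,
    attention_mask=inputs.attention_mask,
    decoder_input_ids=decoder_input_ids,
    return_dict=True,
)
last_hidden = outputs.last_hidden_state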
@@ -1352,23 +1352,23 @@ class PegasusForConditionalGeneration(PegasusPreTrainedModel):
     @add_end_docstrings(PEGASUS_GENERATION_EXAMPLE)
     def forward(
         self,
-        input_ids=None,
-        attention_mask=None,
-        decoder_input_ids=None,
-        decoder_attention_mask=None,
-        head_mask=None,
-        decoder_head_mask=None,
-        cross_attn_head_mask=None,
-        encoder_outputs=None,
-        past_key_values=None,
-        inputs_embeds=None,
-        decoder_inputs_embeds=None,
-        labels=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        input_ids: Optional[torch.Tensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        decoder_input_ids: Optional[torch.Tensor] = None,
+        decoder_attention_mask: Optional[torch.Tensor] = None,
+        head_mask: Optional[torch.Tensor] = None,
+        decoder_head_mask: Optional[torch.Tensor] = None,
+        cross_attn_head_mask: Optional[torch.Tensor] = None,
+        encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
+        past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        decoder_inputs_embeds: Optional[torch.Tensor] = None,
+        labels: Optional[torch.Tensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, Seq2SeqLMOutput]:
         r"""
         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
             Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
...
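The second hunk additionally types the labels argument, which is what distinguishes PegasusForConditionalGeneration.forward: passing labels makes the method compute the LM loss. A hedged sketch of that signature in use (again, the checkpoint and example strings are illustrative assumptions):

from transformers import PegasusForConditionalGeneration, PegasusTokenizer

tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
model = PegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")

inputs = tokenizer("A long article to summarize.", return_tensors="pt")
labels = tokenizer("A summary.", return_tensors="pt").input_ids

outputs = model(
    input_ids=inputs.input_ids,
    attention_mask=inputs.attention_mask,
    labels=labels,      # labels: Optional[torch.Tensor]
    return_dict=True,   # return type: Union[Tuple, Seq2SeqLMOutput]
)
print(outputs.loss)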