transformers / Commits

Commit 96cd5bcb (unverified), authored Mar 22, 2022 by ivanllt, committed by GitHub Mar 21, 2022

added type hints for blenderbot and blenderbot_small (#16307)
parent e226a24f

Showing 4 changed files with 135 additions and 135 deletions
src/transformers/models/blenderbot/modeling_blenderbot.py (+33 -33)
src/transformers/models/blenderbot/modeling_tf_blenderbot.py (+35 -35)
src/transformers/models/blenderbot_small/modeling_blenderbot_small.py (+33 -33)
src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py (+34 -34)
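A minimal sketch of how to confirm the new annotations from a Python session, assuming a transformers install that includes this change (the docstring decorators on these methods return the original function, so the signature is visible directly):

import inspect

from transformers import BlenderbotModel

# The typed parameters added in this commit appear in the signature,
# e.g. "input_ids: Optional[torch.LongTensor] = None"
print(inspect.signature(BlenderbotModel.forward))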
src/transformers/models/blenderbot/modeling_blenderbot.py

...
@@ -1119,22 +1119,22 @@ class BlenderbotModel(BlenderbotPreTrainedModel):
     @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
-        input_ids=None,
-        attention_mask=None,
-        decoder_input_ids=None,
-        decoder_attention_mask=None,
-        head_mask=None,
-        decoder_head_mask=None,
-        cross_attn_head_mask=None,
-        encoder_outputs=None,
-        past_key_values=None,
-        inputs_embeds=None,
-        decoder_inputs_embeds=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        decoder_input_ids: Optional[torch.LongTensor] = None,
+        decoder_attention_mask: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.Tensor] = None,
+        decoder_head_mask: Optional[torch.Tensor] = None,
+        cross_attn_head_mask: Optional[torch.Tensor] = None,
+        encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
         r"""
         Returns:

...
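For context, a minimal usage sketch against the newly annotated signature, assuming the public facebook/blenderbot-400M-distill checkpoint; with return_dict left at its default the call returns a Seq2SeqModelOutput, otherwise the Tuple[torch.FloatTensor] side of the Union:

import torch

from transformers import BlenderbotModel, BlenderbotTokenizer

tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
model = BlenderbotModel.from_pretrained("facebook/blenderbot-400M-distill")

inputs = tokenizer("Hello, how are you?", return_tensors="pt")
# decoder_input_ids is an Optional[torch.LongTensor] per the new hints
decoder_input_ids = torch.tensor([[model.config.decoder_start_token_id]])

outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_input_ids)
print(outputs.last_hidden_state.shape)  # a Seq2SeqModelOutput field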
@@ -1275,23 +1275,23 @@ class BlenderbotForConditionalGeneration(BlenderbotPreTrainedModel):
     @add_end_docstrings(BLENDERBOT_GENERATION_EXAMPLE)
     def forward(
         self,
-        input_ids=None,
-        attention_mask=None,
-        decoder_input_ids=None,
-        decoder_attention_mask=None,
-        head_mask=None,
-        decoder_head_mask=None,
-        cross_attn_head_mask=None,
-        encoder_outputs=None,
-        past_key_values=None,
-        inputs_embeds=None,
-        decoder_inputs_embeds=None,
-        labels=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        decoder_input_ids: Optional[torch.LongTensor] = None,
+        decoder_attention_mask: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.Tensor] = None,
+        decoder_head_mask: Optional[torch.Tensor] = None,
+        cross_attn_head_mask: Optional[torch.Tensor] = None,
+        encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
         r"""
         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
             Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,

...
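A short sketch of the labels path, assuming the same checkpoint; passing labels (Optional[torch.LongTensor]) makes the forward return a Seq2SeqLMOutput carrying a loss:

from transformers import BlenderbotForConditionalGeneration, BlenderbotTokenizer

tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
model = BlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")

inputs = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="pt")
labels = tokenizer("That's unfortunate.", return_tensors="pt").input_ids

# With labels set, decoder inputs are derived internally and a loss is returned
outputs = model(**inputs, labels=labels)
print(outputs.loss)  # training loss from Seq2SeqLMOutput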
src/transformers/models/blenderbot/modeling_tf_blenderbot.py

...
@@ -18,7 +18,7 @@
 import os
 import random
 import warnings

-from typing import Optional, Tuple, Union
+from typing import List, Optional, Tuple, Union

 import tensorflow as tf

...
@@ -1137,24 +1137,24 @@ class TFBlenderbotModel(TFBlenderbotPreTrainedModel):
     )
     def call(
         self,
-        input_ids=None,
-        attention_mask=None,
-        decoder_input_ids=None,
-        decoder_attention_mask=None,
-        head_mask=None,
-        decoder_head_mask=None,
-        cross_attn_head_mask=None,
+        input_ids: Optional[tf.Tensor] = None,
+        attention_mask: Optional[tf.Tensor] = None,
+        decoder_input_ids: Optional[tf.Tensor] = None,
+        decoder_attention_mask: Optional[tf.Tensor] = None,
+        head_mask: Optional[tf.Tensor] = None,
+        decoder_head_mask: Optional[tf.Tensor] = None,
+        cross_attn_head_mask: Optional[tf.Tensor] = None,
         encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
-        past_key_values=None,
-        inputs_embeds=None,
-        decoder_inputs_embeds=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-        training=False,
+        past_key_values: Optional[List[tf.Tensor]] = None,
+        inputs_embeds: Optional[tf.Tensor] = None,
+        decoder_inputs_embeds: Optional[tf.Tensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        training: Optional[bool] = False,
         **kwargs
-    ):
+    ) -> Union[Tuple[tf.Tensor], TFSeq2SeqModelOutput]:
         outputs = self.model(
             input_ids=input_ids,
             attention_mask=attention_mask,

...
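The TF signature mirrors the PyTorch one with tf.Tensor hints; a minimal call sketch, again assuming the facebook/blenderbot-400M-distill weights:

from transformers import BlenderbotTokenizer, TFBlenderbotModel

tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
model = TFBlenderbotModel.from_pretrained("facebook/blenderbot-400M-distill")

inputs = tokenizer("Hello, how are you?", return_tensors="tf")
# Reusing the encoder ids as decoder input just to exercise the signature
outputs = model(input_ids=inputs.input_ids, decoder_input_ids=inputs.input_ids)
print(type(outputs).__name__)  # TFSeq2SeqModelOutput, since return_dict defaults to True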
@@ -1253,25 +1253,25 @@ class TFBlenderbotForConditionalGeneration(TFBlenderbotPreTrainedModel, TFCausal
     @add_end_docstrings(BLENDERBOT_GENERATION_EXAMPLE)
     def call(
         self,
-        input_ids=None,
-        attention_mask=None,
-        decoder_input_ids=None,
-        decoder_attention_mask=None,
-        head_mask=None,
-        decoder_head_mask=None,
-        cross_attn_head_mask=None,
-        encoder_outputs: Optional[TFBaseModelOutput] = None,
-        past_key_values=None,
-        inputs_embeds=None,
-        decoder_inputs_embeds=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-        labels=None,
-        training=False,
+        input_ids: Optional[tf.Tensor] = None,
+        attention_mask: Optional[tf.Tensor] = None,
+        decoder_input_ids: Optional[tf.Tensor] = None,
+        decoder_attention_mask: Optional[tf.Tensor] = None,
+        head_mask: Optional[tf.Tensor] = None,
+        decoder_head_mask: Optional[tf.Tensor] = None,
+        cross_attn_head_mask: Optional[tf.Tensor] = None,
+        encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
+        past_key_values: Optional[List[tf.Tensor]] = None,
+        inputs_embeds: Optional[tf.Tensor] = None,
+        decoder_inputs_embeds: Optional[tf.Tensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        labels: Optional[tf.Tensor] = None,
+        training: Optional[bool] = False,
         **kwargs,
-    ):
+    ) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]:
         r"""
         labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*):
             Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,

...
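And the TF labels/training path, a sketch under the same checkpoint assumption; training (Optional[bool] = False) keeps dropout disabled at inference:

from transformers import BlenderbotTokenizer, TFBlenderbotForConditionalGeneration

tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
model = TFBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")

inputs = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="tf")
labels = tokenizer("That's unfortunate.", return_tensors="tf").input_ids

# labels: Optional[tf.Tensor] per the new hints; a loss comes back on the output
outputs = model(input_ids=inputs.input_ids, labels=labels, training=False)
print(outputs.loss)  # TFSeq2SeqLMOutput.loss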
src/transformers/models/blenderbot_small/modeling_blenderbot_small.py

...
@@ -1102,22 +1102,22 @@ class BlenderbotSmallModel(BlenderbotSmallPreTrainedModel):
     @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
-        input_ids=None,
-        attention_mask=None,
-        decoder_input_ids=None,
-        decoder_attention_mask=None,
-        head_mask=None,
-        decoder_head_mask=None,
-        cross_attn_head_mask=None,
-        encoder_outputs=None,
-        past_key_values=None,
-        inputs_embeds=None,
-        decoder_inputs_embeds=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        decoder_input_ids: Optional[torch.LongTensor] = None,
+        decoder_attention_mask: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.Tensor] = None,
+        decoder_head_mask: Optional[torch.Tensor] = None,
+        cross_attn_head_mask: Optional[torch.Tensor] = None,
+        encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
         r"""
         Returns:

...
@@ -1246,23 +1246,23 @@ class BlenderbotSmallForConditionalGeneration(BlenderbotSmallPreTrainedModel):
     @add_end_docstrings(BLENDERBOT_SMALL_GENERATION_EXAMPLE)
     def forward(
         self,
-        input_ids=None,
-        attention_mask=None,
-        decoder_input_ids=None,
-        decoder_attention_mask=None,
-        head_mask=None,
-        decoder_head_mask=None,
-        cross_attn_head_mask=None,
-        encoder_outputs=None,
-        past_key_values=None,
-        inputs_embeds=None,
-        decoder_inputs_embeds=None,
-        labels=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        decoder_input_ids: Optional[torch.LongTensor] = None,
+        decoder_attention_mask: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.Tensor] = None,
+        decoder_head_mask: Optional[torch.Tensor] = None,
+        cross_attn_head_mask: Optional[torch.Tensor] = None,
+        encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
         r"""
         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
             Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,

...
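BlenderbotSmall follows the identical annotation pattern; a short generation sketch, assuming the public facebook/blenderbot_small-90M checkpoint:

from transformers import BlenderbotSmallForConditionalGeneration, BlenderbotSmallTokenizer

tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
model = BlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")

inputs = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="pt")
reply_ids = model.generate(**inputs)
print(tokenizer.batch_decode(reply_ids, skip_special_tokens=True))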
src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py

...
@@ -16,7 +16,7 @@
 import random

-from typing import Optional, Tuple, Union
+from typing import List, Optional, Tuple, Union

 import numpy as np
 import tensorflow as tf

...
@@ -1132,24 +1132,24 @@ class TFBlenderbotSmallModel(TFBlenderbotSmallPreTrainedModel):
     )
     def call(
         self,
-        input_ids=None,
-        attention_mask=None,
-        decoder_input_ids=None,
-        decoder_attention_mask=None,
-        head_mask=None,
-        decoder_head_mask=None,
-        cross_attn_head_mask=None,
+        input_ids: Optional[tf.Tensor] = None,
+        attention_mask: Optional[tf.Tensor] = None,
+        decoder_input_ids: Optional[tf.Tensor] = None,
+        decoder_attention_mask: Optional[tf.Tensor] = None,
+        head_mask: Optional[tf.Tensor] = None,
+        decoder_head_mask: Optional[tf.Tensor] = None,
+        cross_attn_head_mask: Optional[tf.Tensor] = None,
         encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
-        past_key_values=None,
-        inputs_embeds=None,
-        decoder_inputs_embeds=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-        training=False,
+        past_key_values: Optional[List[tf.Tensor]] = None,
+        inputs_embeds: Optional[tf.Tensor] = None,
+        decoder_inputs_embeds: Optional[tf.Tensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        training: Optional[bool] = False,
         **kwargs
-    ):
+    ) -> Union[Tuple[tf.Tensor], TFSeq2SeqModelOutput]:
         outputs = self.model(
             input_ids=input_ids,

...
@@ -1236,25 +1236,25 @@ class TFBlenderbotSmallForConditionalGeneration(TFBlenderbotSmallPreTrainedModel
     @add_end_docstrings(BLENDERBOT_SMALL_GENERATION_EXAMPLE)
     def call(
         self,
-        input_ids=None,
-        attention_mask=None,
-        decoder_input_ids=None,
-        decoder_attention_mask=None,
-        head_mask=None,
-        decoder_head_mask=None,
-        cross_attn_head_mask=None,
+        input_ids: Optional[tf.Tensor] = None,
+        attention_mask: Optional[tf.Tensor] = None,
+        decoder_input_ids: Optional[tf.Tensor] = None,
+        decoder_attention_mask: Optional[tf.Tensor] = None,
+        head_mask: Optional[tf.Tensor] = None,
+        decoder_head_mask: Optional[tf.Tensor] = None,
+        cross_attn_head_mask: Optional[tf.Tensor] = None,
         encoder_outputs: Optional[TFBaseModelOutput] = None,
-        past_key_values=None,
-        inputs_embeds=None,
-        decoder_inputs_embeds=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-        labels=None,
-        training=False,
+        past_key_values: Optional[List[tf.Tensor]] = None,
+        inputs_embeds: Optional[tf.Tensor] = None,
+        decoder_inputs_embeds: Optional[tf.Tensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        labels: Optional[tf.Tensor] = None,
+        training: Optional[bool] = False,
         **kwargs,
-    ):
+    ) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]:
         r"""
         labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*):
             Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,

...
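Finally, a sketch of the Tuple side of the new Union return types: with return_dict=False the TF small model returns a plain tuple of tf.Tensor (same checkpoint assumption as above):

from transformers import BlenderbotSmallTokenizer, TFBlenderbotSmallModel

tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
model = TFBlenderbotSmallModel.from_pretrained("facebook/blenderbot_small-90M")

inputs = tokenizer("Hello!", return_tensors="tf")
outputs = model(input_ids=inputs.input_ids, decoder_input_ids=inputs.input_ids, return_dict=False)
print(isinstance(outputs, tuple))  # True: the Tuple[tf.Tensor] branch of the annotation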