chenpangpang / transformers

Unverified commit aa6cfe9c, authored Mar 24, 2022 by NielsRogge, committed by GitHub on Mar 24, 2022

Rename to SemanticSegmenterOutput (#15849)

Co-authored-by: Niels Rogge <nielsrogge@Nielss-MacBook-Pro.local>

Parent: 70a9bc69
Showing 3 changed files with 9 additions and 9 deletions (+9 -9):

    src/transformers/modeling_outputs.py                        +1 -1
    src/transformers/models/beit/modeling_beit.py               +4 -4
    src/transformers/models/segformer/modeling_segformer.py     +4 -4
src/transformers/modeling_outputs.py

@@ -815,7 +815,7 @@ class Seq2SeqQuestionAnsweringModelOutput(ModelOutput):
 @dataclass
-class SemanticSegmentationModelOutput(ModelOutput):
+class SemanticSegmenterOutput(ModelOutput):
     """
     Base class for outputs of semantic segmentation models.
...
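For readers following the rename: the fields of the output class are unchanged, only its name moves from SemanticSegmentationModelOutput to SemanticSegmenterOutput. Below is a minimal sketch of the renamed dataclass; the import path and the attentions field are assumptions drawn from the sibling ModelOutput subclasses, not something shown in this diff.

from dataclasses import dataclass
from typing import Optional, Tuple

import torch

# Assumed import path for the shared base class; in the real file it is a relative import.
from transformers.utils import ModelOutput


@dataclass
class SemanticSegmenterOutput(ModelOutput):
    """Base class for outputs of semantic segmentation models."""

    loss: Optional[torch.FloatTensor] = None   # computed when ground-truth segmentation maps are passed
    logits: torch.FloatTensor = None           # (batch_size, num_labels, logits_height, logits_width)
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None   # assumed, by analogy with the other output classes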
src/transformers/models/beit/modeling_beit.py

@@ -30,7 +30,7 @@ from ...modeling_outputs import (
     BaseModelOutput,
     BaseModelOutputWithPooling,
     MaskedLMOutput,
-    SemanticSegmentationModelOutput,
+    SemanticSegmenterOutput,
     SequenceClassifierOutput,
 )
 from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer

@@ -1188,7 +1188,7 @@ class BeitForSemanticSegmentation(BeitPreTrainedModel):
         return loss

     @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
-    @replace_return_docstrings(output_type=SemanticSegmentationModelOutput, config_class=_CONFIG_FOR_DOC)
+    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
         pixel_values: Optional[torch.Tensor] = None,

@@ -1197,7 +1197,7 @@ class BeitForSemanticSegmentation(BeitPreTrainedModel):
         output_attentions: Optional[bool] = None,
         output_hidden_states: Optional[bool] = None,
         return_dict: Optional[bool] = None,
-    ) -> Union[tuple, SemanticSegmentationModelOutput]:
+    ) -> Union[tuple, SemanticSegmenterOutput]:
         r"""
         labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
             Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,

@@ -1272,7 +1272,7 @@ class BeitForSemanticSegmentation(BeitPreTrainedModel):
             output = (logits,) + outputs[3:]
             return ((loss,) + output) if loss is not None else output

-        return SemanticSegmentationModelOutput(
+        return SemanticSegmenterOutput(
             loss=loss,
             logits=logits,
             hidden_states=outputs.hidden_states if output_hidden_states else None,
...
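The only user-visible effect in BEiT is the type returned by BeitForSemanticSegmentation.forward when return_dict=True. A hedged usage sketch follows; the checkpoint name is an example Hub checkpoint, not something introduced by this commit.

import requests
import torch
from PIL import Image
from transformers import AutoFeatureExtractor, BeitForSemanticSegmentation

# Example checkpoint; any BEiT checkpoint fine-tuned for semantic segmentation behaves the same way.
checkpoint = "microsoft/beit-base-finetuned-ade-640-640"
feature_extractor = AutoFeatureExtractor.from_pretrained(checkpoint)
model = BeitForSemanticSegmentation.from_pretrained(checkpoint)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = feature_extractor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)  # a SemanticSegmenterOutput when return_dict=True (the default)

logits = outputs.logits                   # (batch_size, num_labels, height, width), smaller than the input image
predicted_map = logits.argmax(dim=1)      # per-pixel class indices at the logits resolution

With return_dict=False the same call returns a plain tuple, which is what the ((loss,) + output) branch in the hunk above handles.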
src/transformers/models/segformer/modeling_segformer.py

@@ -24,7 +24,7 @@ from torch import nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

 from ...activations import ACT2FN
-from ...modeling_outputs import BaseModelOutput, SemanticSegmentationModelOutput, SequenceClassifierOutput
+from ...modeling_outputs import BaseModelOutput, SemanticSegmenterOutput, SequenceClassifierOutput
 from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
 from ...utils import (
     add_code_sample_docstrings,

@@ -720,7 +720,7 @@ class SegformerForSemanticSegmentation(SegformerPreTrainedModel):
         self.post_init()

     @add_start_docstrings_to_model_forward(SEGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
-    @replace_return_docstrings(output_type=SemanticSegmentationModelOutput, config_class=_CONFIG_FOR_DOC)
+    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
         pixel_values: torch.FloatTensor,

@@ -728,7 +728,7 @@ class SegformerForSemanticSegmentation(SegformerPreTrainedModel):
         output_attentions: Optional[bool] = None,
         output_hidden_states: Optional[bool] = None,
         return_dict: Optional[bool] = None,
-    ) -> Union[Tuple, SemanticSegmentationModelOutput]:
+    ) -> Union[Tuple, SemanticSegmenterOutput]:
         r"""
         labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
             Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,

@@ -788,7 +788,7 @@ class SegformerForSemanticSegmentation(SegformerPreTrainedModel):
             output = (logits,) + outputs[2:]
             return ((loss,) + output) if loss is not None else output

-        return SemanticSegmentationModelOutput(
+        return SemanticSegmenterOutput(
             loss=loss,
             logits=logits,
             hidden_states=outputs.hidden_states if output_hidden_states else None,
...
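Because SegFormer (like BEiT) emits logits at a reduced resolution, a typical consumer of SemanticSegmenterOutput upsamples them before taking the per-pixel argmax. A sketch under the same caveats as above: the checkpoint name is an illustrative Hub checkpoint, and SegformerFeatureExtractor was the preprocessing class at the time of this commit.

import requests
import torch
from PIL import Image
from torch import nn
from transformers import SegformerFeatureExtractor, SegformerForSemanticSegmentation

# Example checkpoint; any SegFormer checkpoint fine-tuned for semantic segmentation works the same way.
checkpoint = "nvidia/segformer-b0-finetuned-ade-512-512"
feature_extractor = SegformerFeatureExtractor.from_pretrained(checkpoint)
model = SegformerForSemanticSegmentation.from_pretrained(checkpoint)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = feature_extractor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)  # SemanticSegmenterOutput

# SegFormer logits come out at roughly (height/4, width/4), so upsample
# them to the original image size before taking the argmax.
upsampled = nn.functional.interpolate(
    outputs.logits,
    size=image.size[::-1],  # PIL gives (width, height); interpolate expects (height, width)
    mode="bilinear",
    align_corners=False,
)
segmentation_map = upsampled.argmax(dim=1)[0]  # (height, width) class indices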