Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
vision
Commits
15848edb
Unverified
Commit
15848edb
authored
Sep 24, 2020
by
Francisco Massa
Committed by
GitHub
Sep 24, 2020
Browse files
Fix deprecation warning in nonzero (#2705)
Replace nonzero by where, now that it works with just a condition
parent
6a43a1f8
Changes
6
Show whitespace changes
Inline
Side-by-side
Showing
6 changed files
with
16 additions
and
16 deletions
+16
-16
torchvision/models/detection/_utils.py
torchvision/models/detection/_utils.py
+4
-4
torchvision/models/detection/generalized_rcnn.py
torchvision/models/detection/generalized_rcnn.py
+1
-1
torchvision/models/detection/roi_heads.py
torchvision/models/detection/roi_heads.py
+6
-6
torchvision/models/detection/rpn.py
torchvision/models/detection/rpn.py
+2
-2
torchvision/ops/boxes.py
torchvision/ops/boxes.py
+1
-1
torchvision/ops/poolers.py
torchvision/ops/poolers.py
+2
-2
No files found.
torchvision/models/detection/_utils.py
View file @
15848edb
...
...
@@ -41,8 +41,8 @@ class BalancedPositiveNegativeSampler(object):
pos_idx
=
[]
neg_idx
=
[]
for
matched_idxs_per_image
in
matched_idxs
:
positive
=
torch
.
nonz
er
o
(
matched_idxs_per_image
>=
1
)
.
squeeze
(
1
)
negative
=
torch
.
nonz
er
o
(
matched_idxs_per_image
==
0
)
.
squeeze
(
1
)
positive
=
torch
.
wh
er
e
(
matched_idxs_per_image
>=
1
)
[
0
]
negative
=
torch
.
wh
er
e
(
matched_idxs_per_image
==
0
)
[
0
]
num_pos
=
int
(
self
.
batch_size_per_image
*
self
.
positive_fraction
)
# protect against not enough positive examples
...
...
@@ -317,7 +317,7 @@ class Matcher(object):
# For each gt, find the prediction with which it has highest quality
highest_quality_foreach_gt
,
_
=
match_quality_matrix
.
max
(
dim
=
1
)
# Find highest quality match available, even if it is low, including ties
gt_pred_pairs_of_highest_quality
=
torch
.
nonz
er
o
(
gt_pred_pairs_of_highest_quality
=
torch
.
wh
er
e
(
match_quality_matrix
==
highest_quality_foreach_gt
[:,
None
]
)
# Example gt_pred_pairs_of_highest_quality:
...
...
@@ -334,7 +334,7 @@ class Matcher(object):
# Each row is a (gt index, prediction index)
# Note how gt items 1, 2, 3, and 5 each have two ties
pred_inds_to_update
=
gt_pred_pairs_of_highest_quality
[
:,
1
]
pred_inds_to_update
=
gt_pred_pairs_of_highest_quality
[
1
]
matches
[
pred_inds_to_update
]
=
all_matches
[
pred_inds_to_update
]
...
...
torchvision/models/detection/generalized_rcnn.py
View file @
15848edb
...
...
@@ -87,7 +87,7 @@ class GeneralizedRCNN(nn.Module):
degenerate_boxes
=
boxes
[:,
2
:]
<=
boxes
[:,
:
2
]
if
degenerate_boxes
.
any
():
# print the first degenerate box
bb_idx
=
degenerate_boxes
.
any
(
dim
=
1
)
.
nonzero
().
view
(
-
1
)
[
0
]
bb_idx
=
torch
.
where
(
degenerate_boxes
.
any
(
dim
=
1
)
)[
0
]
[
0
]
degen_bb
:
List
[
float
]
=
boxes
[
bb_idx
].
tolist
()
raise
ValueError
(
"All bounding boxes should have positive height and width."
" Found invalid box {} for target at index {}."
...
...
torchvision/models/detection/roi_heads.py
View file @
15848edb
...
...
@@ -37,7 +37,7 @@ def fastrcnn_loss(class_logits, box_regression, labels, regression_targets):
# get indices that correspond to the regression targets for
# the corresponding ground truth labels, to be used with
# advanced indexing
sampled_pos_inds_subset
=
torch
.
nonz
er
o
(
labels
>
0
)
.
squeeze
(
1
)
sampled_pos_inds_subset
=
torch
.
wh
er
e
(
labels
>
0
)
[
0
]
labels_pos
=
labels
[
sampled_pos_inds_subset
]
N
,
num_classes
=
class_logits
.
shape
box_regression
=
box_regression
.
reshape
(
N
,
-
1
,
4
)
...
...
@@ -296,7 +296,7 @@ def keypointrcnn_loss(keypoint_logits, proposals, gt_keypoints, keypoint_matched
keypoint_targets
=
torch
.
cat
(
heatmaps
,
dim
=
0
)
valid
=
torch
.
cat
(
valid
,
dim
=
0
).
to
(
dtype
=
torch
.
uint8
)
valid
=
torch
.
nonz
er
o
(
valid
)
.
squeeze
(
1
)
valid
=
torch
.
wh
er
e
(
valid
)
[
0
]
# torch.mean (in binary_cross_entropy_with_logits) doesn't
# accept empty tensors, so handle it separately
...
...
@@ -604,7 +604,7 @@ class RoIHeads(torch.nn.Module):
for
img_idx
,
(
pos_inds_img
,
neg_inds_img
)
in
enumerate
(
zip
(
sampled_pos_inds
,
sampled_neg_inds
)
):
img_sampled_inds
=
torch
.
nonz
er
o
(
pos_inds_img
|
neg_inds_img
)
.
squeeze
(
1
)
img_sampled_inds
=
torch
.
wh
er
e
(
pos_inds_img
|
neg_inds_img
)
[
0
]
sampled_inds
.
append
(
img_sampled_inds
)
return
sampled_inds
...
...
@@ -700,7 +700,7 @@ class RoIHeads(torch.nn.Module):
labels
=
labels
.
reshape
(
-
1
)
# remove low scoring boxes
inds
=
torch
.
nonz
er
o
(
scores
>
self
.
score_thresh
)
.
squeeze
(
1
)
inds
=
torch
.
wh
er
e
(
scores
>
self
.
score_thresh
)
[
0
]
boxes
,
scores
,
labels
=
boxes
[
inds
],
scores
[
inds
],
labels
[
inds
]
# remove empty boxes
...
...
@@ -784,7 +784,7 @@ class RoIHeads(torch.nn.Module):
mask_proposals
=
[]
pos_matched_idxs
=
[]
for
img_id
in
range
(
num_images
):
pos
=
torch
.
nonz
er
o
(
labels
[
img_id
]
>
0
)
.
squeeze
(
1
)
pos
=
torch
.
wh
er
e
(
labels
[
img_id
]
>
0
)
[
0
]
mask_proposals
.
append
(
proposals
[
img_id
][
pos
])
pos_matched_idxs
.
append
(
matched_idxs
[
img_id
][
pos
])
else
:
...
...
@@ -832,7 +832,7 @@ class RoIHeads(torch.nn.Module):
pos_matched_idxs
=
[]
assert
matched_idxs
is
not
None
for
img_id
in
range
(
num_images
):
pos
=
torch
.
nonz
er
o
(
labels
[
img_id
]
>
0
)
.
squeeze
(
1
)
pos
=
torch
.
wh
er
e
(
labels
[
img_id
]
>
0
)
[
0
]
keypoint_proposals
.
append
(
proposals
[
img_id
][
pos
])
pos_matched_idxs
.
append
(
matched_idxs
[
img_id
][
pos
])
else
:
...
...
torchvision/models/detection/rpn.py
View file @
15848edb
...
...
@@ -430,8 +430,8 @@ class RegionProposalNetwork(torch.nn.Module):
"""
sampled_pos_inds
,
sampled_neg_inds
=
self
.
fg_bg_sampler
(
labels
)
sampled_pos_inds
=
torch
.
nonz
er
o
(
torch
.
cat
(
sampled_pos_inds
,
dim
=
0
))
.
squeeze
(
1
)
sampled_neg_inds
=
torch
.
nonz
er
o
(
torch
.
cat
(
sampled_neg_inds
,
dim
=
0
))
.
squeeze
(
1
)
sampled_pos_inds
=
torch
.
wh
er
e
(
torch
.
cat
(
sampled_pos_inds
,
dim
=
0
))
[
0
]
sampled_neg_inds
=
torch
.
wh
er
e
(
torch
.
cat
(
sampled_neg_inds
,
dim
=
0
))
[
0
]
sampled_inds
=
torch
.
cat
([
sampled_pos_inds
,
sampled_neg_inds
],
dim
=
0
)
...
...
torchvision/ops/boxes.py
View file @
15848edb
...
...
@@ -100,7 +100,7 @@ def remove_small_boxes(boxes: Tensor, min_size: float) -> Tensor:
"""
ws
,
hs
=
boxes
[:,
2
]
-
boxes
[:,
0
],
boxes
[:,
3
]
-
boxes
[:,
1
]
keep
=
(
ws
>=
min_size
)
&
(
hs
>=
min_size
)
keep
=
keep
.
nonzero
().
squeeze
(
1
)
keep
=
torch
.
where
(
keep
)[
0
]
return
keep
...
...
torchvision/ops/poolers.py
View file @
15848edb
...
...
@@ -24,7 +24,7 @@ def _onnx_merge_levels(levels: Tensor, unmerged_results: List[Tensor]) -> Tensor
first_result
.
size
(
2
),
first_result
.
size
(
3
)),
dtype
=
dtype
,
device
=
device
)
for
level
in
range
(
len
(
unmerged_results
)):
index
=
(
levels
==
level
)
.
nonzero
()
.
view
(
-
1
,
1
,
1
,
1
)
index
=
torch
.
where
(
levels
==
level
)
[
0
]
.
view
(
-
1
,
1
,
1
,
1
)
index
=
index
.
expand
(
index
.
size
(
0
),
unmerged_results
[
level
].
size
(
1
),
unmerged_results
[
level
].
size
(
2
),
...
...
@@ -234,7 +234,7 @@ class MultiScaleRoIAlign(nn.Module):
tracing_results
=
[]
for
level
,
(
per_level_feature
,
scale
)
in
enumerate
(
zip
(
x_filtered
,
scales
)):
idx_in_level
=
torch
.
nonz
er
o
(
levels
==
level
)
.
squeeze
(
1
)
idx_in_level
=
torch
.
wh
er
e
(
levels
==
level
)
[
0
]
rois_per_level
=
rois
[
idx_in_level
]
result_idx_in_level
=
roi_align
(
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment