Unverified commit 15848edb, authored by Francisco Massa, committed by GitHub

Fix deprecation warning in nonzero (#2705)

Replace nonzero with where, now that it works with just a condition
parent 6a43a1f8
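For context on the pattern this commit applies throughout: `torch.where` accepts a bare boolean condition and returns a tuple of index tensors, one per input dimension, while the `torch.nonzero(tensor)` overload triggered the deprecation warning this commit removes. A minimal sketch of the 1-D replacement used in the hunks below (the tensor values are made up for illustration):

```python
import torch

matched_idxs = torch.tensor([2, 0, 1, 0, 3])

# Old pattern: nonzero() returns an (N, 1) tensor of indices for a 1-D
# input, so .squeeze(1) is needed to get a flat index tensor. This is
# the overload that emitted the deprecation warning.
positive_old = torch.nonzero(matched_idxs >= 1).squeeze(1)

# New pattern: where(condition) returns a tuple with one 1-D index
# tensor per dimension; [0] selects the only dimension's indices.
positive_new = torch.where(matched_idxs >= 1)[0]

assert torch.equal(positive_old, positive_new)  # tensor([0, 2, 4])
```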
@@ -41,8 +41,8 @@ class BalancedPositiveNegativeSampler(object):
         pos_idx = []
         neg_idx = []
         for matched_idxs_per_image in matched_idxs:
-            positive = torch.nonzero(matched_idxs_per_image >= 1).squeeze(1)
-            negative = torch.nonzero(matched_idxs_per_image == 0).squeeze(1)
+            positive = torch.where(matched_idxs_per_image >= 1)[0]
+            negative = torch.where(matched_idxs_per_image == 0)[0]
             num_pos = int(self.batch_size_per_image * self.positive_fraction)
             # protect against not enough positive examples
@@ -317,7 +317,7 @@ class Matcher(object):
         # For each gt, find the prediction with which it has highest quality
         highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
         # Find highest quality match available, even if it is low, including ties
-        gt_pred_pairs_of_highest_quality = torch.nonzero(
+        gt_pred_pairs_of_highest_quality = torch.where(
             match_quality_matrix == highest_quality_foreach_gt[:, None]
         )
         # Example gt_pred_pairs_of_highest_quality:
@@ -334,7 +334,7 @@ class Matcher(object):
         # Each row is a (gt index, prediction index)
         # Note how gt items 1, 2, 3, and 5 each have two ties
-        pred_inds_to_update = gt_pred_pairs_of_highest_quality[:, 1]
+        pred_inds_to_update = gt_pred_pairs_of_highest_quality[1]
         matches[pred_inds_to_update] = all_matches[pred_inds_to_update]
...
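The `Matcher` change above is the one place where the replacement is not mechanical: on a 2-D condition, `torch.nonzero` returns a single (N, 2) tensor of (row, col) pairs, while `torch.where` returns a `(rows, cols)` tuple, which is why the indexing changes from `[:, 1]` to `[1]`. A small sketch of the difference (the quality matrix is illustrative):

```python
import torch

quality = torch.tensor([[0.9, 0.1],
                        [0.3, 0.9]])
best, _ = quality.max(dim=1)

# nonzero on a 2-D condition: one (N, 2) tensor of (row, col) pairs,
# so the prediction (column) indices are pairs[:, 1].
pairs = torch.nonzero(quality == best[:, None])

# where on the same condition: a tuple (row_indices, col_indices),
# so the prediction indices are simply the second tuple element.
rows, cols = torch.where(quality == best[:, None])

assert torch.equal(pairs[:, 1], cols)
```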
@@ -87,7 +87,7 @@ class GeneralizedRCNN(nn.Module):
                 degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
                 if degenerate_boxes.any():
                     # print the first degenerate box
-                    bb_idx = degenerate_boxes.any(dim=1).nonzero().view(-1)[0]
+                    bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
                     degen_bb: List[float] = boxes[bb_idx].tolist()
                     raise ValueError("All bounding boxes should have positive height and width."
                                      " Found invalid box {} for target at index {}."
...
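The same condition-to-index idiom extends to picking the first offending row of a boolean matrix, as in the degenerate-box check above. A toy illustration (the box coordinates are made up):

```python
import torch

# (x1, y1, x2, y2) boxes; the second box has x2 <= x1, so it is degenerate.
boxes = torch.tensor([[0., 0., 10., 10.],
                      [5., 5., 5., 8.]])

degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
if degenerate_boxes.any():
    # Index of the first box with any degenerate coordinate:
    # [0] unpacks the single index tensor, the second [0] takes its head.
    bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
    print(f"invalid box {boxes[bb_idx].tolist()} at index {bb_idx.item()}")
```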
@@ -37,7 +37,7 @@ def fastrcnn_loss(class_logits, box_regression, labels, regression_targets):
     # get indices that correspond to the regression targets for
     # the corresponding ground truth labels, to be used with
     # advanced indexing
-    sampled_pos_inds_subset = torch.nonzero(labels > 0).squeeze(1)
+    sampled_pos_inds_subset = torch.where(labels > 0)[0]
     labels_pos = labels[sampled_pos_inds_subset]
     N, num_classes = class_logits.shape
     box_regression = box_regression.reshape(N, -1, 4)
@@ -296,7 +296,7 @@ def keypointrcnn_loss(keypoint_logits, proposals, gt_keypoints, keypoint_matched_idxs):
     keypoint_targets = torch.cat(heatmaps, dim=0)
     valid = torch.cat(valid, dim=0).to(dtype=torch.uint8)
-    valid = torch.nonzero(valid).squeeze(1)
+    valid = torch.where(valid)[0]
     # torch.mean (in binary_cross_entropy_with_logits) doesn't
     # accept empty tensors, so handle it separately
@@ -604,7 +604,7 @@ class RoIHeads(torch.nn.Module):
         for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
             zip(sampled_pos_inds, sampled_neg_inds)
         ):
-            img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img).squeeze(1)
+            img_sampled_inds = torch.where(pos_inds_img | neg_inds_img)[0]
             sampled_inds.append(img_sampled_inds)
         return sampled_inds
@@ -700,7 +700,7 @@ class RoIHeads(torch.nn.Module):
         labels = labels.reshape(-1)
         # remove low scoring boxes
-        inds = torch.nonzero(scores > self.score_thresh).squeeze(1)
+        inds = torch.where(scores > self.score_thresh)[0]
         boxes, scores, labels = boxes[inds], scores[inds], labels[inds]
         # remove empty boxes
@@ -784,7 +784,7 @@ class RoIHeads(torch.nn.Module):
             mask_proposals = []
             pos_matched_idxs = []
             for img_id in range(num_images):
-                pos = torch.nonzero(labels[img_id] > 0).squeeze(1)
+                pos = torch.where(labels[img_id] > 0)[0]
                 mask_proposals.append(proposals[img_id][pos])
                 pos_matched_idxs.append(matched_idxs[img_id][pos])
         else:
@@ -832,7 +832,7 @@ class RoIHeads(torch.nn.Module):
             pos_matched_idxs = []
             assert matched_idxs is not None
             for img_id in range(num_images):
-                pos = torch.nonzero(labels[img_id] > 0).squeeze(1)
+                pos = torch.where(labels[img_id] > 0)[0]
                 keypoint_proposals.append(proposals[img_id][pos])
                 pos_matched_idxs.append(matched_idxs[img_id][pos])
         else:
...
@@ -430,8 +430,8 @@ class RegionProposalNetwork(torch.nn.Module):
         """
         sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
-        sampled_pos_inds = torch.nonzero(torch.cat(sampled_pos_inds, dim=0)).squeeze(1)
-        sampled_neg_inds = torch.nonzero(torch.cat(sampled_neg_inds, dim=0)).squeeze(1)
+        sampled_pos_inds = torch.where(torch.cat(sampled_pos_inds, dim=0))[0]
+        sampled_neg_inds = torch.where(torch.cat(sampled_neg_inds, dim=0))[0]
         sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)
...
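A recurring sub-pattern in the RPN hunk above is converting a list of per-image boolean masks into one flat index tensor. A minimal sketch with made-up masks:

```python
import torch

# Per-image boolean masks marking sampled positive anchors.
sampled_pos_inds = [torch.tensor([True, False, True]),
                    torch.tensor([False, True])]

# Concatenate the masks into one 1-D mask, then convert to flat indices.
flat_pos_inds = torch.where(torch.cat(sampled_pos_inds, dim=0))[0]
print(flat_pos_inds)  # tensor([0, 2, 4])
```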
@@ -100,7 +100,7 @@ def remove_small_boxes(boxes: Tensor, min_size: float) -> Tensor:
     """
     ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]
     keep = (ws >= min_size) & (hs >= min_size)
-    keep = keep.nonzero().squeeze(1)
+    keep = torch.where(keep)[0]
     return keep
...
@@ -24,7 +24,7 @@ def _onnx_merge_levels(levels: Tensor, unmerged_results: List[Tensor]) -> Tensor:
                           first_result.size(2), first_result.size(3)),
                          dtype=dtype, device=device)
     for level in range(len(unmerged_results)):
-        index = (levels == level).nonzero().view(-1, 1, 1, 1)
+        index = torch.where(levels == level)[0].view(-1, 1, 1, 1)
         index = index.expand(index.size(0),
                              unmerged_results[level].size(1),
                              unmerged_results[level].size(2),
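In the ONNX helper above, the flat level indices are reshaped and broadcast so each index addresses a full (C, H, W) feature block along dim 0. A small sketch of that view/expand step (the shapes are illustrative):

```python
import torch

levels = torch.tensor([0, 1, 0, 1])

# Flat indices of the rois assigned to level 0.
index = torch.where(levels == 0)[0]          # tensor([0, 2])

# Reshape to (N, 1, 1, 1) and broadcast to (N, C, H, W), the shape
# expected when gathering/scattering whole feature maps along dim 0.
C, H, W = 3, 2, 2
index = index.view(-1, 1, 1, 1).expand(index.size(0), C, H, W)
print(index.shape)  # torch.Size([2, 3, 2, 2])
```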
@@ -234,7 +234,7 @@ class MultiScaleRoIAlign(nn.Module):
         tracing_results = []
         for level, (per_level_feature, scale) in enumerate(zip(x_filtered, scales)):
-            idx_in_level = torch.nonzero(levels == level).squeeze(1)
+            idx_in_level = torch.where(levels == level)[0]
             rois_per_level = rois[idx_in_level]
             result_idx_in_level = roi_align(
...