Unverified Commit 15848edb authored by Francisco Massa, committed by GitHub

Fix deprecation warning in nonzero (#2705)

Replace torch.nonzero with torch.where, now that torch.where works with just a condition argument
parent 6a43a1f8
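
A minimal standalone sketch of the equivalence every hunk below relies on (values made up for illustration): for a 1-D boolean mask, torch.where(cond) returns a one-element tuple of index tensors, so torch.where(cond)[0] yields the same indices as torch.nonzero(cond).squeeze(1), which emitted a deprecation warning at the time of this commit:

    import torch

    mask = torch.tensor([True, False, True, True])
    old = torch.nonzero(mask).squeeze(1)  # warned: nonzero() without as_tuple
    new = torch.where(mask)[0]            # tuple of per-dim indices; take dim 0
    assert torch.equal(old, new)          # both are tensor([0, 2, 3])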
@@ -41,8 +41,8 @@ class BalancedPositiveNegativeSampler(object):
         pos_idx = []
         neg_idx = []
         for matched_idxs_per_image in matched_idxs:
-            positive = torch.nonzero(matched_idxs_per_image >= 1).squeeze(1)
-            negative = torch.nonzero(matched_idxs_per_image == 0).squeeze(1)
+            positive = torch.where(matched_idxs_per_image >= 1)[0]
+            negative = torch.where(matched_idxs_per_image == 0)[0]
             num_pos = int(self.batch_size_per_image * self.positive_fraction)
             # protect against not enough positive examples
@@ -317,7 +317,7 @@ class Matcher(object):
         # For each gt, find the prediction with which it has highest quality
         highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
         # Find highest quality match available, even if it is low, including ties
-        gt_pred_pairs_of_highest_quality = torch.nonzero(
+        gt_pred_pairs_of_highest_quality = torch.where(
             match_quality_matrix == highest_quality_foreach_gt[:, None]
         )
         # Example gt_pred_pairs_of_highest_quality:
@@ -334,7 +334,7 @@ class Matcher(object):
         # Each row is a (gt index, prediction index)
         # Note how gt items 1, 2, 3, and 5 each have two ties
-        pred_inds_to_update = gt_pred_pairs_of_highest_quality[:, 1]
+        pred_inds_to_update = gt_pred_pairs_of_highest_quality[1]
         matches[pred_inds_to_update] = all_matches[pred_inds_to_update]
...
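The Matcher change is the one place where downstream indexing also changes: on a 2-D mask, torch.nonzero returns an N x 2 tensor of (row, column) pairs, while torch.where returns a (rows, columns) tuple, so selecting the prediction indices goes from [:, 1] to [1]. A small illustrative check (made-up matrix, not from the diff):

    import torch

    m = torch.tensor([[True, False, True],
                      [False, True, False]])
    pairs = torch.nonzero(m)     # shape (3, 2): rows of (gt index, pred index)
    rows, cols = torch.where(m)  # two 1-D index tensors instead
    assert torch.equal(pairs[:, 1], cols)  # tensor([0, 2, 1])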
@@ -87,7 +87,7 @@ class GeneralizedRCNN(nn.Module):
                 degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
                 if degenerate_boxes.any():
                     # print the first degenerate box
-                    bb_idx = degenerate_boxes.any(dim=1).nonzero().view(-1)[0]
+                    bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
                     degen_bb: List[float] = boxes[bb_idx].tolist()
                     raise ValueError("All bounding boxes should have positive height and width."
                                      " Found invalid box {} for target at index {}."
...
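Here the new expression indexes the tuple first and the tensor second: torch.where(...)[0] is the 1-D tensor of offending row indices, and the trailing [0] picks the first one. The enclosing if degenerate_boxes.any() guard guarantees the tensor is non-empty. A toy check with fabricated boxes:

    import torch

    degenerate = torch.tensor([[False, False],
                               [True, False]])
    bb_idx = torch.where(degenerate.any(dim=1))[0][0]
    assert bb_idx.item() == 1  # row 1 is the first (and only) degenerate box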
@@ -37,7 +37,7 @@ def fastrcnn_loss(class_logits, box_regression, labels, regression_targets):
     # get indices that correspond to the regression targets for
     # the corresponding ground truth labels, to be used with
     # advanced indexing
-    sampled_pos_inds_subset = torch.nonzero(labels > 0).squeeze(1)
+    sampled_pos_inds_subset = torch.where(labels > 0)[0]
     labels_pos = labels[sampled_pos_inds_subset]
     N, num_classes = class_logits.shape
     box_regression = box_regression.reshape(N, -1, 4)
@@ -296,7 +296,7 @@ def keypointrcnn_loss(keypoint_logits, proposals, gt_keypoints, keypoint_matched
     keypoint_targets = torch.cat(heatmaps, dim=0)
     valid = torch.cat(valid, dim=0).to(dtype=torch.uint8)
-    valid = torch.nonzero(valid).squeeze(1)
+    valid = torch.where(valid)[0]
     # torch.mean (in binary_cross_entropy_with_logits) doesn't
     # accept empty tensors, so handle it separately
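Note that valid is a uint8 tensor here, not bool. Per the PyTorch docs, torch.where(condition) is identical to torch.nonzero(condition, as_tuple=True), and nonzero treats any non-zero element as set, so the mask works unchanged (toy values):

    import torch

    valid = torch.tensor([1, 0, 1], dtype=torch.uint8)
    assert torch.equal(torch.where(valid)[0],
                       torch.nonzero(valid).squeeze(1))  # tensor([0, 2])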
@@ -604,7 +604,7 @@ class RoIHeads(torch.nn.Module):
         for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
             zip(sampled_pos_inds, sampled_neg_inds)
         ):
-            img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img).squeeze(1)
+            img_sampled_inds = torch.where(pos_inds_img | neg_inds_img)[0]
             sampled_inds.append(img_sampled_inds)
         return sampled_inds
@@ -700,7 +700,7 @@ class RoIHeads(torch.nn.Module):
             labels = labels.reshape(-1)
             # remove low scoring boxes
-            inds = torch.nonzero(scores > self.score_thresh).squeeze(1)
+            inds = torch.where(scores > self.score_thresh)[0]
             boxes, scores, labels = boxes[inds], scores[inds], labels[inds]
             # remove empty boxes
@@ -784,7 +784,7 @@ class RoIHeads(torch.nn.Module):
                 mask_proposals = []
                 pos_matched_idxs = []
                 for img_id in range(num_images):
-                    pos = torch.nonzero(labels[img_id] > 0).squeeze(1)
+                    pos = torch.where(labels[img_id] > 0)[0]
                     mask_proposals.append(proposals[img_id][pos])
                     pos_matched_idxs.append(matched_idxs[img_id][pos])
             else:
@@ -832,7 +832,7 @@ class RoIHeads(torch.nn.Module):
                 pos_matched_idxs = []
                 assert matched_idxs is not None
                 for img_id in range(num_images):
-                    pos = torch.nonzero(labels[img_id] > 0).squeeze(1)
+                    pos = torch.where(labels[img_id] > 0)[0]
                     keypoint_proposals.append(proposals[img_id][pos])
                     pos_matched_idxs.append(matched_idxs[img_id][pos])
             else:
...
@@ -430,8 +430,8 @@ class RegionProposalNetwork(torch.nn.Module):
         """
         sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
-        sampled_pos_inds = torch.nonzero(torch.cat(sampled_pos_inds, dim=0)).squeeze(1)
-        sampled_neg_inds = torch.nonzero(torch.cat(sampled_neg_inds, dim=0)).squeeze(1)
+        sampled_pos_inds = torch.where(torch.cat(sampled_pos_inds, dim=0))[0]
+        sampled_neg_inds = torch.where(torch.cat(sampled_neg_inds, dim=0))[0]
         sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)
...
@@ -100,7 +100,7 @@ def remove_small_boxes(boxes: Tensor, min_size: float) -> Tensor:
     """
     ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]
     keep = (ws >= min_size) & (hs >= min_size)
-    keep = keep.nonzero().squeeze(1)
+    keep = torch.where(keep)[0]
     return keep
...
@@ -24,7 +24,7 @@ def _onnx_merge_levels(levels: Tensor, unmerged_results: List[Tensor]) -> Tensor
                        first_result.size(2), first_result.size(3)),
                       dtype=dtype, device=device)
     for level in range(len(unmerged_results)):
-        index = (levels == level).nonzero().view(-1, 1, 1, 1)
+        index = torch.where(levels == level)[0].view(-1, 1, 1, 1)
         index = index.expand(index.size(0),
                              unmerged_results[level].size(1),
                              unmerged_results[level].size(2),
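In this ONNX-export helper, the 1-D index from torch.where is reshaped to (N, 1, 1, 1) and expanded across the feature dimensions so it lines up with the 4-D per-level results when they are merged back into their original RoI slots (the merge itself is outside the shown hunk). A shape-only sketch, with 256 x 7 x 7 as assumed feature dimensions not taken from the diff:

    import torch

    levels = torch.tensor([0, 1, 0, 1])
    index = torch.where(levels == 1)[0]  # tensor([1, 3])
    index = index.view(-1, 1, 1, 1)      # shape (2, 1, 1, 1)
    index = index.expand(2, 256, 7, 7)   # repeat each RoI index over C, H, W
    print(index.shape)                   # torch.Size([2, 256, 7, 7])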
@@ -234,7 +234,7 @@ class MultiScaleRoIAlign(nn.Module):
         tracing_results = []
         for level, (per_level_feature, scale) in enumerate(zip(x_filtered, scales)):
-            idx_in_level = torch.nonzero(levels == level).squeeze(1)
+            idx_in_level = torch.where(levels == level)[0]
             rois_per_level = rois[idx_in_level]
             result_idx_in_level = roi_align(
...