Unverified commit e2e511be, authored by Francisco Massa, committed by GitHub

Fix Python lint (#2226)

parent 57c54075
@@ -27,15 +27,15 @@ class Tester(unittest.TestCase):
         for _, labels in loader:
             bins = defaultdict(int)
-            for l in labels.tolist():
-                bins[l] += 1
+            for label in labels.tolist():
+                bins[label] += 1

             # Ensure that each batch has samples from exactly p classes
             self.assertEqual(len(bins), p)

             # Ensure that there are k samples from each class
-            for l in bins:
-                self.assertEqual(bins[l], k)
+            for b in bins:
+                self.assertEqual(bins[b], k)


 if __name__ == '__main__':
......
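For context: the assertions above pin down the contract of a p-by-k balanced batch sampler, i.e. every batch must contain exactly p distinct classes with exactly k samples from each. A minimal standalone version of that check, with a helper name of our own choosing (not part of the diff):

    from collections import defaultdict

    def assert_balanced_batch(labels, p, k):
        # Tally how often each class label occurs in one batch.
        bins = defaultdict(int)
        for label in labels:
            bins[label] += 1
        # Exactly p distinct classes per batch...
        assert len(bins) == p
        # ...and exactly k samples from each of them.
        assert all(count == k for count in bins.values())

    assert_balanced_batch([0, 0, 1, 1, 2, 2], p=3, k=2)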
@@ -59,6 +59,7 @@ def temp_video(num_frames, height, width, fps, lossless=False, video_codec=None,
         yield f.name, data
     os.unlink(f.name)

+
 @unittest.skipIf(get_video_backend() != "pyav" and not io._HAS_VIDEO_OPT,
                  "video_reader backend not available")
 @unittest.skipIf(av is None, "PyAV unavailable")
@@ -108,10 +109,10 @@ class TestIO(unittest.TestCase):
         with temp_video(10, 300, 300, 5, lossless=True) as (f_name, data):
             pts, _ = io.read_video_timestamps(f_name)
             for start in range(5):
-                for l in range(1, 4):
-                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + l - 1])
-                    s_data = data[start:(start + l)]
-                    self.assertEqual(len(lv), l)
+                for offset in range(1, 4):
+                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + offset - 1])
+                    s_data = data[start:(start + offset)]
+                    self.assertEqual(len(lv), offset)
                     self.assertTrue(s_data.equal(lv))

         if get_video_backend() == "pyav":
@@ -127,10 +128,10 @@ class TestIO(unittest.TestCase):
         with temp_video(100, 300, 300, 5, options=options) as (f_name, data):
             pts, _ = io.read_video_timestamps(f_name)
             for start in range(0, 80, 20):
-                for l in range(1, 4):
-                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + l - 1])
-                    s_data = data[start:(start + l)]
-                    self.assertEqual(len(lv), l)
+                for offset in range(1, 4):
+                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + offset - 1])
+                    s_data = data[start:(start + offset)]
+                    self.assertEqual(len(lv), offset)
                     self.assertTrue((s_data.float() - lv.float()).abs().max() < self.TOLERANCE)

             lv, _, _ = io.read_video(f_name, pts[4] + 1, pts[7])
@@ -201,10 +202,10 @@ class TestIO(unittest.TestCase):
             pts, _ = io.read_video_timestamps(f_name, pts_unit='sec')
             for start in range(5):
-                for l in range(1, 4):
-                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + l - 1], pts_unit='sec')
-                    s_data = data[start:(start + l)]
-                    self.assertEqual(len(lv), l)
+                for offset in range(1, 4):
+                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + offset - 1], pts_unit='sec')
+                    s_data = data[start:(start + offset)]
+                    self.assertEqual(len(lv), offset)
                     self.assertTrue(s_data.equal(lv))

             container = av.open(f_name)
......
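All three hunks above exercise the same torchvision.io behavior: read_video treats [start_pts, end_pts] as an inclusive range, so requesting pts[start] through pts[start + offset - 1] should yield exactly offset frames. A hedged usage sketch of that pattern (the file name is hypothetical):

    from torchvision import io

    # Presentation timestamps of every frame, in order.
    pts, fps = io.read_video_timestamps("clip.mp4")

    # The range is inclusive on both ends, so this should return
    # exactly 3 frames: the ones at pts[2], pts[3] and pts[4].
    frames, _, info = io.read_video("clip.mp4", pts[2], pts[4])
    assert len(frames) == 3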
@@ -49,7 +49,7 @@ class USPS(VisionDataset):
             import bz2
             with bz2.open(full_path) as fp:
-                raw_data = [l.decode().split() for l in fp.readlines()]
+                raw_data = [line.decode().split() for line in fp.readlines()]
                 imgs = [[x.split(':')[-1] for x in data[1:]] for data in raw_data]
                 imgs = np.asarray(imgs, dtype=np.float32).reshape((-1, 16, 16))
                 imgs = ((imgs + 1) / 2 * 255).astype(dtype=np.uint8)
......
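The comprehension renamed here reads the USPS distribution format, where each bz2-compressed line is a label followed by index:value pairs. A toy illustration of the same parsing on one made-up line (real lines carry 256 values, reshaped to 16x16):

    import numpy as np

    line = b"6 1:-0.98 2:-0.54 3:0.25"  # fabricated sample in the USPS layout
    tokens = line.decode().split()
    label = tokens[0]                                  # leading field is the digit label
    values = [x.split(':')[-1] for x in tokens[1:]]    # keep only the value of each pair
    pixels = np.asarray(values, dtype=np.float32)
    # The dataset then rescales pixel values from [-1, 1] to [0, 255]:
    pixels = ((pixels + 1) / 2 * 255).astype(np.uint8)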
@@ -221,10 +221,10 @@ class KeypointRCNNHeads(nn.Sequential):
     def __init__(self, in_channels, layers):
         d = []
         next_feature = in_channels
-        for l in layers:
-            d.append(misc_nn_ops.Conv2d(next_feature, l, 3, stride=1, padding=1))
+        for out_channels in layers:
+            d.append(misc_nn_ops.Conv2d(next_feature, out_channels, 3, stride=1, padding=1))
             d.append(nn.ReLU(inplace=True))
-            next_feature = l
+            next_feature = out_channels
         super(KeypointRCNNHeads, self).__init__(*d)
         for m in self.children():
             if isinstance(m, misc_nn_ops.Conv2d):
......
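The loop being renamed is the usual pattern for building a conv stack from a list of widths: thread the running channel count through the list so each layer consumes the previous layer's output width. A plain-PyTorch sketch of the same pattern (the helper name is ours):

    import torch.nn as nn

    def make_conv_stack(in_channels, layers):
        blocks = []
        next_feature = in_channels
        for out_channels in layers:
            # Each conv consumes the previous width and produces the next.
            blocks.append(nn.Conv2d(next_feature, out_channels, 3, stride=1, padding=1))
            blocks.append(nn.ReLU(inplace=True))
            next_feature = out_channels
        return nn.Sequential(*blocks)

    head = make_conv_stack(256, [512, 512])  # 256 -> 512 -> 512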
@@ -75,7 +75,7 @@ def maskrcnn_inference(x, labels):
     # select masks corresponding to the predicted classes
     num_masks = x.shape[0]
-    boxes_per_image = [l.shape[0] for l in labels]
+    boxes_per_image = [label.shape[0] for label in labels]
     labels = torch.cat(labels)
     index = torch.arange(num_masks, device=labels.device)
     mask_prob = mask_prob[index, labels][:, None]
@@ -112,7 +112,7 @@ def maskrcnn_loss(mask_logits, proposals, gt_masks, gt_labels, mask_matched_idxs):
     """
     discretization_size = mask_logits.shape[-1]
-    labels = [l[idxs] for l, idxs in zip(gt_labels, mask_matched_idxs)]
+    labels = [gt_label[idxs] for gt_label, idxs in zip(gt_labels, mask_matched_idxs)]
     mask_targets = [
         project_masks_on_boxes(m, p, i, discretization_size)
         for m, p, i in zip(gt_masks, proposals, mask_matched_idxs)
......
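The context line mask_prob[index, labels] is the interesting bit of maskrcnn_inference: pairwise advanced indexing keeps, for each detection, only the mask of its own predicted class. A small self-contained illustration with made-up shapes:

    import torch

    mask_prob = torch.rand(5, 3, 28, 28)       # 5 detections, 3 classes
    labels = torch.tensor([2, 0, 1, 1, 2])     # predicted class per detection
    index = torch.arange(5)
    # Row i is indexed together with labels[i], so each detection
    # keeps exactly one class-specific mask.
    selected = mask_prob[index, labels][:, None]
    assert selected.shape == (5, 1, 28, 28)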
@@ -195,9 +195,9 @@ class RPNHead(nn.Module):
             in_channels, num_anchors * 4, kernel_size=1, stride=1
         )

-        for l in self.children():
-            torch.nn.init.normal_(l.weight, std=0.01)
-            torch.nn.init.constant_(l.bias, 0)
+        for layer in self.children():
+            torch.nn.init.normal_(layer.weight, std=0.01)
+            torch.nn.init.constant_(layer.bias, 0)

     def forward(self, x):
         # type: (List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]
......
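One detail worth knowing about the renamed loop: self.children() yields only direct submodules, so it initializes every conv layer registered directly on the head without recursing into anything nested (modules() would recurse). A reduced sketch of the pattern, with a made-up module:

    import torch
    import torch.nn as nn

    class TinyHead(nn.Module):
        def __init__(self, in_channels, num_anchors):
            super().__init__()
            self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1)
            self.bbox_pred = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=1, stride=1)
            # children() walks the direct submodules only, which here
            # are exactly the conv layers we want to initialize.
            for layer in self.children():
                torch.nn.init.normal_(layer.weight, std=0.01)
                torch.nn.init.constant_(layer.bias, 0)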
@@ -111,15 +111,15 @@ class GeneralizedRCNNTransform(nn.Module):
         std = torch.as_tensor(self.image_std, dtype=dtype, device=device)
         return (image - mean[:, None, None]) / std[:, None, None]

-    def torch_choice(self, l):
+    def torch_choice(self, k):
         # type: (List[int]) -> int
         """
         Implements `random.choice` via torch ops so it can be compiled with
         TorchScript. Remove if https://github.com/pytorch/pytorch/issues/25803
         is fixed.
         """
-        index = int(torch.empty(1).uniform_(0., float(len(l))).item())
-        return l[index]
+        index = int(torch.empty(1).uniform_(0., float(len(k))).item())
+        return k[index]

     def resize(self, image, target):
         # type: (Tensor, Optional[Dict[str, Tensor]]) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]
......
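The docstring spells out why torch_choice exists: TorchScript could not compile random.choice at the time, so the pick is drawn with torch ops instead. The same trick in isolation:

    import torch

    def torch_choice(k):
        # Draw a uniform float in [0, len(k)) and truncate it to an
        # integer index, mimicking random.choice with torch ops only.
        index = int(torch.empty(1).uniform_(0., float(len(k))).item())
        return k[index]

    size = torch_choice([480, 600, 800])  # one candidate size, chosen at random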
@@ -9,6 +9,7 @@ from torchvision.ops.boxes import box_area
 from torch.jit.annotations import Optional, List, Dict, Tuple

 import torchvision

+
 # copying result_idx_in_level to a specific index in result[]
 # is not supported by ONNX tracing yet.
 # _onnx_merge_levels() is an implementation supported by ONNX
@@ -21,13 +22,13 @@ def _onnx_merge_levels(levels, unmerged_results):
     res = torch.zeros((levels.size(0), first_result.size(1),
                        first_result.size(2), first_result.size(3)),
                       dtype=dtype, device=device)
-    for l in range(len(unmerged_results)):
-        index = (levels == l).nonzero().view(-1, 1, 1, 1)
+    for level in range(len(unmerged_results)):
+        index = (levels == level).nonzero().view(-1, 1, 1, 1)
         index = index.expand(index.size(0),
-                             unmerged_results[l].size(1),
-                             unmerged_results[l].size(2),
-                             unmerged_results[l].size(3))
-        res = res.scatter(0, index, unmerged_results[l])
+                             unmerged_results[level].size(1),
+                             unmerged_results[level].size(2),
+                             unmerged_results[level].size(3))
+        res = res.scatter(0, index, unmerged_results[level])
     return res
......
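The renamed loop is the whole point of _onnx_merge_levels: indexed assignment into res is not traceable to ONNX, so each level's pooled results are written back into original box order with expand + scatter. A self-contained sketch of that step with made-up shapes:

    import torch

    levels = torch.tensor([0, 1, 0, 1])       # FPN level assigned to each box
    unmerged = [torch.zeros(2, 3, 4, 4),      # pooled results for level-0 boxes
                torch.ones(2, 3, 4, 4)]       # pooled results for level-1 boxes
    res = torch.zeros(4, 3, 4, 4)
    for level in range(len(unmerged)):
        index = (levels == level).nonzero().view(-1, 1, 1, 1)
        # Broadcast each destination row index across the C x H x W slab...
        index = index.expand(index.size(0), 3, 4, 4)
        # ...so scatter writes that level's results into the right rows.
        res = res.scatter(0, index, unmerged[level])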
@@ -676,7 +676,7 @@ def adjust_hue(img, hue_factor):
         PIL Image: Hue adjusted image.
     """
     if not(-0.5 <= hue_factor <= 0.5):
-        raise ValueError('hue_factor is not in [-0.5, 0.5].'.format(hue_factor))
+        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))

     if not _is_pil_image(img):
         raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
......
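The bug fixed here is easy to miss: str.format silently ignores surplus arguments, so the original message had no placeholder and never showed the offending value. A quick before/after:

    # Old: no placeholder, so 0.7 is silently dropped from the message.
    assert 'hue_factor is not in [-0.5, 0.5].'.format(0.7) == \
        'hue_factor is not in [-0.5, 0.5].'
    # New: the offending value is reported.
    assert 'hue_factor ({}) is not in [-0.5, 0.5].'.format(0.7) == \
        'hue_factor (0.7) is not in [-0.5, 0.5].'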
@@ -807,8 +807,8 @@ class LinearTransformation(object):
         if mean_vector.size(0) != transformation_matrix.size(0):
             raise ValueError("mean_vector should have the same length {}".format(mean_vector.size(0)) +
-                             " as any one of the dimensions of the transformation_matrix [{} x {}]"
-                             .format(transformation_matrix.size()))
+                             " as any one of the dimensions of the transformation_matrix [{}]"
+                             .format(tuple(transformation_matrix.size())))

         self.transformation_matrix = transformation_matrix
         self.mean_vector = mean_vector
......
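The old message here was doubly broken: it fed a single torch.Size to two placeholders, so constructing the error raised IndexError before anything was reported. The fix formats the whole size as one tuple:

    import torch

    size = torch.Size([3, 3])
    # Old: two placeholders, one argument -> IndexError while
    # building the error string itself.
    try:
        '[{} x {}]'.format(size)
    except IndexError:
        pass
    # New: a single placeholder, formatted as a tuple.
    assert '[{}]'.format(tuple(size)) == '[(3, 3)]'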