Unverified Commit f052c53f authored by Francisco Massa, committed by GitHub

Misc lint fixes (#1020)

parent 5a7010f6
@@ -195,7 +195,7 @@ def convert_to_coco_api(ds):
 def get_coco_api_from_dataset(dataset):
-    for i in range(10):
+    for _ in range(10):
         if isinstance(dataset, torchvision.datasets.CocoDetection):
             break
         if isinstance(dataset, torch.utils.data.Subset):
...
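Note on the rename above (and the similar renames in the hunks below): a loop variable that is never read in the loop body is conventionally named _ (or given a leading underscore, as in _i further down), which keeps linters such as Pylint's unused-variable check or flake8-bugbear's B007 quiet without changing behavior. A minimal sketch of the pattern; the unwrap_subset helper below is hypothetical and not part of this commit:

    # Hypothetical helper illustrating the unused-loop-variable convention:
    # the counter is named "_" because only the iteration count matters.
    def unwrap_subset(dataset, max_depth=10):
        for _ in range(max_depth):               # index never used in the body
            if not hasattr(dataset, "dataset"):  # reached the underlying dataset
                break
            dataset = dataset.dataset            # peel off one wrapper level
        return dataset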
@@ -101,7 +101,7 @@ def _compute_aspect_ratios_slow(dataset, indices=None):
                                               collate_fn=lambda x: x[0])
     aspect_ratios = []
     with tqdm(total=len(dataset)) as pbar:
-        for i, (img, _) in enumerate(data_loader):
+        for _i, (img, _) in enumerate(data_loader):
             pbar.update(1)
             height, width = img.shape[-2:]
             aspect_ratio = float(height) / float(width)
...
@@ -9,8 +9,6 @@ import torchvision
 import torchvision.models.detection
 import torchvision.models.detection.mask_rcnn
 
-from torchvision import transforms
-
 from coco_utils import get_coco, get_coco_kp
 from group_by_aspect_ratio import GroupedBatchSampler, create_aspect_ratio_groups
...
@@ -14,7 +14,9 @@ class RoIPoolTester(unittest.TestCase):
         cls.dtype = torch.float64
 
     def slow_roi_pooling(self, x, rois, pool_h, pool_w, spatial_scale=1,
-                         device=torch.device('cpu'), dtype=torch.float64):
+                         device=None, dtype=torch.float64):
+        if device is None:
+            device = torch.device("cpu")
         c = x.size(1)
         y = torch.zeros(rois.size(0), c, pool_h, pool_w, dtype=dtype, device=device)
...
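Note on the signature change above: the hard-coded default device=torch.device('cpu') is replaced with the usual None-sentinel pattern, so the default is resolved when the function is called rather than evaluated once at definition time, and callers can still pass an explicit device. A small sketch of the same pattern; the make_buffer function below is hypothetical and not part of the commit:

    import torch

    # Hypothetical function showing the None-sentinel default pattern:
    # the device default is resolved inside the body, at call time.
    def make_buffer(shape, device=None, dtype=torch.float64):
        if device is None:
            device = torch.device("cpu")
        return torch.zeros(*shape, dtype=dtype, device=device)

    buf = make_buffer((2, 3))                                  # CPU by default
    # buf = make_buffer((2, 3), device=torch.device("cuda"))   # explicit device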
@@ -141,7 +141,7 @@ class Tester(unittest.TestCase):
         img = to_pil_image(img)
         size = 100
         epsilon = 0.05
-        for i in range(10):
+        for _ in range(10):
             scale_min = round(random.random(), 2)
             scale_range = (scale_min, scale_min + round(random.random(), 2))
             aspect_min = max(round(random.random(), 2), epsilon)
...
@@ -153,7 +153,7 @@ class Tester(unittest.TestCase):
                             aspect_ratio_obtained == 1.0)
 
     def test_randomperspective(self):
-        for i in range(10):
+        for _ in range(10):
             height = random.randint(24, 32) * 2
             width = random.randint(24, 32) * 2
             img = torch.ones(3, height, width)
...