Unverified Commit 0777b51e authored by Francisco Massa's avatar Francisco Massa Committed by GitHub
Browse files

Add CircleCI job for python lint (#2056)

* Add CircleCI job for python lint

* Break lint

* Fix

* Fix lint

* Re-enable all tests and remove travis python lint
parent dcfcc867
@@ -76,6 +76,17 @@ jobs:
       python .circleci/regenerate.py
       git diff --exit-code || (echo ".circleci/config.yml not in sync with config.yml.in! Run .circleci/regenerate.py to update config"; exit 1)

+  python_lint:
+    docker:
+      - image: circleci/python:3.7
+    steps:
+      - checkout
+      - run:
+          command: |
+            pip install --user --progress-bar off flake8 typing
+            flake8 .
+
   binary_linux_wheel:
     <<: *binary_common
     docker:
@@ -509,6 +520,8 @@ workflows:
           name: torchvision_win_py3.6_cu101
           python_version: "3.6"
           cu_version: "cu101"
+      - python_lint
   nightly:
     jobs:
...
@@ -76,6 +76,17 @@ jobs:
       python .circleci/regenerate.py
       git diff --exit-code || (echo ".circleci/config.yml not in sync with config.yml.in! Run .circleci/regenerate.py to update config"; exit 1)

+  python_lint:
+    docker:
+      - image: circleci/python:3.7
+    steps:
+      - checkout
+      - run:
+          command: |
+            pip install --user --progress-bar off flake8 typing
+            flake8 .
+
   binary_linux_wheel:
     <<: *binary_common
     docker:
@@ -311,6 +322,8 @@ workflows:
           name: torchvision_win_py3.6_cu101
           python_version: "3.6"
           cu_version: "cu101"
+      - python_lint
   nightly:
 {%- endif %}
...
@@ -15,11 +15,6 @@ matrix:
       before_install: skip
       install: skip
       script: ./travis-scripts/run-clang-format/run-clang-format.py -r torchvision/csrc
-    - env: LINT_CHECK
-      python: "3.6"
-      install: pip install flake8 typing
-      script: flake8 .circleci
-      after_success: []
     - python: "3.6"
       env: IMAGE_BACKEND=Pillow-SIMD
     - python: "3.6"
...
@@ -95,9 +95,9 @@ def get_extensions():
             includes="torchvision/csrc/cuda/*",
             show_detailed=True,
             is_pytorch_extension=True,
         )
         source_cuda = glob.glob(os.path.join(extensions_dir, 'hip', '*.hip'))
-        ## Copy over additional files
+        # Copy over additional files
         shutil.copy("torchvision/csrc/cuda/cuda_helpers.h", "torchvision/csrc/hip/cuda_helpers.h")
         shutil.copy("torchvision/csrc/cuda/vision_cuda.h", "torchvision/csrc/hip/vision_cuda.h")
@@ -122,7 +122,8 @@ def get_extensions():
     define_macros = []
     extra_compile_args = {}
-    if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) or os.getenv('FORCE_CUDA', '0') == '1':
+    if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) \
+            or os.getenv('FORCE_CUDA', '0') == '1':
         extension = CUDAExtension
         sources += source_cuda
         if not is_rocm_pytorch:
...
@@ -253,13 +253,14 @@ def svhn_root():
     yield root

 @contextlib.contextmanager
 def voc_root():
     with get_tmp_dir() as tmp_dir:
         voc_dir = os.path.join(tmp_dir, 'VOCdevkit',
-                               'VOC2012','ImageSets','Main')
+                               'VOC2012', 'ImageSets', 'Main')
         os.makedirs(voc_dir)
-        train_file = os.path.join(voc_dir,'train.txt')
+        train_file = os.path.join(voc_dir, 'train.txt')
         with open(train_file, 'w') as f:
             f.write('test')
...
@@ -415,13 +415,14 @@ class ONNXExporterTester(unittest.TestCase):
         assert torch.all(out2[0].eq(out_trace2[0]))
         assert torch.all(out2[1].eq(out_trace2[1]))

     @unittest.skip("Disable test until export of interpolate script module to ONNX is fixed")
     def test_keypoint_rcnn(self):
         class KeyPointRCNN(torch.nn.Module):
             def __init__(self):
                 super(KeyPointRCNN, self).__init__()
-                self.model = models.detection.keypoint_rcnn.keypointrcnn_resnet50_fpn(pretrained=True, min_size=200, max_size=300)
+                self.model = models.detection.keypoint_rcnn.keypointrcnn_resnet50_fpn(
+                    pretrained=True, min_size=200, max_size=300)

             def forward(self, images):
                 output = self.model(images)
...
@@ -202,7 +202,7 @@ class Tester(unittest.TestCase):
                 startpoints, endpoints = transforms.RandomPerspective.get_params(width, height, 0.5)
                 tr_img = F.perspective(img_conv, startpoints, endpoints, fill=fill)
                 pixel = tr_img.getpixel((0, 0))

                 if not isinstance(pixel, tuple):
                     pixel = (pixel,)
                 self.assertTupleEqual(pixel, tuple([fill] * num_bands))
@@ -896,7 +896,6 @@ class Tester(unittest.TestCase):
         assert_array_almost_equal(target, result1.numpy())
         assert_array_almost_equal(target, result2.numpy())

     def test_adjust_brightness(self):
         x_shape = [2, 2, 3]
         x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
...
@@ -192,8 +192,11 @@ class GeneralizedRCNNTransform(nn.Module):
 def resize_keypoints(keypoints, original_size, new_size):
     # type: (Tensor, List[int], List[int])
-    ratios = [torch.tensor(s, dtype=torch.float32, device=keypoints.device) / torch.tensor(s_orig, dtype=torch.float32, device=keypoints.device)
-              for s, s_orig in zip(new_size, original_size)]
+    ratios = [
+        torch.tensor(s, dtype=torch.float32, device=keypoints.device) /
+        torch.tensor(s_orig, dtype=torch.float32, device=keypoints.device)
+        for s, s_orig in zip(new_size, original_size)
+    ]
     ratio_h, ratio_w = ratios
     resized_data = keypoints.clone()
     if torch._C._get_tracing_state():
@@ -208,8 +211,11 @@ def resize_keypoints(keypoints, original_size, new_size):
 def resize_boxes(boxes, original_size, new_size):
     # type: (Tensor, List[int], List[int])
-    ratios = [torch.tensor(s, dtype=torch.float32, device=boxes.device) / torch.tensor(s_orig, dtype=torch.float32, device=boxes.device)
-              for s, s_orig in zip(new_size, original_size)]
+    ratios = [
+        torch.tensor(s, dtype=torch.float32, device=boxes.device) /
+        torch.tensor(s_orig, dtype=torch.float32, device=boxes.device)
+        for s, s_orig in zip(new_size, original_size)
+    ]
     ratio_height, ratio_width = ratios
     xmin, ymin, xmax, ymax = boxes.unbind(1)
...
@@ -76,7 +76,7 @@ def _check_size_scale_factor(dim, size, scale_factor):
         raise ValueError("either size or scale_factor should be defined")
     if size is not None and scale_factor is not None:
         raise ValueError("only one of size or scale_factor should be defined")

     if scale_factor is not None:
         if isinstance(scale_factor, (list, tuple)):
             if len(scale_factor) != dim:
                 raise ValueError(
...
@@ -188,7 +188,7 @@ def normalize(tensor, mean, std, inplace=False):
     """
     if not torch.is_tensor(tensor):
         raise TypeError('tensor should be a torch tensor. Got {}.'.format(type(tensor)))

     if tensor.ndimension() != 3:
         raise ValueError('Expected tensor to be a tensor image of size (C, H, W). Got tensor.size() = '
                          '{}.'.format(tensor.size()))
@@ -424,7 +424,7 @@ def _parse_fill(fill, img, min_pil_version):
         image. If int or float, the value is used for all bands respectively.
         Defaults to 0 for all bands.
         img (PIL Image): Image to be filled.
         min_pil_version (str): The minimum PILLOW version for when the ``fillcolor`` option
             was first introduced in the calling function. (e.g. rotate->5.2.0, perspective->5.0.0)

     Returns:
...
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment