Unverified Commit 0777b51e authored by Francisco Massa's avatar Francisco Massa Committed by GitHub
Browse files

Add CircleCI job for python lint (#2056)

* Add CircleCI job for python lint

* Break lint

* Fix

* Fix lint

* Re-enable all tests and remove travis python lint
parent dcfcc867
......@@ -76,6 +76,17 @@ jobs:
python .circleci/regenerate.py
git diff --exit-code || (echo ".circleci/config.yml not in sync with config.yml.in! Run .circleci/regenerate.py to update config"; exit 1)
python_lint:
docker:
- image: circleci/python:3.7
steps:
- checkout
- run:
command: |
pip install --user --progress-bar off flake8 typing
flake8 .
binary_linux_wheel:
<<: *binary_common
docker:
......@@ -509,6 +520,8 @@ workflows:
name: torchvision_win_py3.6_cu101
python_version: "3.6"
cu_version: "cu101"
- python_lint
nightly:
jobs:
......
......@@ -76,6 +76,17 @@ jobs:
python .circleci/regenerate.py
git diff --exit-code || (echo ".circleci/config.yml not in sync with config.yml.in! Run .circleci/regenerate.py to update config"; exit 1)
python_lint:
docker:
- image: circleci/python:3.7
steps:
- checkout
- run:
command: |
pip install --user --progress-bar off flake8 typing
flake8 .
binary_linux_wheel:
<<: *binary_common
docker:
......@@ -311,6 +322,8 @@ workflows:
name: torchvision_win_py3.6_cu101
python_version: "3.6"
cu_version: "cu101"
- python_lint
nightly:
{%- endif %}
......
......@@ -15,11 +15,6 @@ matrix:
before_install: skip
install: skip
script: ./travis-scripts/run-clang-format/run-clang-format.py -r torchvision/csrc
- env: LINT_CHECK
python: "3.6"
install: pip install flake8 typing
script: flake8 .circleci
after_success: []
- python: "3.6"
env: IMAGE_BACKEND=Pillow-SIMD
- python: "3.6"
......
......@@ -97,7 +97,7 @@ def get_extensions():
is_pytorch_extension=True,
)
source_cuda = glob.glob(os.path.join(extensions_dir, 'hip', '*.hip'))
## Copy over additional files
# Copy over additional files
shutil.copy("torchvision/csrc/cuda/cuda_helpers.h", "torchvision/csrc/hip/cuda_helpers.h")
shutil.copy("torchvision/csrc/cuda/vision_cuda.h", "torchvision/csrc/hip/vision_cuda.h")
......@@ -122,7 +122,8 @@ def get_extensions():
define_macros = []
extra_compile_args = {}
if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) or os.getenv('FORCE_CUDA', '0') == '1':
if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) \
or os.getenv('FORCE_CUDA', '0') == '1':
extension = CUDAExtension
sources += source_cuda
if not is_rocm_pytorch:
......
......@@ -253,13 +253,14 @@ def svhn_root():
yield root
@contextlib.contextmanager
def voc_root():
with get_tmp_dir() as tmp_dir:
voc_dir = os.path.join(tmp_dir, 'VOCdevkit',
'VOC2012','ImageSets','Main')
'VOC2012', 'ImageSets', 'Main')
os.makedirs(voc_dir)
train_file = os.path.join(voc_dir,'train.txt')
train_file = os.path.join(voc_dir, 'train.txt')
with open(train_file, 'w') as f:
f.write('test')
......
......@@ -421,7 +421,8 @@ class ONNXExporterTester(unittest.TestCase):
class KeyPointRCNN(torch.nn.Module):
def __init__(self):
super(KeyPointRCNN, self).__init__()
self.model = models.detection.keypoint_rcnn.keypointrcnn_resnet50_fpn(pretrained=True, min_size=200, max_size=300)
self.model = models.detection.keypoint_rcnn.keypointrcnn_resnet50_fpn(
pretrained=True, min_size=200, max_size=300)
def forward(self, images):
output = self.model(images)
......
......@@ -896,7 +896,6 @@ class Tester(unittest.TestCase):
assert_array_almost_equal(target, result1.numpy())
assert_array_almost_equal(target, result2.numpy())
def test_adjust_brightness(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
......
......@@ -192,8 +192,11 @@ class GeneralizedRCNNTransform(nn.Module):
def resize_keypoints(keypoints, original_size, new_size):
# type: (Tensor, List[int], List[int])
ratios = [torch.tensor(s, dtype=torch.float32, device=keypoints.device) / torch.tensor(s_orig, dtype=torch.float32, device=keypoints.device)
for s, s_orig in zip(new_size, original_size)]
ratios = [
torch.tensor(s, dtype=torch.float32, device=keypoints.device) /
torch.tensor(s_orig, dtype=torch.float32, device=keypoints.device)
for s, s_orig in zip(new_size, original_size)
]
ratio_h, ratio_w = ratios
resized_data = keypoints.clone()
if torch._C._get_tracing_state():
......@@ -208,8 +211,11 @@ def resize_keypoints(keypoints, original_size, new_size):
def resize_boxes(boxes, original_size, new_size):
# type: (Tensor, List[int], List[int])
ratios = [torch.tensor(s, dtype=torch.float32, device=boxes.device) / torch.tensor(s_orig, dtype=torch.float32, device=boxes.device)
for s, s_orig in zip(new_size, original_size)]
ratios = [
torch.tensor(s, dtype=torch.float32, device=boxes.device) /
torch.tensor(s_orig, dtype=torch.float32, device=boxes.device)
for s, s_orig in zip(new_size, original_size)
]
ratio_height, ratio_width = ratios
xmin, ymin, xmax, ymax = boxes.unbind(1)
......
Markdown is supported
Attach a file by drag &amp; drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment