OpenDAS / vision · Commits

Commit e2e511be (unverified)
Authored May 18, 2020 by Francisco Massa; committed by GitHub on May 18, 2020.

Fix Python lint (#2226)

Parent: 57c54075

Showing 10 changed files with 39 additions and 37 deletions (+39 -37).
references/similarity/test.py                  +4  -4
test/test_io.py                                +13 -12
torchvision/datasets/usps.py                   +1  -1
torchvision/models/detection/keypoint_rcnn.py  +3  -3
torchvision/models/detection/roi_heads.py      +2  -2
torchvision/models/detection/rpn.py            +3  -3
torchvision/models/detection/transform.py      +3  -3
torchvision/ops/poolers.py                     +7  -6
torchvision/transforms/functional.py           +1  -1
torchvision/transforms/transforms.py           +2  -2
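Nearly every hunk below makes the same kind of change: renaming the single-letter variable `l`, which flake8 rejects as ambiguous (rule E741) because it is easily confused with `1` and `I`. A minimal reproduction of the warning (assuming flake8 is installed; the file name is illustrative):

    # ambiguous.py -- minimal reproduction of the lint error fixed below
    for l in range(3):  # flake8 reports: E741 ambiguous variable name 'l'
        total = l + 1

The remaining hunks add missing blank lines between top-level definitions and fix two broken error-message format strings.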
references/similarity/test.py

@@ -27,15 +27,15 @@ class Tester(unittest.TestCase):
         for _, labels in loader:
             bins = defaultdict(int)
-            for l in labels.tolist():
-                bins[l] += 1
+            for label in labels.tolist():
+                bins[label] += 1

             # Ensure that each batch has samples from exactly p classes
             self.assertEqual(len(bins), p)

             # Ensure that there are k samples from each class
-            for l in bins:
-                self.assertEqual(bins[l], k)
+            for b in bins:
+                self.assertEqual(bins[b], k)


 if __name__ == '__main__':
test/test_io.py

@@ -59,6 +59,7 @@ def temp_video(num_frames, height, width, fps, lossless=False, video_codec=None,
         yield f.name, data
     os.unlink(f.name)

+
 @unittest.skipIf(get_video_backend() != "pyav" and not io._HAS_VIDEO_OPT,
                  "video_reader backend not available")
 @unittest.skipIf(av is None, "PyAV unavailable")

@@ -108,10 +109,10 @@ class TestIO(unittest.TestCase):
         with temp_video(10, 300, 300, 5, lossless=True) as (f_name, data):
             pts, _ = io.read_video_timestamps(f_name)
             for start in range(5):
-                for l in range(1, 4):
-                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + l - 1])
-                    s_data = data[start:(start + l)]
-                    self.assertEqual(len(lv), l)
+                for offset in range(1, 4):
+                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + offset - 1])
+                    s_data = data[start:(start + offset)]
+                    self.assertEqual(len(lv), offset)
                     self.assertTrue(s_data.equal(lv))

         if get_video_backend() == "pyav":

@@ -127,10 +128,10 @@ class TestIO(unittest.TestCase):
         with temp_video(100, 300, 300, 5, options=options) as (f_name, data):
             pts, _ = io.read_video_timestamps(f_name)
             for start in range(0, 80, 20):
-                for l in range(1, 4):
-                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + l - 1])
-                    s_data = data[start:(start + l)]
-                    self.assertEqual(len(lv), l)
+                for offset in range(1, 4):
+                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + offset - 1])
+                    s_data = data[start:(start + offset)]
+                    self.assertEqual(len(lv), offset)
                     self.assertTrue((s_data.float() - lv.float()).abs().max() < self.TOLERANCE)

             lv, _, _ = io.read_video(f_name, pts[4] + 1, pts[7])

@@ -201,10 +202,10 @@ class TestIO(unittest.TestCase):
             pts, _ = io.read_video_timestamps(f_name, pts_unit='sec')
             for start in range(5):
-                for l in range(1, 4):
-                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + l - 1], pts_unit='sec')
-                    s_data = data[start:(start + l)]
-                    self.assertEqual(len(lv), l)
+                for offset in range(1, 4):
+                    lv, _, _ = io.read_video(f_name, pts[start], pts[start + offset - 1], pts_unit='sec')
+                    s_data = data[start:(start + offset)]
+                    self.assertEqual(len(lv), offset)
                     self.assertTrue(s_data.equal(lv))

             container = av.open(f_name)
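The rename from `l` to `offset` also makes the slice arithmetic in these hunks easier to verify: reading frames from pts[start] through pts[start + offset - 1] inclusive should yield exactly offset frames, matching the slice data[start:(start + offset)]. A quick illustration with stand-in values in place of decoded frames:

    data = list(range(10))                   # stand-in for 10 decoded frames
    start, offset = 2, 3
    segment = data[start:(start + offset)]   # frames 2, 3, 4
    assert len(segment) == offset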
torchvision/datasets/usps.py

@@ -49,7 +49,7 @@ class USPS(VisionDataset):
         import bz2
         with bz2.open(full_path) as fp:
-            raw_data = [l.decode().split() for l in fp.readlines()]
+            raw_data = [line.decode().split() for line in fp.readlines()]
             imgs = [[x.split(':')[-1] for x in data[1:]] for data in raw_data]
             imgs = np.asarray(imgs, dtype=np.float32).reshape((-1, 16, 16))
             imgs = ((imgs + 1) / 2 * 255).astype(dtype=np.uint8)
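Context for the unchanged lines around this rename: the USPS file stores each sample as libsvm-style "index:value" pairs with feature values in [-1, 1], which the last line rescales to uint8 pixels. A quick check of that mapping:

    import numpy as np

    vals = np.array([-1.0, 0.0, 1.0], dtype=np.float32)
    pixels = ((vals + 1) / 2 * 255).astype(dtype=np.uint8)
    print(pixels)  # [  0 127 255] -- -1 maps to 0, +1 maps to 255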
torchvision/models/detection/keypoint_rcnn.py

@@ -221,10 +221,10 @@ class KeypointRCNNHeads(nn.Sequential):
     def __init__(self, in_channels, layers):
         d = []
         next_feature = in_channels
-        for l in layers:
-            d.append(misc_nn_ops.Conv2d(next_feature, l, 3, stride=1, padding=1))
+        for out_channels in layers:
+            d.append(misc_nn_ops.Conv2d(next_feature, out_channels, 3, stride=1, padding=1))
             d.append(nn.ReLU(inplace=True))
-            next_feature = l
+            next_feature = out_channels
         super(KeypointRCNNHeads, self).__init__(*d)
         for m in self.children():
             if isinstance(m, misc_nn_ops.Conv2d):
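The new name also documents what the loop does: each entry of `layers` becomes the out_channels of one 3x3 conv and the in_channels of the next. A minimal trace of that threading, with hypothetical widths:

    in_channels, layers = 17, (128, 128, 256)
    next_feature = in_channels
    for out_channels in layers:
        print('Conv2d({} -> {})'.format(next_feature, out_channels))
        next_feature = out_channels
    # Conv2d(17 -> 128), Conv2d(128 -> 128), Conv2d(128 -> 256)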
torchvision/models/detection/roi_heads.py

@@ -75,7 +75,7 @@ def maskrcnn_inference(x, labels):
     # select masks coresponding to the predicted classes
     num_masks = x.shape[0]
-    boxes_per_image = [l.shape[0] for l in labels]
+    boxes_per_image = [label.shape[0] for label in labels]
     labels = torch.cat(labels)
     index = torch.arange(num_masks, device=labels.device)
     mask_prob = mask_prob[index, labels][:, None]

@@ -112,7 +112,7 @@ def maskrcnn_loss(mask_logits, proposals, gt_masks, gt_labels, mask_matched_idxs
     """
     discretization_size = mask_logits.shape[-1]
-    labels = [l[idxs] for l, idxs in zip(gt_labels, mask_matched_idxs)]
+    labels = [gt_label[idxs] for gt_label, idxs in zip(gt_labels, mask_matched_idxs)]
     mask_targets = [
         project_masks_on_boxes(m, p, i, discretization_size)
         for m, p, i in zip(gt_masks, proposals, mask_matched_idxs)
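The unchanged line mask_prob[index, labels][:, None] in the first hunk is the core trick of maskrcnn_inference: for each mask i it selects the channel named by labels[i], then restores a channel dimension. A small shape check with toy tensors (not the real model outputs):

    import torch

    x = torch.rand(4, 3, 2, 2)           # 4 masks, 3 classes, 2x2 maps
    labels = torch.tensor([2, 0, 1, 2])  # predicted class per mask
    index = torch.arange(4)
    picked = x[index, labels][:, None]   # each mask's map for its own class
    print(picked.shape)                  # torch.Size([4, 1, 2, 2])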
torchvision/models/detection/rpn.py

@@ -195,9 +195,9 @@ class RPNHead(nn.Module):
             in_channels, num_anchors * 4, kernel_size=1, stride=1
         )

-        for l in self.children():
-            torch.nn.init.normal_(l.weight, std=0.01)
-            torch.nn.init.constant_(l.bias, 0)
+        for layer in self.children():
+            torch.nn.init.normal_(layer.weight, std=0.01)
+            torch.nn.init.constant_(layer.bias, 0)

     def forward(self, x):
         # type: (List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]
torchvision/models/detection/transform.py

@@ -111,15 +111,15 @@ class GeneralizedRCNNTransform(nn.Module):
         std = torch.as_tensor(self.image_std, dtype=dtype, device=device)
         return (image - mean[:, None, None]) / std[:, None, None]

-    def torch_choice(self, l):
+    def torch_choice(self, k):
         # type: (List[int]) -> int
         """
         Implements `random.choice` via torch ops so it can be compiled with
         TorchScript. Remove if https://github.com/pytorch/pytorch/issues/25803
         is fixed.
         """
-        index = int(torch.empty(1).uniform_(0., float(len(l))).item())
-        return l[index]
+        index = int(torch.empty(1).uniform_(0., float(len(k))).item())
+        return k[index]

     def resize(self, image, target):
         # type: (Tensor, Optional[Dict[str, Tensor]]) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]
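As the docstring says, torch_choice exists because TorchScript cannot compile Python's random module, while torch.empty(1).uniform_(...) scripts fine. A standalone sketch of the same pattern (a free function rather than the class method, assuming a PyTorch version with TorchScript support):

    from typing import List

    import torch

    @torch.jit.script
    def choice(k: List[int]) -> int:
        # uniform_(0, len(k)) samples a float in [0, len(k)); int() truncates
        # it to a valid list index
        index = int(torch.empty(1).uniform_(0., float(len(k))).item())
        return k[index]

    print(choice([480, 512, 544]))  # one of the candidate sizes, chosen uniformly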
torchvision/ops/poolers.py

@@ -9,6 +9,7 @@ from torchvision.ops.boxes import box_area
 from torch.jit.annotations import Optional, List, Dict, Tuple

 import torchvision

+
 # copying result_idx_in_level to a specific index in result[]
 # is not supported by ONNX tracing yet.
 # _onnx_merge_levels() is an implementation supported by ONNX

@@ -21,13 +22,13 @@ def _onnx_merge_levels(levels, unmerged_results):
     res = torch.zeros((levels.size(0), first_result.size(1),
                        first_result.size(2), first_result.size(3)),
                       dtype=dtype, device=device)
-    for l in range(len(unmerged_results)):
-        index = (levels == l).nonzero().view(-1, 1, 1, 1)
+    for level in range(len(unmerged_results)):
+        index = (levels == level).nonzero().view(-1, 1, 1, 1)
         index = index.expand(index.size(0),
-                             unmerged_results[l].size(1),
-                             unmerged_results[l].size(2),
-                             unmerged_results[l].size(3))
-        res = res.scatter(0, index, unmerged_results[l])
+                             unmerged_results[level].size(1),
+                             unmerged_results[level].size(2),
+                             unmerged_results[level].size(3))
+        res = res.scatter(0, index, unmerged_results[level])
     return res
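The comment block in the first hunk explains why this function exists: indexed assignment such as res[levels == level] = ... does not trace to ONNX, so the code builds an expanded integer index and uses Tensor.scatter along dim 0 instead. A reduced 2-D sketch of that scatter pattern:

    import torch

    levels = torch.tensor([0, 1, 0, 1])               # level of each output row
    unmerged = [torch.zeros(2, 3), torch.ones(2, 3)]  # per-level results
    res = torch.zeros(4, 3)
    for level in range(len(unmerged)):
        # rows belonging to this level, expanded to the shape of the source
        index = (levels == level).nonzero().view(-1, 1).expand(-1, 3)
        res = res.scatter(0, index, unmerged[level])
    print(res)  # rows 0 and 2 come from level 0, rows 1 and 3 from level 1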
torchvision/transforms/functional.py

@@ -676,7 +676,7 @@ def adjust_hue(img, hue_factor):
         PIL Image: Hue adjusted image.
     """
     if not(-0.5 <= hue_factor <= 0.5):
-        raise ValueError('hue_factor is not in [-0.5, 0.5].'.format(hue_factor))
+        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))

     if not _is_pil_image(img):
         raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
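This hunk fixes more than style: the old string contained no placeholder, so .format(hue_factor) silently discarded the offending value. The new ({}) actually surfaces it:

    print('hue_factor is not in [-0.5, 0.5].'.format(1.2))
    # hue_factor is not in [-0.5, 0.5].            (value dropped)
    print('hue_factor ({}) is not in [-0.5, 0.5].'.format(1.2))
    # hue_factor (1.2) is not in [-0.5, 0.5].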
torchvision/transforms/transforms.py

@@ -807,8 +807,8 @@ class LinearTransformation(object):
         if mean_vector.size(0) != transformation_matrix.size(0):
             raise ValueError("mean_vector should have the same length {}".format(mean_vector.size(0)) +
-                             " as any one of the dimensions of the transformation_matrix [{} x {}]"
-                             .format(transformation_matrix.size()))
+                             " as any one of the dimensions of the transformation_matrix [{}]"
+                             .format(tuple(transformation_matrix.size())))

         self.transformation_matrix = transformation_matrix
         self.mean_vector = mean_vector
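Another genuine bug behind this lint pass: the old message had two placeholders ({} x {}) but .format received a single torch.Size, so building the error message would itself raise IndexError before the intended ValueError could be seen. With one placeholder and tuple(...), the message formats cleanly:

    import torch

    size = torch.eye(3).size()
    # '[{} x {}]'.format(size) -> IndexError: two placeholders, one argument
    print("[{}]".format(tuple(size)))  # [(3, 3)]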