OpenDAS / vision / Commits / 325dfd61

Unverified commit 325dfd61, authored Dec 08, 2020 by Vasilis Vryniotis, committed by GitHub on Dec 08, 2020
Fixing the upper bound limit of random pixels in tests to 256. (#3136)
Parent: f80b83ea

Showing 4 changed files with 13 additions and 13 deletions (+13 −13):
test/fakedata_generation.py (+1 −1)
test/test_datasets_samplers.py (+1 −1)
test/test_datasets_video_utils.py (+1 −1)
test/test_transforms_tensor.py (+10 −10)
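All 13 changes are the same one-character fix: torch.randint draws integers from the half-open interval [low, high), so an upper bound of 255 could never produce the maximum uint8 pixel value 255 in the fake test data; raising the bound to 256 makes the whole 0..255 range reachable. A minimal sketch of the corrected pattern (the tensor shape here is just for illustration):

    import torch

    # high is exclusive: values are drawn from [0, 256), i.e. 0..255 inclusive,
    # covering the full uint8 pixel range. With the old high=255, the value 255
    # could never appear in the generated images.
    img = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8)
    print(int(img.min()), int(img.max()))  # both fall within 0..255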
test/fakedata_generation.py

...
@@ -21,7 +21,7 @@ def mnist_root(num_images, cls_name):
         return torch.tensor(v, dtype=torch.int32).numpy().tobytes()[::-1]
 
     def _make_image_file(filename, num_images):
-        img = torch.randint(0, 255, size=(28 * 28 * num_images,), dtype=torch.uint8)
+        img = torch.randint(0, 256, size=(28 * 28 * num_images,), dtype=torch.uint8)
         with open(filename, "wb") as f:
             f.write(_encode(2051))  # magic header
             f.write(_encode(num_images))
...
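A note on the helper shown above: the byte reversal in _encode turns the host's little-endian int32 bytes into the big-endian 32-bit integers that the MNIST IDX header expects (2051, i.e. 0x00000803, is the magic number for an unsigned-byte image file). A small equivalence sketch, assuming a little-endian host:

    import struct
    import torch

    def _encode(v):
        # same as the helper in the diff: int32 bytes, reversed into big-endian order
        return torch.tensor(v, dtype=torch.int32).numpy().tobytes()[::-1]

    assert _encode(2051) == struct.pack(">i", 2051)  # b'\x00\x00\x08\x03'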
test/test_datasets_samplers.py

...
@@ -29,7 +29,7 @@ def get_list_of_videos(num_videos=5, sizes=None, fps=None):
                 f = 5
             else:
                 f = fps[i]
-            data = torch.randint(0, 255, (size, 300, 400, 3), dtype=torch.uint8)
+            data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8)
             name = os.path.join(tmp_dir, "{}.mp4".format(i))
             names.append(name)
             io.write_video(name, data, fps=f)
...
test/test_datasets_video_utils.py

...
@@ -22,7 +22,7 @@ def get_list_of_videos(num_videos=5, sizes=None, fps=None):
                 f = 5
             else:
                 f = fps[i]
-            data = torch.randint(0, 255, (size, 300, 400, 3), dtype=torch.uint8)
+            data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8)
             name = os.path.join(tmp_dir, "{}.mp4".format(i))
             names.append(name)
             io.write_video(name, data, fps=f)
...
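Both video helpers above build each fake clip as random uint8 frames in (T, H, W, C) layout and serialize it with torchvision.io.write_video. A standalone sketch of that pattern, assuming the PyAV backend is installed (the output path is illustrative):

    import torch
    from torchvision import io

    # 10 frames of 300x400 RGB noise, channel-last uint8 as write_video expects.
    data = torch.randint(0, 256, (10, 300, 400, 3), dtype=torch.uint8)
    io.write_video("/tmp/fake_clip.mp4", data, fps=5)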
test/test_transforms_tensor.py

...
@@ -180,7 +180,7 @@ class Tester(TransformsTester):
         self._test_op("center_crop", "CenterCrop", fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs)
 
-        tensor = torch.randint(0, 255, (3, 10, 10), dtype=torch.uint8, device=self.device)
+        tensor = torch.randint(0, 256, (3, 10, 10), dtype=torch.uint8, device=self.device)
         # Test torchscript of transforms.CenterCrop with size as int
         f = T.CenterCrop(size=5)
         scripted_fn = torch.jit.script(f)
...
@@ -294,7 +294,7 @@ class Tester(TransformsTester):
             self.assertEqual(y.shape[2], int(38 * 46 / 32))
 
         tensor, _ = self._create_data(height=34, width=36, device=self.device)
-        batch_tensors = torch.randint(0, 255, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)
+        batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)
         script_fn = torch.jit.script(F.resize)
 
         for dt in [None, torch.float32, torch.float64]:
...
@@ -323,8 +323,8 @@ class Tester(TransformsTester):
             script_fn.save(os.path.join(tmp_dir, "t_resize.pt"))
 
     def test_resized_crop(self):
-        tensor = torch.randint(0, 255, size=(3, 44, 56), dtype=torch.uint8, device=self.device)
-        batch_tensors = torch.randint(0, 255, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)
+        tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=self.device)
+        batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)
 
         for scale in [(0.7, 1.2), [0.7, 1.2]]:
             for ratio in [(0.75, 1.333), [0.75, 1.333]]:
...
@@ -341,8 +341,8 @@ class Tester(TransformsTester):
             s_transform.save(os.path.join(tmp_dir, "t_resized_crop.pt"))
 
     def test_random_affine(self):
-        tensor = torch.randint(0, 255, size=(3, 44, 56), dtype=torch.uint8, device=self.device)
-        batch_tensors = torch.randint(0, 255, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)
+        tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=self.device)
+        batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)
 
         for shear in [15, 10.0, (5.0, 10.0), [-15, 15], [-10.0, 10.0, -11.0, 11.0]]:
             for scale in [(0.7, 1.2), [0.7, 1.2]]:
...
@@ -363,8 +363,8 @@ class Tester(TransformsTester):
             s_transform.save(os.path.join(tmp_dir, "t_random_affine.pt"))
 
     def test_random_rotate(self):
-        tensor = torch.randint(0, 255, size=(3, 44, 56), dtype=torch.uint8, device=self.device)
-        batch_tensors = torch.randint(0, 255, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)
+        tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=self.device)
+        batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)
 
         for center in [(0, 0), [10, 10], None, (56, 44)]:
             for expand in [True, False]:
...
@@ -383,8 +383,8 @@ class Tester(TransformsTester):
             s_transform.save(os.path.join(tmp_dir, "t_random_rotate.pt"))
 
     def test_random_perspective(self):
-        tensor = torch.randint(0, 255, size=(3, 44, 56), dtype=torch.uint8, device=self.device)
-        batch_tensors = torch.randint(0, 255, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)
+        tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=self.device)
+        batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)
 
         for distortion_scale in np.linspace(0.1, 1.0, num=20):
             for interpolation in [NEAREST, BILINEAR]:
...