dcuai / dlexamples
Commit 85529f35, authored Jul 30, 2022 by unknown
Commit message: Add OpenMMLab test cases
Parent: b21b0c01
Changes: 977 files. Showing 20 changed files with 2980 additions and 0 deletions (+2980, -0).
openmmlab_test/mmclassification-speed-benchmark/tests/test_pipelines/test_loading.py (+58, -0)
openmmlab_test/mmclassification-speed-benchmark/tests/test_pipelines/test_transform.py (+1187, -0)
openmmlab_test/mmclassification-speed-benchmark/tools/analyze_results.py (+99, -0)
openmmlab_test/mmclassification-speed-benchmark/tools/benchmark_regression.py (+166, -0)
openmmlab_test/mmclassification-speed-benchmark/tools/convert_models/mobilenetv2_to_mmcls.py (+134, -0)
openmmlab_test/mmclassification-speed-benchmark/tools/convert_models/shufflenetv2_to_mmcls.py (+112, -0)
openmmlab_test/mmclassification-speed-benchmark/tools/convert_models/vgg_to_mmcls.py (+116, -0)
openmmlab_test/mmclassification-speed-benchmark/tools/deployment/mmcls2torchserve.py (+110, -0)
openmmlab_test/mmclassification-speed-benchmark/tools/deployment/mmcls_handler.py (+50, -0)
openmmlab_test/mmclassification-speed-benchmark/tools/deployment/onnx2tensorrt.py (+141, -0)
openmmlab_test/mmclassification-speed-benchmark/tools/deployment/pytorch2onnx.py (+218, -0)
openmmlab_test/mmclassification-speed-benchmark/tools/deployment/pytorch2torchscript.py (+138, -0)
openmmlab_test/mmclassification-speed-benchmark/tools/deployment/test.py (+115, -0)
openmmlab_test/mmclassification-speed-benchmark/tools/dist_test.sh (+10, -0)
openmmlab_test/mmclassification-speed-benchmark/tools/dist_train.sh (+9, -0)
openmmlab_test/mmclassification-speed-benchmark/tools/get_flops.py (+54, -0)
openmmlab_test/mmclassification-speed-benchmark/tools/publish_model.py (+39, -0)
openmmlab_test/mmclassification-speed-benchmark/tools/slurm_test.sh (+24, -0)
openmmlab_test/mmclassification-speed-benchmark/tools/slurm_train.sh (+24, -0)
openmmlab_test/mmclassification-speed-benchmark/tools/test.py (+176, -0)
openmmlab_test/mmclassification-speed-benchmark/tests/test_pipelines/test_loading.py (new file, 0 → 100644)
import copy
import os.path as osp

import numpy as np

from mmcls.datasets.pipelines import LoadImageFromFile


class TestLoading(object):

    @classmethod
    def setup_class(cls):
        cls.data_prefix = osp.join(osp.dirname(__file__), '../data')

    def test_load_img(self):
        results = dict(
            img_prefix=self.data_prefix, img_info=dict(filename='color.jpg'))
        transform = LoadImageFromFile()
        results = transform(copy.deepcopy(results))
        assert results['filename'] == osp.join(self.data_prefix, 'color.jpg')
        assert results['ori_filename'] == 'color.jpg'
        assert results['img'].shape == (300, 400, 3)
        assert results['img'].dtype == np.uint8
        assert results['img_shape'] == (300, 400, 3)
        assert results['ori_shape'] == (300, 400, 3)
        np.testing.assert_equal(results['img_norm_cfg']['mean'],
                                np.zeros(3, dtype=np.float32))
        assert repr(transform) == transform.__class__.__name__ + \
            "(to_float32=False, color_type='color', " + \
            "file_client_args={'backend': 'disk'})"

        # no img_prefix
        results = dict(
            img_prefix=None, img_info=dict(filename='tests/data/color.jpg'))
        transform = LoadImageFromFile()
        results = transform(copy.deepcopy(results))
        assert results['filename'] == 'tests/data/color.jpg'
        assert results['img'].shape == (300, 400, 3)

        # to_float32
        transform = LoadImageFromFile(to_float32=True)
        results = transform(copy.deepcopy(results))
        assert results['img'].dtype == np.float32

        # gray image
        results = dict(
            img_prefix=self.data_prefix, img_info=dict(filename='gray.jpg'))
        transform = LoadImageFromFile()
        results = transform(copy.deepcopy(results))
        assert results['img'].shape == (288, 512, 3)
        assert results['img'].dtype == np.uint8

        transform = LoadImageFromFile(color_type='unchanged')
        results = transform(copy.deepcopy(results))
        assert results['img'].shape == (288, 512)
        assert results['img'].dtype == np.uint8
        np.testing.assert_equal(results['img_norm_cfg']['mean'],
                                np.zeros(1, dtype=np.float32))
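As a quick illustration of the results-dict protocol this test exercises, here is a minimal standalone sketch (not part of the commit): LoadImageFromFile reads img_prefix and img_info['filename'] from the input dict and fills in the decoded image plus its metadata. The 'tests/data' prefix and 'color.jpg' name below are assumptions borrowed from the test above.

import copy

from mmcls.datasets.pipelines import LoadImageFromFile

# Input follows the pipeline contract used in TestLoading above.
results = dict(img_prefix='tests/data', img_info=dict(filename='color.jpg'))

# to_float32=True converts the decoded uint8 image to float32.
transform = LoadImageFromFile(to_float32=True)
loaded = transform(copy.deepcopy(results))

# The transform adds 'img', 'img_shape', 'ori_shape' and 'img_norm_cfg'.
print(loaded['img'].shape, loaded['img'].dtype)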
openmmlab_test/mmclassification-speed-benchmark/tests/test_pipelines/test_transform.py (new file, 0 → 100644)
import copy
import os.path as osp
import random

import mmcv
import numpy as np
import pytest
import torch
import torchvision
from mmcv.utils import build_from_cfg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from PIL import Image
from torchvision import transforms

import mmcls.datasets.pipelines.transforms as mmcls_transforms
from mmcls.datasets.builder import PIPELINES
from mmcls.datasets.pipelines import Compose
def construct_toy_data():
    img = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
                   dtype=np.uint8)
    img = np.stack([img, img, img], axis=-1)
    results = dict()
    # image
    results['ori_img'] = img
    results['img'] = copy.deepcopy(img)
    results['ori_shape'] = img.shape
    results['img_shape'] = img.shape
    return results
def test_resize():
    # test assertion if size is smaller than 0
    with pytest.raises(AssertionError):
        transform = dict(type='Resize', size=-1)
        build_from_cfg(transform, PIPELINES)

    # test assertion if size is tuple but the second value is smaller than 0
    # and the second value is not equal to -1
    with pytest.raises(AssertionError):
        transform = dict(type='Resize', size=(224, -2))
        build_from_cfg(transform, PIPELINES)

    # test assertion if size is tuple but the first value is smaller than 0
    with pytest.raises(AssertionError):
        transform = dict(type='Resize', size=(-1, 224))
        build_from_cfg(transform, PIPELINES)

    # test assertion if size is tuple and len(size) < 2
    with pytest.raises(AssertionError):
        transform = dict(type='Resize', size=(224, ))
        build_from_cfg(transform, PIPELINES)

    # test assertion if size is tuple len(size) > 2
    with pytest.raises(AssertionError):
        transform = dict(type='Resize', size=(224, 224, 3))
        build_from_cfg(transform, PIPELINES)

    # test assertion when interpolation is invalid
    with pytest.raises(AssertionError):
        transform = dict(type='Resize', size=224, interpolation='2333')
        build_from_cfg(transform, PIPELINES)

    # test repr
    transform = dict(type='Resize', size=224)
    resize_module = build_from_cfg(transform, PIPELINES)
    assert isinstance(repr(resize_module), str)

    # read test image
    results = dict()
    img = mmcv.imread(
        osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
    original_img = copy.deepcopy(img)
    results['img'] = img
    results['img2'] = copy.deepcopy(img)
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    results['img_fields'] = ['img', 'img2']

    def reset_results(results, original_img):
        results['img'] = copy.deepcopy(original_img)
        results['img2'] = copy.deepcopy(original_img)
        results['img_shape'] = original_img.shape
        results['ori_shape'] = original_img.shape
        results['img_fields'] = ['img', 'img2']
        return results

    # test resize when size is int
    transform = dict(type='Resize', size=224, interpolation='bilinear')
    resize_module = build_from_cfg(transform, PIPELINES)
    results = resize_module(results)
    assert np.equal(results['img'], results['img2']).all()
    assert results['img_shape'] == (224, 224, 3)

    # test resize when size is tuple and the second value is -1
    transform = dict(type='Resize', size=(224, -1), interpolation='bilinear')
    resize_module = build_from_cfg(transform, PIPELINES)
    results = reset_results(results, original_img)
    results = resize_module(results)
    assert np.equal(results['img'], results['img2']).all()
    assert results['img_shape'] == (224, 298, 3)

    # test resize when size is tuple
    transform = dict(type='Resize', size=(224, 224), interpolation='bilinear')
    resize_module = build_from_cfg(transform, PIPELINES)
    results = reset_results(results, original_img)
    results = resize_module(results)
    assert np.equal(results['img'], results['img2']).all()
    assert results['img_shape'] == (224, 224, 3)

    # test resize when resize_height != resize_width
    transform = dict(type='Resize', size=(224, 256), interpolation='bilinear')
    resize_module = build_from_cfg(transform, PIPELINES)
    results = reset_results(results, original_img)
    results = resize_module(results)
    assert np.equal(results['img'], results['img2']).all()
    assert results['img_shape'] == (224, 256, 3)

    # test resize when size is larger than img.shape
    img_height, img_width, _ = original_img.shape
    transform = dict(
        type='Resize',
        size=(img_height * 2, img_width * 2),
        interpolation='bilinear')
    resize_module = build_from_cfg(transform, PIPELINES)
    results = reset_results(results, original_img)
    results = resize_module(results)
    assert np.equal(results['img'], results['img2']).all()
    assert results['img_shape'] == (img_height * 2, img_width * 2, 3)

    # test resize with different backends
    transform_cv2 = dict(
        type='Resize',
        size=(224, 256),
        interpolation='bilinear',
        backend='cv2')
    transform_pil = dict(
        type='Resize',
        size=(224, 256),
        interpolation='bilinear',
        backend='pillow')
    resize_module_cv2 = build_from_cfg(transform_cv2, PIPELINES)
    resize_module_pil = build_from_cfg(transform_pil, PIPELINES)
    results = reset_results(results, original_img)
    results['img_fields'] = ['img']
    results_cv2 = resize_module_cv2(results)
    results['img_fields'] = ['img2']
    results_pil = resize_module_pil(results)
    assert np.allclose(results_cv2['img'], results_pil['img2'], atol=45)

    # compare results with torchvision
    transform = dict(type='Resize', size=(224, 224), interpolation='area')
    resize_module = build_from_cfg(transform, PIPELINES)
    results = reset_results(results, original_img)
    results = resize_module(results)
    resize_module = transforms.Resize(
        size=(224, 224), interpolation=Image.BILINEAR)
    pil_img = Image.fromarray(original_img)
    resized_img = resize_module(pil_img)
    resized_img = np.array(resized_img)
    assert np.equal(results['img'], results['img2']).all()
    assert results['img_shape'] == (224, 224, 3)
    assert np.allclose(results['img'], resized_img, atol=30)
def test_center_crop():
    # test assertion if size is smaller than 0
    with pytest.raises(AssertionError):
        transform = dict(type='CenterCrop', crop_size=-1)
        build_from_cfg(transform, PIPELINES)

    # test assertion if size is tuple but one value is smaller than 0
    with pytest.raises(AssertionError):
        transform = dict(type='CenterCrop', crop_size=(224, -1))
        build_from_cfg(transform, PIPELINES)

    # test assertion if size is tuple and len(size) < 2
    with pytest.raises(AssertionError):
        transform = dict(type='CenterCrop', crop_size=(224, ))
        build_from_cfg(transform, PIPELINES)

    # test assertion if size is tuple len(size) > 2
    with pytest.raises(AssertionError):
        transform = dict(type='CenterCrop', crop_size=(224, 224, 3))
        build_from_cfg(transform, PIPELINES)

    # test assertion if efficientnet is True and crop_size is tuple
    with pytest.raises(AssertionError):
        transform = dict(
            type='CenterCrop',
            crop_size=(224, 224),
            efficientnet_style=True,
        )
        build_from_cfg(transform, PIPELINES)

    # test assertion if efficientnet is True and interpolation is invalid
    with pytest.raises(AssertionError):
        transform = dict(
            type='CenterCrop',
            crop_size=224,
            efficientnet_style=True,
            interpolation='2333')
        build_from_cfg(transform, PIPELINES)

    # test assertion if efficientnet is True and crop_padding is negative
    with pytest.raises(AssertionError):
        transform = dict(
            type='CenterCrop',
            crop_size=224,
            efficientnet_style=True,
            crop_padding=-1)
        build_from_cfg(transform, PIPELINES)

    # test repr
    transform = dict(type='CenterCrop', crop_size=224)
    center_crop_module = build_from_cfg(transform, PIPELINES)
    assert isinstance(repr(center_crop_module), str)

    # read test image
    results = dict()
    img = mmcv.imread(
        osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
    original_img = copy.deepcopy(img)
    results['img'] = img
    results['img2'] = copy.deepcopy(img)
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    results['img_fields'] = ['img', 'img2']

    def reset_results(results, original_img):
        results['img'] = copy.deepcopy(original_img)
        results['img2'] = copy.deepcopy(original_img)
        results['img_shape'] = original_img.shape
        results['ori_shape'] = original_img.shape
        return results

    # test CenterCrop when size is int
    transform = dict(type='CenterCrop', crop_size=224)
    center_crop_module = build_from_cfg(transform, PIPELINES)
    results = center_crop_module(results)
    assert np.equal(results['img'], results['img2']).all()
    assert results['img_shape'] == (224, 224, 3)

    # test CenterCrop when size is int and efficientnet_style is True
    # and crop_padding=0
    transform = dict(
        type='CenterCrop',
        crop_size=224,
        efficientnet_style=True,
        crop_padding=0)
    center_crop_module = build_from_cfg(transform, PIPELINES)
    results = reset_results(results, original_img)
    results = center_crop_module(results)
    assert np.equal(results['img'], results['img2']).all()
    assert results['img_shape'] == (224, 224, 3)
    results_img = copy.deepcopy(results['img'])

    short_edge = min(*results['ori_shape'][:2])
    transform = dict(type='CenterCrop', crop_size=short_edge)
    baseline_center_crop_module = build_from_cfg(transform, PIPELINES)
    transform = dict(type='Resize', size=224)
    baseline_resize_module = build_from_cfg(transform, PIPELINES)
    results = reset_results(results, original_img)
    results = baseline_center_crop_module(results)
    results = baseline_resize_module(results)
    assert np.equal(results['img'], results_img).all()

    # test CenterCrop when size is tuple
    transform = dict(type='CenterCrop', crop_size=(224, 224))
    center_crop_module = build_from_cfg(transform, PIPELINES)
    results = reset_results(results, original_img)
    results = center_crop_module(results)
    assert np.equal(results['img'], results['img2']).all()
    assert results['img_shape'] == (224, 224, 3)

    # test CenterCrop when crop_height != crop_width
    transform = dict(type='CenterCrop', crop_size=(256, 224))
    center_crop_module = build_from_cfg(transform, PIPELINES)
    results = reset_results(results, original_img)
    results = center_crop_module(results)
    assert np.equal(results['img'], results['img2']).all()
    assert results['img_shape'] == (256, 224, 3)

    # test CenterCrop when crop_size is equal to img.shape
    img_height, img_width, _ = original_img.shape
    transform = dict(type='CenterCrop', crop_size=(img_height, img_width))
    center_crop_module = build_from_cfg(transform, PIPELINES)
    results = reset_results(results, original_img)
    results = center_crop_module(results)
    assert np.equal(results['img'], results['img2']).all()
    assert results['img_shape'] == (img_height, img_width, 3)

    # test CenterCrop when crop_size is larger than img.shape
    transform = dict(
        type='CenterCrop', crop_size=(img_height * 2, img_width * 2))
    center_crop_module = build_from_cfg(transform, PIPELINES)
    results = reset_results(results, original_img)
    results = center_crop_module(results)
    assert np.equal(results['img'], results['img2']).all()
    assert results['img_shape'] == (img_height, img_width, 3)

    # test CenterCrop when crop_width is smaller than img_width
    transform = dict(
        type='CenterCrop', crop_size=(img_height, img_width / 2))
    center_crop_module = build_from_cfg(transform, PIPELINES)
    results = reset_results(results, original_img)
    results = center_crop_module(results)
    assert np.equal(results['img'], results['img2']).all()
    assert results['img_shape'] == (img_height, img_width / 2, 3)

    # test CenterCrop when crop_height is smaller than img_height
    transform = dict(
        type='CenterCrop', crop_size=(img_height / 2, img_width))
    center_crop_module = build_from_cfg(transform, PIPELINES)
    results = reset_results(results, original_img)
    results = center_crop_module(results)
    assert np.equal(results['img'], results['img2']).all()
    assert results['img_shape'] == (img_height / 2, img_width, 3)

    # compare results with torchvision
    transform = dict(type='CenterCrop', crop_size=224)
    center_crop_module = build_from_cfg(transform, PIPELINES)
    results = reset_results(results, original_img)
    results = center_crop_module(results)
    center_crop_module = transforms.CenterCrop(size=224)
    pil_img = Image.fromarray(original_img)
    cropped_img = center_crop_module(pil_img)
    cropped_img = np.array(cropped_img)
    assert np.equal(results['img'], results['img2']).all()
    assert np.equal(results['img'], cropped_img).all()
def test_normalize():
    img_norm_cfg = dict(
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        to_rgb=True)

    # test repr
    transform = dict(type='Normalize', **img_norm_cfg)
    normalize_module = build_from_cfg(transform, PIPELINES)
    assert isinstance(repr(normalize_module), str)

    # read data
    results = dict()
    img = mmcv.imread(
        osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
    original_img = copy.deepcopy(img)
    results['img'] = img
    results['img2'] = copy.deepcopy(img)
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    results['img_fields'] = ['img', 'img2']

    norm_results = normalize_module(results)
    assert np.equal(norm_results['img'], norm_results['img2']).all()

    # compare results with manual computation
    mean = np.array(img_norm_cfg['mean'])
    std = np.array(img_norm_cfg['std'])
    normalized_img = (original_img[..., ::-1] - mean) / std
    assert np.allclose(norm_results['img'], normalized_img)

    # compare results with torchvision
    normalize_module = transforms.Normalize(mean=mean, std=std)
    tensor_img = original_img[..., ::-1].copy()
    tensor_img = torch.Tensor(tensor_img.transpose(2, 0, 1))
    normalized_img = normalize_module(tensor_img)
    normalized_img = np.array(normalized_img).transpose(1, 2, 0)
    assert np.equal(norm_results['img'], normalized_img).all()
def test_randomcrop():
    ori_img = mmcv.imread(
        osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
    ori_img_pil = Image.open(
        osp.join(osp.dirname(__file__), '../data/color.jpg'))
    seed = random.randint(0, 100)

    # test crop size is int
    kwargs = dict(size=200, padding=0, pad_if_needed=True, fill=0)
    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([torchvision.transforms.RandomCrop(**kwargs)])
    composed_transform = Compose(aug)
    baseline = composed_transform(ori_img_pil)

    kwargs = dict(size=200, padding=0, pad_if_needed=True, pad_val=0)
    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([mmcls_transforms.RandomCrop(**kwargs)])
    composed_transform = Compose(aug)

    # test __repr__()
    print(composed_transform)
    results = dict()
    results['img'] = ori_img
    img = composed_transform(results)['img']
    assert np.array(img).shape == (200, 200, 3)
    assert np.array(baseline).shape == (200, 200, 3)
    nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero())
    nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero())
    assert nonzero == nonzero_transform

    # test crop size < image size
    kwargs = dict(size=(200, 300), padding=0, pad_if_needed=True, fill=0)
    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([torchvision.transforms.RandomCrop(**kwargs)])
    composed_transform = Compose(aug)
    baseline = composed_transform(ori_img_pil)

    kwargs = dict(size=(200, 300), padding=0, pad_if_needed=True, pad_val=0)
    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([mmcls_transforms.RandomCrop(**kwargs)])
    composed_transform = Compose(aug)
    results = dict()
    results['img'] = ori_img
    img = composed_transform(results)['img']
    assert np.array(img).shape == (200, 300, 3)
    assert np.array(baseline).shape == (200, 300, 3)
    nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero())
    nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero())
    assert nonzero == nonzero_transform

    # test crop size > image size
    kwargs = dict(size=(600, 700), padding=0, pad_if_needed=True, fill=0)
    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([torchvision.transforms.RandomCrop(**kwargs)])
    composed_transform = Compose(aug)
    baseline = composed_transform(ori_img_pil)

    kwargs = dict(size=(600, 700), padding=0, pad_if_needed=True, pad_val=0)
    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([mmcls_transforms.RandomCrop(**kwargs)])
    composed_transform = Compose(aug)
    results = dict()
    results['img'] = ori_img
    img = composed_transform(results)['img']
    assert np.array(img).shape == (600, 700, 3)
    assert np.array(baseline).shape == (600, 700, 3)
    nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero())
    nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero())
    assert nonzero == nonzero_transform

    # test crop size == image size
    kwargs = dict(
        size=(ori_img.shape[0], ori_img.shape[1]),
        padding=0,
        pad_if_needed=True,
        fill=0)
    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([torchvision.transforms.RandomCrop(**kwargs)])
    composed_transform = Compose(aug)
    baseline = composed_transform(ori_img_pil)

    kwargs = dict(
        size=(ori_img.shape[0], ori_img.shape[1]),
        padding=0,
        pad_if_needed=True,
        pad_val=0)
    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([mmcls_transforms.RandomCrop(**kwargs)])
    composed_transform = Compose(aug)
    results = dict()
    results['img'] = ori_img
    img = composed_transform(results)['img']
    assert np.array(img).shape == (img.shape[0], img.shape[1], 3)
    assert np.array(baseline).shape == (img.shape[0], img.shape[1], 3)
    nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero())
    nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero())
    assert nonzero == nonzero_transform
    assert_array_equal(ori_img, img)
    assert_array_equal(np.array(baseline), np.array(ori_img_pil))

    # test different padding mode
    for mode in ['constant', 'edge', 'reflect', 'symmetric']:
        kwargs = dict(size=(500, 600), padding=0, pad_if_needed=True, fill=0)
        kwargs['padding_mode'] = mode
        random.seed(seed)
        np.random.seed(seed)
        aug = []
        aug.extend([torchvision.transforms.RandomCrop(**kwargs)])
        composed_transform = Compose(aug)
        baseline = composed_transform(ori_img_pil)

        kwargs = dict(
            size=(500, 600), padding=0, pad_if_needed=True, pad_val=0)
        random.seed(seed)
        np.random.seed(seed)
        aug = []
        aug.extend([mmcls_transforms.RandomCrop(**kwargs)])
        composed_transform = Compose(aug)
        results = dict()
        results['img'] = ori_img
        img = composed_transform(results)['img']
        assert np.array(img).shape == (500, 600, 3)
        assert np.array(baseline).shape == (500, 600, 3)
        nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero())
        nonzero_transform = len(
            (img - np.array(baseline)[:, :, ::-1]).nonzero())
        assert nonzero == nonzero_transform
def test_randomresizedcrop():
    ori_img = mmcv.imread(
        osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
    ori_img_pil = Image.open(
        osp.join(osp.dirname(__file__), '../data/color.jpg'))
    seed = random.randint(0, 100)

    # test when scale is not of kind (min, max)
    with pytest.raises(ValueError):
        kwargs = dict(
            size=(200, 300), scale=(1.0, 0.08), ratio=(3. / 4., 4. / 3.))
        aug = []
        aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)])
        composed_transform = Compose(aug)
        results = dict()
        results['img'] = ori_img
        composed_transform(results)['img']

    # test when ratio is not of kind (min, max)
    with pytest.raises(ValueError):
        kwargs = dict(
            size=(200, 300), scale=(0.08, 1.0), ratio=(4. / 3., 3. / 4.))
        aug = []
        aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)])
        composed_transform = Compose(aug)
        results = dict()
        results['img'] = ori_img
        composed_transform(results)['img']

    # test when efficientnet_style is True and crop_padding < 0
    with pytest.raises(AssertionError):
        kwargs = dict(size=200, efficientnet_style=True, crop_padding=-1)
        aug = []
        aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)])
        composed_transform = Compose(aug)
        results = dict()
        results['img'] = ori_img
        composed_transform(results)['img']

    # test crop size is int
    kwargs = dict(size=200, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.))
    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([torchvision.transforms.RandomResizedCrop(**kwargs)])
    composed_transform = Compose(aug)
    baseline = composed_transform(ori_img_pil)

    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)])
    composed_transform = Compose(aug)

    # test __repr__()
    print(composed_transform)
    results = dict()
    results['img'] = ori_img
    img = composed_transform(results)['img']
    assert np.array(img).shape == (200, 200, 3)
    assert np.array(baseline).shape == (200, 200, 3)
    nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero())
    nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero())
    assert nonzero == nonzero_transform

    # test crop size < image size
    kwargs = dict(
        size=(200, 300), scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.))
    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([torchvision.transforms.RandomResizedCrop(**kwargs)])
    composed_transform = Compose(aug)
    baseline = composed_transform(ori_img_pil)

    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)])
    composed_transform = Compose(aug)
    results = dict()
    results['img'] = ori_img
    img = composed_transform(results)['img']
    assert np.array(img).shape == (200, 300, 3)
    assert np.array(baseline).shape == (200, 300, 3)
    nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero())
    nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero())
    assert nonzero == nonzero_transform

    # test crop size < image size when efficientnet_style = True
    kwargs = dict(
        size=200,
        scale=(0.08, 1.0),
        ratio=(3. / 4., 4. / 3.),
        efficientnet_style=True)
    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)])
    composed_transform = Compose(aug)
    results = dict()
    results['img'] = ori_img
    img = composed_transform(results)['img']
    assert img.shape == (200, 200, 3)

    # test crop size > image size
    kwargs = dict(
        size=(600, 700), scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.))
    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([torchvision.transforms.RandomResizedCrop(**kwargs)])
    composed_transform = Compose(aug)
    baseline = composed_transform(ori_img_pil)

    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)])
    composed_transform = Compose(aug)
    results = dict()
    results['img'] = ori_img
    img = composed_transform(results)['img']
    assert np.array(img).shape == (600, 700, 3)
    assert np.array(baseline).shape == (600, 700, 3)
    nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero())
    nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero())
    assert nonzero == nonzero_transform

    # test crop size > image size when efficientnet_style = True
    kwargs = dict(
        size=600,
        scale=(0.08, 1.0),
        ratio=(3. / 4., 4. / 3.),
        efficientnet_style=True)
    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)])
    composed_transform = Compose(aug)
    results = dict()
    results['img'] = ori_img
    img = composed_transform(results)['img']
    assert img.shape == (600, 600, 3)

    # test cropping the whole image
    kwargs = dict(
        size=(ori_img.shape[0], ori_img.shape[1]),
        scale=(1.0, 2.0),
        ratio=(1.0, 2.0))
    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([torchvision.transforms.RandomResizedCrop(**kwargs)])
    composed_transform = Compose(aug)
    baseline = composed_transform(ori_img_pil)

    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)])
    composed_transform = Compose(aug)
    results = dict()
    results['img'] = ori_img
    img = composed_transform(results)['img']
    assert np.array(img).shape == (ori_img.shape[0], ori_img.shape[1], 3)
    assert np.array(baseline).shape == (ori_img.shape[0], ori_img.shape[1], 3)
    nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero())
    nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero())
    assert nonzero == nonzero_transform
    # assert_array_equal(ori_img, img)
    # assert_array_equal(np.array(ori_img_pil), np.array(baseline))

    # test central crop when in_ratio < min(ratio)
    kwargs = dict(
        size=(ori_img.shape[0], ori_img.shape[1]),
        scale=(1.0, 2.0),
        ratio=(2., 3.))
    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([torchvision.transforms.RandomResizedCrop(**kwargs)])
    composed_transform = Compose(aug)
    baseline = composed_transform(ori_img_pil)

    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)])
    composed_transform = Compose(aug)
    results = dict()
    results['img'] = ori_img
    img = composed_transform(results)['img']
    assert np.array(img).shape == (ori_img.shape[0], ori_img.shape[1], 3)
    assert np.array(baseline).shape == (ori_img.shape[0], ori_img.shape[1], 3)
    nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero())
    nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero())
    assert nonzero == nonzero_transform

    # test central crop when in_ratio > max(ratio)
    kwargs = dict(
        size=(ori_img.shape[0], ori_img.shape[1]),
        scale=(1.0, 2.0),
        ratio=(3. / 4., 1))
    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([torchvision.transforms.RandomResizedCrop(**kwargs)])
    composed_transform = Compose(aug)
    baseline = composed_transform(ori_img_pil)

    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)])
    composed_transform = Compose(aug)
    results = dict()
    results['img'] = ori_img
    img = composed_transform(results)['img']
    assert np.array(img).shape == (ori_img.shape[0], ori_img.shape[1], 3)
    assert np.array(baseline).shape == (ori_img.shape[0], ori_img.shape[1], 3)
    nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero())
    nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero())
    assert nonzero == nonzero_transform

    # test central crop when max_attempts = 0 and efficientnet_style = True
    kwargs = dict(
        size=200,
        scale=(0.08, 1.0),
        ratio=(3. / 4., 4. / 3.),
        efficientnet_style=True,
        max_attempts=0,
        crop_padding=32)
    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)])
    composed_transform = Compose(aug)
    results = dict()
    results['img'] = ori_img
    img = composed_transform(results)['img']

    kwargs = dict(crop_size=200, efficientnet_style=True, crop_padding=32)
    resize_kwargs = dict(size=200)
    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([mmcls_transforms.CenterCrop(**kwargs)])
    aug.extend([mmcls_transforms.Resize(**resize_kwargs)])
    composed_transform = Compose(aug)
    results = dict()
    results['img'] = ori_img
    baseline = composed_transform(results)['img']
    assert img.shape == baseline.shape
    assert np.equal(img, baseline).all()

    # test central crop when min_covered = 1 and efficientnet_style = True
    kwargs = dict(
        size=200,
        scale=(0.08, 1.0),
        ratio=(3. / 4., 4. / 3.),
        efficientnet_style=True,
        max_attempts=100,
        min_covered=1)
    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)])
    composed_transform = Compose(aug)
    results = dict()
    results['img'] = ori_img
    img = composed_transform(results)['img']

    kwargs = dict(crop_size=200, efficientnet_style=True, crop_padding=32)
    resize_kwargs = dict(size=200)
    random.seed(seed)
    np.random.seed(seed)
    aug = []
    aug.extend([mmcls_transforms.CenterCrop(**kwargs)])
    aug.extend([mmcls_transforms.Resize(**resize_kwargs)])
    composed_transform = Compose(aug)
    results = dict()
    results['img'] = ori_img
    baseline = composed_transform(results)['img']
    assert img.shape == baseline.shape
    assert np.equal(img, baseline).all()

    # test different interpolation types
    for mode in ['nearest', 'bilinear', 'bicubic', 'area', 'lanczos']:
        kwargs = dict(
            size=(600, 700),
            scale=(0.08, 1.0),
            ratio=(3. / 4., 4. / 3.),
            interpolation=mode)
        aug = []
        aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)])
        composed_transform = Compose(aug)
        results = dict()
        results['img'] = ori_img
        img = composed_transform(results)['img']
        assert img.shape == (600, 700, 3)
def test_randomgrayscale():
    # test rgb2gray, return the grayscale image with p>1
    in_img = np.random.rand(10, 10, 3).astype(np.float32)
    kwargs = dict(gray_prob=2)

    aug = []
    aug.extend([mmcls_transforms.RandomGrayscale(**kwargs)])
    composed_transform = Compose(aug)
    print(composed_transform)
    results = dict()
    results['img'] = in_img
    img = composed_transform(results)['img']
    computed_gray = (
        in_img[:, :, 0] * 0.299 + in_img[:, :, 1] * 0.587 +
        in_img[:, :, 2] * 0.114)
    for i in range(img.shape[2]):
        assert_array_almost_equal(img[:, :, i], computed_gray, decimal=4)
    assert img.shape == (10, 10, 3)

    # test rgb2gray, return the original image with p=-1
    in_img = np.random.rand(10, 10, 3).astype(np.float32)
    kwargs = dict(gray_prob=-1)

    aug = []
    aug.extend([mmcls_transforms.RandomGrayscale(**kwargs)])
    composed_transform = Compose(aug)
    results = dict()
    results['img'] = in_img
    img = composed_transform(results)['img']
    assert_array_equal(img, in_img)
    assert img.shape == (10, 10, 3)

    # test image with one channel with our method
    # and the function from torchvision
    in_img = np.random.rand(10, 10, 1).astype(np.float32)
    kwargs = dict(gray_prob=2)

    aug = []
    aug.extend([mmcls_transforms.RandomGrayscale(**kwargs)])
    composed_transform = Compose(aug)
    results = dict()
    results['img'] = in_img
    img = composed_transform(results)['img']
    assert_array_equal(img, in_img)
    assert img.shape == (10, 10, 1)

    in_img_pil = Image.fromarray(in_img[:, :, 0], mode='L')
    kwargs = dict(p=2)
    aug = []
    aug.extend([torchvision.transforms.RandomGrayscale(**kwargs)])
    composed_transform = Compose(aug)
    img_pil = composed_transform(in_img_pil)
    assert_array_equal(np.array(img_pil), np.array(in_img_pil))
    assert np.array(img_pil).shape == (10, 10)
def test_randomflip():
    # test assertion if flip probability is smaller than 0
    with pytest.raises(AssertionError):
        transform = dict(type='RandomFlip', flip_prob=-1)
        build_from_cfg(transform, PIPELINES)

    # test assertion if flip probability is larger than 1
    with pytest.raises(AssertionError):
        transform = dict(type='RandomFlip', flip_prob=2)
        build_from_cfg(transform, PIPELINES)

    # test assertion if direction is not horizontal and vertical
    with pytest.raises(AssertionError):
        transform = dict(type='RandomFlip', direction='random')
        build_from_cfg(transform, PIPELINES)

    # test assertion if direction is not lowercase
    with pytest.raises(AssertionError):
        transform = dict(type='RandomFlip', direction='Horizontal')
        build_from_cfg(transform, PIPELINES)

    # read test image
    results = dict()
    img = mmcv.imread(
        osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
    original_img = copy.deepcopy(img)
    results['img'] = img
    results['img2'] = copy.deepcopy(img)
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    results['img_fields'] = ['img', 'img2']

    def reset_results(results, original_img):
        results['img'] = copy.deepcopy(original_img)
        results['img2'] = copy.deepcopy(original_img)
        results['img_shape'] = original_img.shape
        results['ori_shape'] = original_img.shape
        return results

    # test RandomFlip when flip_prob is 0
    transform = dict(type='RandomFlip', flip_prob=0)
    flip_module = build_from_cfg(transform, PIPELINES)
    results = flip_module(results)
    assert np.equal(results['img'], original_img).all()
    assert np.equal(results['img'], results['img2']).all()

    # test RandomFlip when flip_prob is 1
    transform = dict(type='RandomFlip', flip_prob=1)
    flip_module = build_from_cfg(transform, PIPELINES)
    results = flip_module(results)
    assert np.equal(results['img'], results['img2']).all()

    # compare horizontal flip with torchvision
    transform = dict(type='RandomFlip', flip_prob=1, direction='horizontal')
    flip_module = build_from_cfg(transform, PIPELINES)
    results = reset_results(results, original_img)
    results = flip_module(results)
    flip_module = transforms.RandomHorizontalFlip(p=1)
    pil_img = Image.fromarray(original_img)
    flipped_img = flip_module(pil_img)
    flipped_img = np.array(flipped_img)
    assert np.equal(results['img'], results['img2']).all()
    assert np.equal(results['img'], flipped_img).all()

    # compare vertical flip with torchvision
    transform = dict(type='RandomFlip', flip_prob=1, direction='vertical')
    flip_module = build_from_cfg(transform, PIPELINES)
    results = reset_results(results, original_img)
    results = flip_module(results)
    flip_module = transforms.RandomVerticalFlip(p=1)
    pil_img = Image.fromarray(original_img)
    flipped_img = flip_module(pil_img)
    flipped_img = np.array(flipped_img)
    assert np.equal(results['img'], results['img2']).all()
    assert np.equal(results['img'], flipped_img).all()
def test_random_erasing():
    # test erase_prob assertion
    with pytest.raises(AssertionError):
        cfg = dict(type='RandomErasing', erase_prob=-1.)
        build_from_cfg(cfg, PIPELINES)
    with pytest.raises(AssertionError):
        cfg = dict(type='RandomErasing', erase_prob=1)
        build_from_cfg(cfg, PIPELINES)

    # test area_ratio assertion
    with pytest.raises(AssertionError):
        cfg = dict(type='RandomErasing', min_area_ratio=-1.)
        build_from_cfg(cfg, PIPELINES)
    with pytest.raises(AssertionError):
        cfg = dict(type='RandomErasing', max_area_ratio=1)
        build_from_cfg(cfg, PIPELINES)
    with pytest.raises(AssertionError):
        # min_area_ratio should be smaller than max_area_ratio
        cfg = dict(
            type='RandomErasing', min_area_ratio=0.6, max_area_ratio=0.4)
        build_from_cfg(cfg, PIPELINES)

    # test aspect_range assertion
    with pytest.raises(AssertionError):
        cfg = dict(type='RandomErasing', aspect_range='str')
        build_from_cfg(cfg, PIPELINES)
    with pytest.raises(AssertionError):
        cfg = dict(type='RandomErasing', aspect_range=-1)
        build_from_cfg(cfg, PIPELINES)
    with pytest.raises(AssertionError):
        # In aspect_range (min, max), min should be smaller than max.
        cfg = dict(type='RandomErasing', aspect_range=[1.6, 0.6])
        build_from_cfg(cfg, PIPELINES)

    # test mode assertion
    with pytest.raises(AssertionError):
        cfg = dict(type='RandomErasing', mode='unknown')
        build_from_cfg(cfg, PIPELINES)

    # test fill_std assertion
    with pytest.raises(AssertionError):
        cfg = dict(type='RandomErasing', fill_std='unknown')
        build_from_cfg(cfg, PIPELINES)

    # test implicit conversion of aspect_range
    cfg = dict(type='RandomErasing', aspect_range=0.5)
    random_erasing = build_from_cfg(cfg, PIPELINES)
    assert random_erasing.aspect_range == (0.5, 2.)

    cfg = dict(type='RandomErasing', aspect_range=2.)
    random_erasing = build_from_cfg(cfg, PIPELINES)
    assert random_erasing.aspect_range == (0.5, 2.)

    # test implicit conversion of fill_color
    cfg = dict(type='RandomErasing', fill_color=15)
    random_erasing = build_from_cfg(cfg, PIPELINES)
    assert random_erasing.fill_color == [15, 15, 15]

    # test implicit conversion of fill_std
    cfg = dict(type='RandomErasing', fill_std=0.5)
    random_erasing = build_from_cfg(cfg, PIPELINES)
    assert random_erasing.fill_std == [0.5, 0.5, 0.5]

    # test when erase_prob=0.
    results = construct_toy_data()
    cfg = dict(
        type='RandomErasing',
        erase_prob=0.,
        mode='const',
        fill_color=(255, 255, 255))
    random_erasing = build_from_cfg(cfg, PIPELINES)
    results = random_erasing(results)
    np.testing.assert_array_equal(results['img'], results['ori_img'])

    # test mode 'const'
    random.seed(0)
    np.random.seed(0)
    results = construct_toy_data()
    cfg = dict(
        type='RandomErasing',
        erase_prob=1.,
        mode='const',
        fill_color=(255, 255, 255))
    random_erasing = build_from_cfg(cfg, PIPELINES)
    results = random_erasing(results)
    expect_out = np.array([[1, 255, 3, 4], [5, 255, 7, 8], [9, 10, 11, 12]],
                          dtype=np.uint8)
    expect_out = np.stack([expect_out] * 3, axis=-1)
    np.testing.assert_array_equal(results['img'], expect_out)

    # test mode 'rand' with normal distribution
    random.seed(0)
    np.random.seed(0)
    results = construct_toy_data()
    cfg = dict(type='RandomErasing', erase_prob=1., mode='rand')
    random_erasing = build_from_cfg(cfg, PIPELINES)
    results = random_erasing(results)
    expect_out = results['ori_img']
    expect_out[:2, 1] = [[159, 98, 76], [14, 69, 122]]
    np.testing.assert_array_equal(results['img'], expect_out)

    # test mode 'rand' with uniform distribution
    random.seed(0)
    np.random.seed(0)
    results = construct_toy_data()
    cfg = dict(
        type='RandomErasing',
        erase_prob=1.,
        mode='rand',
        fill_std=(10, 255, 0))
    random_erasing = build_from_cfg(cfg, PIPELINES)
    results = random_erasing(results)
    expect_out = results['ori_img']
    expect_out[:2, 1] = [[113, 255, 128], [126, 83, 128]]
    np.testing.assert_array_equal(results['img'], expect_out)
def test_color_jitter():
    # read test image
    results = dict()
    img = mmcv.imread(
        osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
    original_img = copy.deepcopy(img)
    results['img'] = img
    results['img2'] = copy.deepcopy(img)
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    results['img_fields'] = ['img', 'img2']

    def reset_results(results, original_img):
        results['img'] = copy.deepcopy(original_img)
        results['img2'] = copy.deepcopy(original_img)
        results['img_shape'] = original_img.shape
        results['ori_shape'] = original_img.shape
        return results

    transform = dict(
        type='ColorJitter', brightness=0., contrast=0., saturation=0.)
    colorjitter_module = build_from_cfg(transform, PIPELINES)
    results = colorjitter_module(results)
    assert np.equal(results['img'], original_img).all()
    assert np.equal(results['img'], results['img2']).all()

    results = reset_results(results, original_img)
    transform = dict(
        type='ColorJitter', brightness=0.3, contrast=0.3, saturation=0.3)
    colorjitter_module = build_from_cfg(transform, PIPELINES)
    results = colorjitter_module(results)
    assert not np.equal(results['img'], original_img).all()
def test_lighting():
    # test assertion if eigval or eigvec is wrong type or length
    with pytest.raises(AssertionError):
        transform = dict(type='Lighting', eigval=1, eigvec=[[1, 0, 0]])
        build_from_cfg(transform, PIPELINES)
    with pytest.raises(AssertionError):
        transform = dict(type='Lighting', eigval=[1], eigvec=[1, 0, 0])
        build_from_cfg(transform, PIPELINES)
    with pytest.raises(AssertionError):
        transform = dict(
            type='Lighting', eigval=[1, 2], eigvec=[[1, 0, 0], [0, 1]])
        build_from_cfg(transform, PIPELINES)

    # read test image
    results = dict()
    img = mmcv.imread(
        osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
    original_img = copy.deepcopy(img)
    results['img'] = img
    results['img2'] = copy.deepcopy(img)
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    results['img_fields'] = ['img', 'img2']

    def reset_results(results, original_img):
        results['img'] = copy.deepcopy(original_img)
        results['img2'] = copy.deepcopy(original_img)
        results['img_shape'] = original_img.shape
        results['ori_shape'] = original_img.shape
        return results

    eigval = [0.2175, 0.0188, 0.0045]
    eigvec = [[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140],
              [-0.5836, -0.6948, 0.4203]]
    transform = dict(type='Lighting', eigval=eigval, eigvec=eigvec)
    lightening_module = build_from_cfg(transform, PIPELINES)
    results = lightening_module(results)
    assert not np.equal(results['img'], results['img2']).all()
    assert results['img'].dtype == float
    assert results['img2'].dtype == float

    results = reset_results(results, original_img)
    transform = dict(
        type='Lighting',
        eigval=eigval,
        eigvec=eigvec,
        alphastd=0.,
        to_rgb=False)
    lightening_module = build_from_cfg(transform, PIPELINES)
    results = lightening_module(results)
    assert np.equal(results['img'], original_img).all()
    assert np.equal(results['img'], results['img2']).all()
    assert results['img'].dtype == float
    assert results['img2'].dtype == float
def test_albu_transform():
    results = dict(
        img_prefix=osp.join(osp.dirname(__file__), '../data'),
        img_info=dict(filename='color.jpg'))

    # Define simple pipeline
    load = dict(type='LoadImageFromFile')
    load = build_from_cfg(load, PIPELINES)

    albu_transform = dict(
        type='Albu', transforms=[dict(type='ChannelShuffle', p=1)])
    albu_transform = build_from_cfg(albu_transform, PIPELINES)

    normalize = dict(type='Normalize', mean=[0] * 3, std=[0] * 3, to_rgb=True)
    normalize = build_from_cfg(normalize, PIPELINES)

    # Execute transforms
    results = load(results)
    results = albu_transform(results)
    results = normalize(results)

    assert results['img'].dtype == np.float32
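The tests above all follow the same construction idiom: each transform is declared as a config dict, instantiated through the PIPELINES registry with build_from_cfg, and chained with Compose. A minimal sketch of that idiom (not part of the commit), assuming only that mmcls and numpy are installed; the dummy zero image and the particular sizes are made up for illustration.

import numpy as np
from mmcv.utils import build_from_cfg

from mmcls.datasets.builder import PIPELINES
from mmcls.datasets.pipelines import Compose

# Build each transform from its config dict via the PIPELINES registry.
pipeline = Compose([
    build_from_cfg(dict(type='Resize', size=(256, -1)), PIPELINES),
    build_from_cfg(dict(type='CenterCrop', crop_size=224), PIPELINES),
    build_from_cfg(dict(type='RandomFlip', flip_prob=0.5), PIPELINES),
])

# Transforms communicate through a results dict keyed by 'img'.
results = dict(img=np.zeros((300, 400, 3), dtype=np.uint8))
out = pipeline(results)
print(out['img_shape'])  # expected (224, 224, 3)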
openmmlab_test/mmclassification-speed-benchmark/tools/analyze_results.py (new file, 0 → 100644)
import argparse
import os.path as osp

import mmcv
from mmcv import DictAction

from mmcls.datasets import build_dataset
from mmcls.models import build_classifier


def parse_args():
    parser = argparse.ArgumentParser(
        description='MMCls evaluate prediction success/fail')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('result', help='test result json/pkl file')
    parser.add_argument('--out-dir', help='dir to store output files')
    parser.add_argument(
        '--topk',
        default=20,
        type=int,
        help='Number of images to select for success/fail')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file.')
    args = parser.parse_args()
    return args


def save_imgs(result_dir, folder_name, results, model):
    full_dir = osp.join(result_dir, folder_name)
    mmcv.mkdir_or_exist(full_dir)
    mmcv.dump(results, osp.join(full_dir, folder_name + '.json'))

    # save imgs
    show_keys = ['pred_score', 'pred_class', 'gt_class']
    for result in results:
        result_show = dict(
            (k, v) for k, v in result.items() if k in show_keys)
        outfile = osp.join(full_dir, osp.basename(result['filename']))
        model.show_result(result['filename'], result_show, out_file=outfile)


def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    if args.options is not None:
        cfg.merge_from_dict(args.options)

    model = build_classifier(cfg.model)

    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    filenames = list()
    for info in dataset.data_infos:
        if info['img_prefix'] is not None:
            filename = osp.join(info['img_prefix'],
                                info['img_info']['filename'])
        else:
            filename = info['img_info']['filename']
        filenames.append(filename)
    gt_labels = list(dataset.get_gt_labels())
    gt_classes = [dataset.CLASSES[x] for x in gt_labels]

    # load test results
    outputs = mmcv.load(args.result)
    outputs['filename'] = filenames
    outputs['gt_label'] = gt_labels
    outputs['gt_class'] = gt_classes

    outputs_list = list()
    for i in range(len(gt_labels)):
        output = dict()
        for k in outputs.keys():
            output[k] = outputs[k][i]
        outputs_list.append(output)

    # sort result
    outputs_list = sorted(outputs_list, key=lambda x: x['pred_score'])

    success = list()
    fail = list()
    for output in outputs_list:
        if output['pred_label'] == output['gt_label']:
            success.append(output)
        else:
            fail.append(output)

    success = success[:args.topk]
    fail = fail[:args.topk]

    save_imgs(args.out_dir, 'success', success, model)
    save_imgs(args.out_dir, 'fail', fail, model)


if __name__ == '__main__':
    main()
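From the argparse definition above, a typical invocation would look like `python tools/analyze_results.py <config.py> <result.pkl> --out-dir <output_dir> --topk 20`, where the result file is presumably the prediction dump produced by the accompanying test script; the file names here are placeholders, not paths from the commit.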
openmmlab_test/mmclassification-speed-benchmark/tools/benchmark_regression.py (new file, 0 → 100644)
import argparse
import copy
import os
import os.path as osp
import time

import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist

from mmcls import __version__
from mmcls.apis import set_random_seed, train_model
from mmcls.datasets import build_dataset
from mmcls.models import build_classifier
from mmcls.utils import collect_env, get_root_logger
def parse_args():
    parser = argparse.ArgumentParser(
        description='Benchmark regression on multiple models')
    parser.add_argument(
        '--configs', nargs='+', help='train config files path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument(
        '--epochs',
        type=int,
        default=0,
        help='how many epochs to train, if 0, use config setting.')
    parser.add_argument(
        '--no-validate',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument('--device', help='device used for training')
    group_gpus.add_argument(
        '--gpus',
        type=int,
        help='number of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='ids of gpus to use '
        '(only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--options', nargs='+', action=DictAction, help='arguments in dict')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)

    return args
def
main
():
args
=
parse_args
()
dist_inited
=
False
for
config
in
args
.
configs
:
cfg
=
Config
.
fromfile
(
config
)
if
args
.
options
is
not
None
:
cfg
.
merge_from_dict
(
args
.
options
)
# set cudnn_benchmark
if
cfg
.
get
(
'cudnn_benchmark'
,
False
):
torch
.
backends
.
cudnn
.
benchmark
=
True
if
args
.
work_dir
is
not
None
:
# update configs according to CLI args if args.work_dir is not None
work_dir_root
=
args
.
work_dir
else
:
# use config filename as default work_dir if cfg.work_dir is None
work_dir_root
=
'./work_dirs/benchmark_regression'
cfg
.
work_dir
=
osp
.
join
(
work_dir_root
,
osp
.
splitext
(
osp
.
basename
(
config
))[
0
])
if
args
.
gpu_ids
is
not
None
:
cfg
.
gpu_ids
=
args
.
gpu_ids
else
:
cfg
.
gpu_ids
=
range
(
1
)
if
args
.
gpus
is
None
else
range
(
args
.
gpus
)
if
args
.
epochs
>
0
:
cfg
.
runner
.
max_epochs
=
args
.
epochs
# init distributed env first, since logger depends on the dist info.
if
args
.
launcher
==
'none'
:
distributed
=
False
elif
not
dist_inited
:
distributed
=
True
init_dist
(
args
.
launcher
,
**
cfg
.
dist_params
)
_
,
world_size
=
get_dist_info
()
cfg
.
gpu_ids
=
range
(
world_size
)
dist_inited
=
True
# create work_dir
mmcv
.
mkdir_or_exist
(
osp
.
abspath
(
cfg
.
work_dir
))
# dump config
cfg
.
dump
(
osp
.
join
(
cfg
.
work_dir
,
osp
.
basename
(
config
)))
# init the logger before other steps
timestamp
=
time
.
strftime
(
'%Y%m%d_%H%M%S'
,
time
.
localtime
())
log_file
=
osp
.
join
(
cfg
.
work_dir
,
f
'
{
timestamp
}
.log'
)
logger
=
get_root_logger
(
log_file
=
log_file
,
log_level
=
cfg
.
log_level
)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta
=
dict
()
# log env info
env_info_dict
=
collect_env
()
env_info
=
'
\n
'
.
join
([(
f
'
{
k
}
:
{
v
}
'
)
for
k
,
v
in
env_info_dict
.
items
()])
dash_line
=
'-'
*
60
+
'
\n
'
logger
.
info
(
'Environment info:
\n
'
+
dash_line
+
env_info
+
'
\n
'
+
dash_line
)
meta
[
'env_info'
]
=
env_info
# log some basic info
logger
.
info
(
f
'Distributed training:
{
distributed
}
'
)
logger
.
info
(
f
'Config:
\n
{
cfg
.
pretty_text
}
'
)
# set random seeds
if
args
.
seed
is
not
None
:
logger
.
info
(
f
'Set random seed to
{
args
.
seed
}
, '
f
'deterministic:
{
args
.
deterministic
}
'
)
set_random_seed
(
args
.
seed
,
deterministic
=
args
.
deterministic
)
cfg
.
seed
=
args
.
seed
meta
[
'seed'
]
=
args
.
seed
model
=
build_classifier
(
cfg
.
model
)
model
.
init_weights
()
datasets
=
[
build_dataset
(
cfg
.
data
.
train
)]
if
len
(
cfg
.
workflow
)
==
2
:
val_dataset
=
copy
.
deepcopy
(
cfg
.
data
.
val
)
val_dataset
.
pipeline
=
cfg
.
data
.
train
.
pipeline
datasets
.
append
(
build_dataset
(
val_dataset
))
if
cfg
.
checkpoint_config
is
not
None
:
# save mmcls version, config file content and class names in
# checkpoints as meta data
cfg
.
checkpoint_config
.
meta
=
dict
(
mmcls_version
=
__version__
,
config
=
cfg
.
pretty_text
,
CLASSES
=
datasets
[
0
].
CLASSES
)
# add an attribute for visualization convenience
train_model
(
model
,
datasets
,
cfg
,
distributed
=
distributed
,
validate
=
(
not
args
.
no_validate
),
timestamp
=
timestamp
,
device
=
'cpu'
if
args
.
device
==
'cpu'
else
'cuda'
,
meta
=
meta
)
if
__name__
==
'__main__'
:
main
()
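For reference, each config processed by the loop above gets its own run directory under the work-dir root, named after the config file. The path logic in isolation (the config path below is a hypothetical placeholder):

import os.path as osp

work_dir_root = './work_dirs/benchmark_regression'
config = 'configs/resnet/resnet18_b32x8_imagenet.py'  # hypothetical config path
# strip the directory and the .py extension to get a per-config run name
run_name = osp.splitext(osp.basename(config))[0]
print(osp.join(work_dir_root, run_name))
# -> ./work_dirs/benchmark_regression/resnet18_b32x8_imagenet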
openmmlab_test/mmclassification-speed-benchmark/tools/convert_models/mobilenetv2_to_mmcls.py
import argparse
from collections import OrderedDict

import torch


def convert_conv1(model_key, model_weight, state_dict, converted_names):
    if model_key.find('features.0.0') >= 0:
        new_key = model_key.replace('features.0.0', 'backbone.conv1.conv')
    else:
        new_key = model_key.replace('features.0.1', 'backbone.conv1.bn')
    state_dict[new_key] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {new_key}')


def convert_conv5(model_key, model_weight, state_dict, converted_names):
    if model_key.find('features.18.0') >= 0:
        new_key = model_key.replace('features.18.0', 'backbone.conv2.conv')
    else:
        new_key = model_key.replace('features.18.1', 'backbone.conv2.bn')
    state_dict[new_key] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {new_key}')


def convert_head(model_key, model_weight, state_dict, converted_names):
    new_key = model_key.replace('classifier.1', 'head.fc')
    state_dict[new_key] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {new_key}')


def convert_block(model_key, model_weight, state_dict, converted_names):
    split_keys = model_key.split('.')
    layer_id = int(split_keys[1])
    new_layer_id = 0
    sub_id = 0
    if layer_id == 1:
        new_layer_id = 1
        sub_id = 0
    elif layer_id in range(2, 4):
        new_layer_id = 2
        sub_id = layer_id - 2
    elif layer_id in range(4, 7):
        new_layer_id = 3
        sub_id = layer_id - 4
    elif layer_id in range(7, 11):
        new_layer_id = 4
        sub_id = layer_id - 7
    elif layer_id in range(11, 14):
        new_layer_id = 5
        sub_id = layer_id - 11
    elif layer_id in range(14, 17):
        new_layer_id = 6
        sub_id = layer_id - 14
    elif layer_id == 17:
        new_layer_id = 7
        sub_id = 0

    new_key = model_key.replace(f'features.{layer_id}',
                                f'backbone.layer{new_layer_id}.{sub_id}')
    if new_layer_id == 1:
        if new_key.find('conv.0.0') >= 0:
            new_key = new_key.replace('conv.0.0', 'conv.0.conv')
        elif new_key.find('conv.0.1') >= 0:
            new_key = new_key.replace('conv.0.1', 'conv.0.bn')
        elif new_key.find('conv.1') >= 0:
            new_key = new_key.replace('conv.1', 'conv.1.conv')
        elif new_key.find('conv.2') >= 0:
            new_key = new_key.replace('conv.2', 'conv.1.bn')
        else:
            raise ValueError(f'Unsupported conversion of key {model_key}')
    else:
        if new_key.find('conv.0.0') >= 0:
            new_key = new_key.replace('conv.0.0', 'conv.0.conv')
        elif new_key.find('conv.0.1') >= 0:
            new_key = new_key.replace('conv.0.1', 'conv.0.bn')
        elif new_key.find('conv.1.0') >= 0:
            new_key = new_key.replace('conv.1.0', 'conv.1.conv')
        elif new_key.find('conv.1.1') >= 0:
            new_key = new_key.replace('conv.1.1', 'conv.1.bn')
        elif new_key.find('conv.2') >= 0:
            new_key = new_key.replace('conv.2', 'conv.2.conv')
        elif new_key.find('conv.3') >= 0:
            new_key = new_key.replace('conv.3', 'conv.2.bn')
        else:
            raise ValueError(f'Unsupported conversion of key {model_key}')
    print(f'Convert {model_key} to {new_key}')
    state_dict[new_key] = model_weight
    converted_names.add(model_key)


def convert(src, dst):
    """Convert keys in torchvision pretrained MobileNetV2 models to mmcls
    style."""
    # load pytorch model
    blobs = torch.load(src, map_location='cpu')
    # convert to mmcls style
    state_dict = OrderedDict()
    converted_names = set()
    for key, weight in blobs.items():
        if 'features.0' in key:
            convert_conv1(key, weight, state_dict, converted_names)
        elif 'classifier' in key:
            convert_head(key, weight, state_dict, converted_names)
        elif 'features.18' in key:
            convert_conv5(key, weight, state_dict, converted_names)
        else:
            convert_block(key, weight, state_dict, converted_names)
    # check if all layers are converted
    for key in blobs:
        if key not in converted_names:
            print(f'not converted: {key}')
    # save checkpoint
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    torch.save(checkpoint, dst)


def main():
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src torchvision model path')
    parser.add_argument('dst', help='save path')
    args = parser.parse_args()
    convert(args.src, args.dst)


if __name__ == '__main__':
    main()
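The converters in this directory all share the same shape: iterate the source state dict, rewrite each key into the mmcls naming scheme, and record what was handled so leftovers can be reported. A toy sketch of that pattern on plain strings (the keys and the rename table here are illustrative, not the real MobileNetV2 mapping):

from collections import OrderedDict

# illustrative source keys and rename rules, not the full real mapping
blobs = OrderedDict([('features.0.0.weight', 'w0'), ('classifier.1.bias', 'b1')])
rename_rules = {'features.0.0': 'backbone.conv1.conv', 'classifier.1': 'head.fc'}

state_dict, converted_names = OrderedDict(), set()
for key, weight in blobs.items():
    for old, new in rename_rules.items():
        if old in key:
            state_dict[key.replace(old, new)] = weight
            converted_names.add(key)

# report anything the rules missed, mirroring the converters above
for key in blobs:
    if key not in converted_names:
        print(f'not converted: {key}')
print(list(state_dict))
# -> ['backbone.conv1.conv.weight', 'head.fc.bias']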
openmmlab_test/mmclassification-speed-benchmark/tools/convert_models/shufflenetv2_to_mmcls.py
import argparse
from collections import OrderedDict

import torch


def convert_conv1(model_key, model_weight, state_dict, converted_names):
    if model_key.find('conv1.0') >= 0:
        new_key = model_key.replace('conv1.0', 'backbone.conv1.conv')
    else:
        new_key = model_key.replace('conv1.1', 'backbone.conv1.bn')
    state_dict[new_key] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {new_key}')


def convert_conv5(model_key, model_weight, state_dict, converted_names):
    if model_key.find('conv5.0') >= 0:
        new_key = model_key.replace('conv5.0', 'backbone.layers.3.conv')
    else:
        new_key = model_key.replace('conv5.1', 'backbone.layers.3.bn')
    state_dict[new_key] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {new_key}')


def convert_head(model_key, model_weight, state_dict, converted_names):
    new_key = model_key.replace('fc', 'head.fc')
    state_dict[new_key] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {new_key}')


def convert_block(model_key, model_weight, state_dict, converted_names):
    split_keys = model_key.split('.')
    layer, block, branch = split_keys[:3]
    layer_id = int(layer[-1]) - 2
    new_key = model_key.replace(layer, f'backbone.layers.{layer_id}')

    if branch == 'branch1':
        if new_key.find('branch1.0') >= 0:
            new_key = new_key.replace('branch1.0', 'branch1.0.conv')
        elif new_key.find('branch1.1') >= 0:
            new_key = new_key.replace('branch1.1', 'branch1.0.bn')
        elif new_key.find('branch1.2') >= 0:
            new_key = new_key.replace('branch1.2', 'branch1.1.conv')
        elif new_key.find('branch1.3') >= 0:
            new_key = new_key.replace('branch1.3', 'branch1.1.bn')
    elif branch == 'branch2':
        if new_key.find('branch2.0') >= 0:
            new_key = new_key.replace('branch2.0', 'branch2.0.conv')
        elif new_key.find('branch2.1') >= 0:
            new_key = new_key.replace('branch2.1', 'branch2.0.bn')
        elif new_key.find('branch2.3') >= 0:
            new_key = new_key.replace('branch2.3', 'branch2.1.conv')
        elif new_key.find('branch2.4') >= 0:
            new_key = new_key.replace('branch2.4', 'branch2.1.bn')
        elif new_key.find('branch2.5') >= 0:
            new_key = new_key.replace('branch2.5', 'branch2.2.conv')
        elif new_key.find('branch2.6') >= 0:
            new_key = new_key.replace('branch2.6', 'branch2.2.bn')
        else:
            raise ValueError(f'Unsupported conversion of key {model_key}')
    else:
        raise ValueError(f'Unsupported conversion of key {model_key}')
    print(f'Convert {model_key} to {new_key}')
    state_dict[new_key] = model_weight
    converted_names.add(model_key)


def convert(src, dst):
    """Convert keys in torchvision pretrained ShuffleNetV2 models to mmcls
    style."""
    # load pytorch model
    blobs = torch.load(src, map_location='cpu')
    # convert to mmcls style
    state_dict = OrderedDict()
    converted_names = set()
    for key, weight in blobs.items():
        if 'conv1' in key:
            convert_conv1(key, weight, state_dict, converted_names)
        elif 'fc' in key:
            convert_head(key, weight, state_dict, converted_names)
        elif key.startswith('s'):
            convert_block(key, weight, state_dict, converted_names)
        elif 'conv5' in key:
            convert_conv5(key, weight, state_dict, converted_names)
    # check if all layers are converted
    for key in blobs:
        if key not in converted_names:
            print(f'not converted: {key}')
    # save checkpoint
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    torch.save(checkpoint, dst)


def main():
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src torchvision model path')
    parser.add_argument('dst', help='save path')
    args = parser.parse_args()
    convert(args.src, args.dst)


if __name__ == '__main__':
    main()
openmmlab_test/mmclassification-speed-benchmark/tools/convert_models/vgg_to_mmcls.py
import argparse
import os
from collections import OrderedDict

import torch


def get_layer_maps(layer_num, with_bn):
    layer_maps = {'conv': {}, 'bn': {}}
    if with_bn:
        if layer_num == 11:
            layer_idxs = [0, 4, 8, 11, 15, 18, 22, 25]
        elif layer_num == 13:
            layer_idxs = [0, 3, 7, 10, 14, 17, 21, 24, 28, 31]
        elif layer_num == 16:
            layer_idxs = [0, 3, 7, 10, 14, 17, 20, 24, 27, 30, 34, 37, 40]
        elif layer_num == 19:
            layer_idxs = [
                0, 3, 7, 10, 14, 17, 20, 23, 27, 30, 33, 36, 40, 43, 46, 49
            ]
        else:
            raise ValueError(f'Invalid number of layers: {layer_num}')
        for i, layer_idx in enumerate(layer_idxs):
            if i == 0:
                new_layer_idx = layer_idx
            else:
                new_layer_idx += int((layer_idx - layer_idxs[i - 1]) / 2)
            layer_maps['conv'][layer_idx] = new_layer_idx
            layer_maps['bn'][layer_idx + 1] = new_layer_idx
    else:
        if layer_num == 11:
            layer_idxs = [0, 3, 6, 8, 11, 13, 16, 18]
            new_layer_idxs = [0, 2, 4, 5, 7, 8, 10, 11]
        elif layer_num == 13:
            layer_idxs = [0, 2, 5, 7, 10, 12, 15, 17, 20, 22]
            new_layer_idxs = [0, 1, 3, 4, 6, 7, 9, 10, 12, 13]
        elif layer_num == 16:
            layer_idxs = [0, 2, 5, 7, 10, 12, 14, 17, 19, 21, 24, 26, 28]
            new_layer_idxs = [0, 1, 3, 4, 6, 7, 8, 10, 11, 12, 14, 15, 16]
        elif layer_num == 19:
            layer_idxs = [
                0, 2, 5, 7, 10, 12, 14, 16, 19, 21, 23, 25, 28, 30, 32, 34
            ]
            new_layer_idxs = [
                0, 1, 3, 4, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19
            ]
        else:
            raise ValueError(f'Invalid number of layers: {layer_num}')
        layer_maps['conv'] = {
            layer_idx: new_layer_idx
            for layer_idx, new_layer_idx in zip(layer_idxs, new_layer_idxs)
        }
    return layer_maps


def convert(src, dst, layer_num, with_bn=False):
    """Convert keys in torchvision pretrained VGG models to mmcls style."""
    # load pytorch model
    assert os.path.isfile(src), f'no checkpoint found at {src}'
    blobs = torch.load(src, map_location='cpu')
    # convert to mmcls style
    state_dict = OrderedDict()
    layer_maps = get_layer_maps(layer_num, with_bn)

    prefix = 'backbone'
    delimiter = '.'
    for key, weight in blobs.items():
        if 'features' in key:
            module, layer_idx, weight_type = key.split(delimiter)
            new_key = delimiter.join([prefix, key])
            layer_idx = int(layer_idx)
            for layer_key, maps in layer_maps.items():
                if layer_idx in maps:
                    new_layer_idx = maps[layer_idx]
                    new_key = delimiter.join([
                        prefix, 'features',
                        str(new_layer_idx), layer_key, weight_type
                    ])
            state_dict[new_key] = weight
            print(f'Convert {key} to {new_key}')
        elif 'classifier' in key:
            new_key = delimiter.join([prefix, key])
            state_dict[new_key] = weight
            print(f'Convert {key} to {new_key}')
        else:
            state_dict[key] = weight

    # save checkpoint
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    torch.save(checkpoint, dst)


def main():
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src torchvision model path')
    parser.add_argument('dst', help='save path')
    parser.add_argument(
        '--bn', action='store_true', help='whether original vgg has BN')
    parser.add_argument(
        '--layer_num',
        type=int,
        choices=[11, 13, 16, 19],
        default=11,
        help='number of VGG layers')
    args = parser.parse_args()
    convert(args.src, args.dst, layer_num=args.layer_num, with_bn=args.bn)


if __name__ == '__main__':
    main()
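One way to read the index arithmetic in get_layer_maps for the BN variants: each Conv-BN-ReLU triple in torchvision's features sequence collapses into a single module on the mmcls side, so gaps between consecutive conv indices roughly halve. The loop below reproduces the resulting conv map for VGG11-BN in isolation, using the index list from the function above:

layer_idxs = [0, 4, 8, 11, 15, 18, 22, 25]  # conv positions in torchvision vgg11_bn
new_idx, conv_map = 0, {}
for i, idx in enumerate(layer_idxs):
    if i > 0:
        new_idx += int((idx - layer_idxs[i - 1]) / 2)  # same halving rule as above
    conv_map[idx] = new_idx
print(conv_map)
# -> {0: 0, 4: 2, 8: 4, 11: 5, 15: 7, 18: 8, 22: 10, 25: 11}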
openmmlab_test/mmclassification-speed-benchmark/tools/deployment/mmcls2torchserve.py
from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory

import mmcv

try:
    from model_archiver.model_packaging import package_model
    from model_archiver.model_packaging_utils import ModelExportUtils
except ImportError:
    package_model = None


def mmcls2torchserve(
    config_file: str,
    checkpoint_file: str,
    output_folder: str,
    model_name: str,
    model_version: str = '1.0',
    force: bool = False,
):
    """Converts mmclassification model (config + checkpoint) to TorchServe
    `.mar`.

    Args:
        config_file:
            In MMClassification config format.
            The contents vary for each task repository.
        checkpoint_file:
            In MMClassification checkpoint format.
            The contents vary for each task repository.
        output_folder:
            Folder where `{model_name}.mar` will be created.
            The file created will be in TorchServe archive format.
        model_name:
            If not None, used for naming the `{model_name}.mar` file
            that will be created under `output_folder`.
            If None, `{Path(checkpoint_file).stem}` will be used.
        model_version:
            Model's version.
        force:
            If True, if there is an existing `{model_name}.mar`
            file under `output_folder` it will be overwritten.
    """
    mmcv.mkdir_or_exist(output_folder)

    config = mmcv.Config.fromfile(config_file)

    with TemporaryDirectory() as tmpdir:
        config.dump(f'{tmpdir}/config.py')

        args = Namespace(
            **{
                'model_file': f'{tmpdir}/config.py',
                'serialized_file': checkpoint_file,
                'handler': f'{Path(__file__).parent}/mmcls_handler.py',
                'model_name': model_name or Path(checkpoint_file).stem,
                'version': model_version,
                'export_path': output_folder,
                'force': force,
                'requirements_file': None,
                'extra_files': None,
                'runtime': 'python',
                'archive_format': 'default'
            })
        manifest = ModelExportUtils.generate_manifest_json(args)
        package_model(args, manifest)


def parse_args():
    parser = ArgumentParser(
        description='Convert mmcls models to TorchServe `.mar` format.')
    parser.add_argument('config', type=str, help='config file path')
    parser.add_argument('checkpoint', type=str, help='checkpoint file path')
    parser.add_argument(
        '--output-folder',
        type=str,
        required=True,
        help='Folder where `{model_name}.mar` will be created.')
    parser.add_argument(
        '--model-name',
        type=str,
        default=None,
        help='If not None, used for naming the `{model_name}.mar` '
        'file that will be created under `output_folder`. '
        'If None, `{Path(checkpoint_file).stem}` will be used.')
    parser.add_argument(
        '--model-version',
        type=str,
        default='1.0',
        help='Number used for versioning.')
    parser.add_argument(
        '-f',
        '--force',
        action='store_true',
        help='overwrite the existing `{model_name}.mar`')
    args = parser.parse_args()

    return args


if __name__ == '__main__':
    args = parse_args()

    if package_model is None:
        raise ImportError('`torch-model-archiver` is required. '
                          'Try: pip install torch-model-archiver')

    mmcls2torchserve(args.config, args.checkpoint, args.output_folder,
                     args.model_name, args.model_version, args.force)
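Because packaging is exposed as a plain function, it can be driven from Python as well as from the CLI. A minimal sketch (all paths and the model name are hypothetical placeholders; torch-model-archiver must be installed):

from mmcls2torchserve import mmcls2torchserve  # this module, on PYTHONPATH

# hypothetical config/checkpoint paths and model name
mmcls2torchserve(
    config_file='configs/resnet/resnet18_b32x8_imagenet.py',
    checkpoint_file='checkpoints/resnet18.pth',
    output_folder='model_store',
    model_name='resnet18')
# produces model_store/resnet18.mar, servable with `torchserve --model-store model_store`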
openmmlab_test/mmclassification-speed-benchmark/tools/deployment/mmcls_handler.py
import base64
import os

import mmcv
import torch
from ts.torch_handler.base_handler import BaseHandler

from mmcls.apis import inference_model, init_model


class MMclsHandler(BaseHandler):

    def initialize(self, context):
        properties = context.system_properties
        self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = torch.device(
            self.map_location + ':' + str(properties.get('gpu_id'))
            if torch.cuda.is_available() else self.map_location)
        self.manifest = context.manifest

        model_dir = properties.get('model_dir')
        serialized_file = self.manifest['model']['serializedFile']
        checkpoint = os.path.join(model_dir, serialized_file)
        self.config_file = os.path.join(model_dir, 'config.py')

        self.model = init_model(self.config_file, checkpoint, self.device)
        self.initialized = True

    def preprocess(self, data):
        images = []

        for row in data:
            image = row.get('data') or row.get('body')
            if isinstance(image, str):
                image = base64.b64decode(image)
            image = mmcv.imfrombytes(image)
            images.append(image)

        return images

    def inference(self, data, *args, **kwargs):
        results = []
        for image in data:
            results.append(inference_model(self.model, image))
        return results

    def postprocess(self, data):
        for result in data:
            result['pred_label'] = int(result['pred_label'])
        return data
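On the client side, the handler accepts raw image bytes or a base64 string in the request body, so a request can be as small as the sketch below (endpoint, port and file name are hypothetical; assumes the requests package, and the response fields follow inference_model's result dict):

import requests

# hypothetical endpoint and image file
with open('demo.jpg', 'rb') as f:
    resp = requests.post('http://127.0.0.1:8080/predictions/resnet18', data=f)
print(resp.json())  # e.g. {'pred_label': ..., 'pred_score': ..., 'pred_class': ...}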
openmmlab_test/mmclassification-speed-benchmark/tools/deployment/onnx2tensorrt.py
import argparse
import os
import os.path as osp

import numpy as np


def get_GiB(x: int):
    """return x GiB."""
    return x * (1 << 30)


def onnx2tensorrt(onnx_file,
                  trt_file,
                  input_shape,
                  max_batch_size,
                  fp16_mode=False,
                  verify=False,
                  workspace_size=1):
    """Create tensorrt engine from onnx model.

    Args:
        onnx_file (str): Filename of the input ONNX model file.
        trt_file (str): Filename of the output TensorRT engine file.
        input_shape (list[int]): Input shape of the model.
            eg [1, 3, 224, 224].
        max_batch_size (int): Max batch size of the model.
        fp16_mode (bool, optional): Whether to enable fp16 mode.
            Defaults to False.
        verify (bool, optional): Whether to verify the converted model.
            Defaults to False.
        workspace_size (int, optional): Maximum workspace of GPU in GiB.
            Defaults to 1.
    """
    import onnx
    from mmcv.tensorrt import TRTWraper, onnx2trt, save_trt_engine

    onnx_model = onnx.load(onnx_file)
    # create trt engine and wrapper
    assert max_batch_size >= 1
    max_shape = [max_batch_size] + list(input_shape[1:])
    opt_shape_dict = {'input': [input_shape, input_shape, max_shape]}
    max_workspace_size = get_GiB(workspace_size)
    trt_engine = onnx2trt(
        onnx_model,
        opt_shape_dict,
        fp16_mode=fp16_mode,
        max_workspace_size=max_workspace_size)
    save_dir, _ = osp.split(trt_file)
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
    save_trt_engine(trt_engine, trt_file)
    print(f'Successfully created TensorRT engine: {trt_file}')

    if verify:
        import torch
        import onnxruntime as ort

        input_img = torch.randn(*input_shape)
        input_img_cpu = input_img.detach().cpu().numpy()
        input_img_cuda = input_img.cuda()

        # Get results from ONNXRuntime
        session_options = ort.SessionOptions()
        sess = ort.InferenceSession(onnx_file, session_options)

        # get input and output names
        input_names = [_.name for _ in sess.get_inputs()]
        output_names = [_.name for _ in sess.get_outputs()]

        onnx_outputs = sess.run(None, {
            input_names[0]: input_img_cpu,
        })

        # Get results from TensorRT
        trt_model = TRTWraper(trt_file, input_names, output_names)
        with torch.no_grad():
            trt_outputs = trt_model({input_names[0]: input_img_cuda})
        trt_outputs = [
            trt_outputs[_].detach().cpu().numpy() for _ in output_names
        ]

        # Compare results
        np.testing.assert_allclose(
            onnx_outputs[0], trt_outputs[0], rtol=1e-05, atol=1e-05)
        print('The numerical values are the same ' +
              'between ONNXRuntime and TensorRT')


def parse_args():
    parser = argparse.ArgumentParser(
        description='Convert MMClassification models from ONNX to TensorRT')
    parser.add_argument('model', help='Filename of the input ONNX model')
    parser.add_argument(
        '--trt-file',
        type=str,
        default='tmp.trt',
        help='Filename of the output TensorRT engine')
    parser.add_argument(
        '--verify',
        action='store_true',
        help='Verify the outputs of ONNXRuntime and TensorRT')
    parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[224, 224],
        help='Input size of the model')
    parser.add_argument(
        '--max-batch-size',
        type=int,
        default=1,
        help='Maximum batch size of TensorRT model.')
    parser.add_argument('--fp16', action='store_true', help='Enable fp16 mode')
    parser.add_argument(
        '--workspace-size',
        type=int,
        default=1,
        help='Max workspace size of GPU in GiB')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()

    if len(args.shape) == 1:
        input_shape = (1, 3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (1, 3) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')

    # Create TensorRT engine
    onnx2tensorrt(
        args.model,
        args.trt_file,
        input_shape,
        args.max_batch_size,
        fp16_mode=args.fp16,
        verify=args.verify,
        workspace_size=args.workspace_size)
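Two conventions in the onnx2trt call above are easy to trip over: max_workspace_size is given in bytes (hence get_GiB), and each entry of opt_shape_dict is a [min, opt, max] shape triple, which is how a fixed min/opt shape with a larger maximum batch is expressed. A standalone sketch of both values:

def get_GiB(x: int):
    return x * (1 << 30)

input_shape = (1, 3, 224, 224)
max_batch_size = 8
max_shape = [max_batch_size] + list(input_shape[1:])
opt_shape_dict = {'input': [input_shape, input_shape, max_shape]}
print(get_GiB(1))      # 1073741824 bytes, i.e. one GiB
print(opt_shape_dict)  # {'input': [(1, 3, 224, 224), (1, 3, 224, 224), [8, 3, 224, 224]]}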
openmmlab_test/mmclassification-speed-benchmark/tools/deployment/pytorch2onnx.py
import argparse
from functools import partial

import mmcv
import numpy as np
import onnxruntime as rt
import torch
from mmcv.onnx import register_extra_symbolics
from mmcv.runner import load_checkpoint

from mmcls.models import build_classifier

torch.manual_seed(3)


def _demo_mm_inputs(input_shape, num_classes):
    """Create a superset of inputs needed to run test or train batches.

    Args:
        input_shape (tuple):
            input batch dimensions
        num_classes (int):
            number of semantic classes
    """
    (N, C, H, W) = input_shape
    rng = np.random.RandomState(0)
    imgs = rng.rand(*input_shape)
    gt_labels = rng.randint(
        low=0, high=num_classes, size=(N, 1)).astype(np.uint8)
    mm_inputs = {
        'imgs': torch.FloatTensor(imgs).requires_grad_(True),
        'gt_labels': torch.LongTensor(gt_labels),
    }
    return mm_inputs


def pytorch2onnx(model,
                 input_shape,
                 opset_version=11,
                 dynamic_export=False,
                 show=False,
                 output_file='tmp.onnx',
                 do_simplify=False,
                 verify=False):
    """Export Pytorch model to ONNX model and verify the outputs are the same
    between Pytorch and ONNX.

    Args:
        model (nn.Module): Pytorch model we want to export.
        input_shape (tuple): Use this input shape to construct
            the corresponding dummy input and execute the model.
        opset_version (int): The onnx op version. Default: 11.
        dynamic_export (bool): Whether to export ONNX with dynamic input
            shape. Default: False.
        show (bool): Whether print the computation graph. Default: False.
        output_file (string): The path to where we store the output ONNX model.
            Default: `tmp.onnx`.
        do_simplify (bool): Whether to simplify the exported ONNX model.
            Default: False.
        verify (bool): Whether compare the outputs between Pytorch and ONNX.
            Default: False.
    """
    model.cpu().eval()
    num_classes = model.head.num_classes
    mm_inputs = _demo_mm_inputs(input_shape, num_classes)

    imgs = mm_inputs.pop('imgs')
    img_list = [img[None, :] for img in imgs]

    # replace original forward function
    origin_forward = model.forward
    model.forward = partial(model.forward, img_metas={}, return_loss=False)
    register_extra_symbolics(opset_version)

    # support dynamic shape export
    if dynamic_export:
        dynamic_axes = {
            'input': {
                0: 'batch',
                2: 'height',
                3: 'width'
            },
            'probs': {
                0: 'batch'
            }
        }
    else:
        dynamic_axes = {}

    with torch.no_grad():
        torch.onnx.export(
            model, (img_list, ),
            output_file,
            input_names=['input'],
            output_names=['probs'],
            export_params=True,
            keep_initializers_as_inputs=True,
            dynamic_axes=dynamic_axes,
            verbose=show,
            opset_version=opset_version)
        print(f'Successfully exported ONNX model: {output_file}')
    model.forward = origin_forward

    if do_simplify:
        from mmcv import digit_version
        import onnxsim

        min_required_version = '0.3.0'
        assert digit_version(mmcv.__version__) >= digit_version(
            min_required_version
        ), f'Requires mmcv>={min_required_version}'

        if dynamic_axes:
            input_shape = (input_shape[0], input_shape[1], input_shape[2] * 2,
                           input_shape[3] * 2)
        else:
            input_shape = (input_shape[0], input_shape[1], input_shape[2],
                           input_shape[3])
        imgs = _demo_mm_inputs(input_shape, model.head.num_classes).pop('imgs')
        input_dic = {'input': imgs.detach().cpu().numpy()}
        input_shape_dic = {'input': list(input_shape)}

        # onnxsim.simplify returns the simplified model; save it back so
        # the simplification actually takes effect on output_file
        model_simp, check_ok = onnxsim.simplify(
            output_file,
            input_shapes=input_shape_dic,
            input_data=input_dic,
            dynamic_input_shape=dynamic_export)
        if check_ok:
            import onnx
            onnx.save(model_simp, output_file)
            print(f'Successfully simplified ONNX model: {output_file}')
        else:
            print('Failed to simplify ONNX model.')

    if verify:
        # check by onnx
        import onnx
        onnx_model = onnx.load(output_file)
        onnx.checker.check_model(onnx_model)

        # test the dynamic model
        if dynamic_export:
            dynamic_test_inputs = _demo_mm_inputs(
                (input_shape[0], input_shape[1], input_shape[2] * 2,
                 input_shape[3] * 2), model.head.num_classes)
            imgs = dynamic_test_inputs.pop('imgs')
            img_list = [img[None, :] for img in imgs]

        # check the numerical value
        # get pytorch output
        pytorch_result = model(img_list, img_metas={}, return_loss=False)[0]

        # get onnx output
        input_all = [node.name for node in onnx_model.graph.input]
        input_initializer = [
            node.name for node in onnx_model.graph.initializer
        ]
        net_feed_input = list(set(input_all) - set(input_initializer))
        assert (len(net_feed_input) == 1)
        sess = rt.InferenceSession(output_file)
        onnx_result = sess.run(
            None, {net_feed_input[0]: img_list[0].detach().numpy()})[0]
        if not np.allclose(pytorch_result, onnx_result):
            raise ValueError(
                'The outputs are different between Pytorch and ONNX')
        print('The outputs are the same between Pytorch and ONNX')


def parse_args():
    parser = argparse.ArgumentParser(description='Convert MMCls to ONNX')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--checkpoint', help='checkpoint file', default=None)
    parser.add_argument('--show', action='store_true', help='show onnx graph')
    parser.add_argument(
        '--verify', action='store_true', help='verify the onnx model')
    parser.add_argument('--output-file', type=str, default='tmp.onnx')
    parser.add_argument('--opset-version', type=int, default=11)
    parser.add_argument(
        '--simplify',
        action='store_true',
        help='Whether to simplify onnx model.')
    parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[224, 224],
        help='input image size')
    parser.add_argument(
        '--dynamic-export',
        action='store_true',
        help='Whether to export ONNX with dynamic input shape. '
        'Defaults to False.')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()

    if len(args.shape) == 1:
        input_shape = (1, 3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (1, 3) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')

    cfg = mmcv.Config.fromfile(args.config)
    cfg.model.pretrained = None

    # build the model and load checkpoint
    classifier = build_classifier(cfg.model)

    if args.checkpoint:
        load_checkpoint(classifier, args.checkpoint, map_location='cpu')

    # convert model to onnx file
    pytorch2onnx(
        classifier,
        input_shape,
        opset_version=args.opset_version,
        show=args.show,
        dynamic_export=args.dynamic_export,
        output_file=args.output_file,
        do_simplify=args.simplify,
        verify=args.verify)
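After export, the model can be exercised with onnxruntime alone. A minimal sketch, assuming the default tmp.onnx output and the static 224x224 shape (the tensor name 'input' matches input_names in the export call above):

import numpy as np
import onnxruntime as rt

sess = rt.InferenceSession('tmp.onnx')  # path produced by the script above
dummy = np.random.rand(1, 3, 224, 224).astype(np.float32)
probs = sess.run(None, {'input': dummy})[0]  # 'input' matches input_names above
print(probs.shape)  # (1, num_classes)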
openmmlab_test/mmclassification-speed-benchmark/tools/deployment/pytorch2torchscript.py
import argparse
import os
import os.path as osp
from functools import partial

import mmcv
import numpy as np
import torch
from mmcv.runner import load_checkpoint
from torch import nn

from mmcls.models import build_classifier

torch.manual_seed(3)


def _demo_mm_inputs(input_shape: tuple, num_classes: int):
    """Create a superset of inputs needed to run test or train batches.

    Args:
        input_shape (tuple):
            input batch dimensions
        num_classes (int):
            number of semantic classes
    """
    (N, C, H, W) = input_shape
    rng = np.random.RandomState(0)
    imgs = rng.rand(*input_shape)
    gt_labels = rng.randint(
        low=0, high=num_classes, size=(N, 1)).astype(np.uint8)
    mm_inputs = {
        'imgs': torch.FloatTensor(imgs).requires_grad_(False),
        'gt_labels': torch.LongTensor(gt_labels),
    }
    return mm_inputs


def pytorch2torchscript(model: nn.Module, input_shape: tuple,
                        output_file: str, verify: bool):
    """Export Pytorch model to TorchScript model through torch.jit.trace and
    verify the outputs are the same between Pytorch and TorchScript.

    Args:
        model (nn.Module): Pytorch model we want to export.
        input_shape (tuple): Use this input shape to construct
            the corresponding dummy input and execute the model.
        output_file (string): The path to where we store the output
            TorchScript model.
        verify (bool): Whether compare the outputs between Pytorch
            and TorchScript through loading generated output_file.
    """
    model.cpu().eval()
    num_classes = model.head.num_classes
    mm_inputs = _demo_mm_inputs(input_shape, num_classes)

    imgs = mm_inputs.pop('imgs')
    img_list = [img[None, :] for img in imgs]

    # replace original forward function
    origin_forward = model.forward
    model.forward = partial(model.forward, img_metas={}, return_loss=False)

    with torch.no_grad():
        trace_model = torch.jit.trace(model, img_list[0])
        save_dir, _ = osp.split(output_file)
        if save_dir:
            os.makedirs(save_dir, exist_ok=True)
        trace_model.save(output_file)
        print(f'Successfully exported TorchScript model: {output_file}')
    model.forward = origin_forward

    if verify:
        # load by torch.jit
        jit_model = torch.jit.load(output_file)

        # check the numerical value
        # get pytorch output
        pytorch_result = model(img_list, img_metas={}, return_loss=False)[0]

        # get jit output
        jit_result = jit_model(img_list[0])[0].detach().numpy()
        if not np.allclose(pytorch_result, jit_result):
            raise ValueError(
                'The outputs are different between Pytorch and TorchScript')
        print('The outputs are the same between Pytorch and TorchScript')


def parse_args():
    parser = argparse.ArgumentParser(
        description='Convert MMCls to TorchScript')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--checkpoint', help='checkpoint file', type=str)
    parser.add_argument(
        '--verify',
        action='store_true',
        help='verify the TorchScript model',
        default=False)
    parser.add_argument('--output-file', type=str, default='tmp.pt')
    parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[224, 224],
        help='input image size')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()

    if len(args.shape) == 1:
        input_shape = (1, 3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (1, 3) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')

    cfg = mmcv.Config.fromfile(args.config)
    cfg.model.pretrained = None

    # build the model and load checkpoint
    classifier = build_classifier(cfg.model)

    if args.checkpoint:
        load_checkpoint(classifier, args.checkpoint, map_location='cpu')

    # convert model to TorchScript file
    pytorch2torchscript(
        classifier,
        input_shape,
        output_file=args.output_file,
        verify=args.verify)
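The saved trace is likewise standalone. A minimal sketch of reloading and running it, mirroring the verify branch above (assumes the default tmp.pt output and a 224x224 trace):

import torch

jit_model = torch.jit.load('tmp.pt')  # path produced by the script above
dummy = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    out = jit_model(dummy)[0]  # same indexing as the verify branch above
print(out.shape)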
openmmlab_test/mmclassification-speed-benchmark/tools/deployment/test.py
import argparse
import warnings

import mmcv
import numpy as np
from mmcv import DictAction
from mmcv.parallel import MMDataParallel

from mmcls.apis import single_gpu_test
from mmcls.core.export import ONNXRuntimeClassifier, TensorRTClassifier
from mmcls.datasets import build_dataloader, build_dataset


def parse_args():
    parser = argparse.ArgumentParser(
        description='Test (and eval) a model with ONNXRuntime or TensorRT.')
    parser.add_argument('config', help='model config file')
    parser.add_argument('model', help='filename of the input ONNX model')
    parser.add_argument(
        '--backend',
        help='Backend of the model.',
        choices=['onnxruntime', 'tensorrt'])
    parser.add_argument(
        '--out', type=str, help='output result file in pickle format')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file.')
    parser.add_argument(
        '--metrics',
        type=str,
        nargs='+',
        help='evaluation metrics, which depends on the dataset, e.g., '
        '"accuracy", "precision", "recall", "f1_score", "support" for single '
        'label dataset, and "mAP", "CP", "CR", "CF1", "OP", "OR", "OF1" for '
        'multi-label dataset')
    parser.add_argument(
        '--metric-options',
        nargs='+',
        action=DictAction,
        default={},
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be parsed as a dict metric_options for dataset.evaluate()'
        ' function.')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--show-dir', help='directory where painted images will be saved')
    args = parser.parse_args()
    return args


def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # build dataset and dataloader
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=cfg.data.samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        shuffle=False,
        round_up=False)

    # build onnxruntime model and run inference.
    if args.backend == 'onnxruntime':
        model = ONNXRuntimeClassifier(
            args.model, class_names=dataset.CLASSES, device_id=0)
    elif args.backend == 'tensorrt':
        model = TensorRTClassifier(
            args.model, class_names=dataset.CLASSES, device_id=0)
    else:
        print('Unknown backend: {}.'.format(args.backend))
        exit()

    model = MMDataParallel(model, device_ids=[0])
    model.CLASSES = dataset.CLASSES
    outputs = single_gpu_test(model, data_loader, args.show, args.show_dir)

    if args.metrics:
        results = dataset.evaluate(outputs, args.metrics, args.metric_options)
        for k, v in results.items():
            print(f'\n{k} : {v:.2f}')
    else:
        warnings.warn('Evaluation metrics are not specified.')
        scores = np.vstack(outputs)
        pred_score = np.max(scores, axis=1)
        pred_label = np.argmax(scores, axis=1)
        pred_class = [dataset.CLASSES[lb] for lb in pred_label]
        results = {
            'pred_score': pred_score,
            'pred_label': pred_label,
            'pred_class': pred_class
        }
        if not args.out:
            print('\nthe predicted result for the first element is '
                  f'pred_score = {pred_score[0]:.2f}, '
                  f'pred_label = {pred_label[0]} '
                  f'and pred_class = {pred_class[0]}. '
                  'Specify --out to save all results to files.')
    if args.out:
        print(f'\nwriting results to {args.out}')
        mmcv.dump(results, args.out)


if __name__ == '__main__':
    main()
openmmlab_test/mmclassification-speed-benchmark/tools/dist_test.sh
#!/usr/bin/env bash

CONFIG=$1
CHECKPOINT=$2
GPUS=$3
PORT=${PORT:-29500}

PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
    $(dirname "$0")/test.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4}
openmmlab_test/mmclassification-speed-benchmark/tools/dist_train.sh
#!/usr/bin/env bash

CONFIG=$1
GPUS=$2
PORT=${PORT:-29500}

PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
    $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3}
openmmlab_test/mmclassification-speed-benchmark/tools/get_flops.py
import argparse

from mmcv import Config
from mmcv.cnn.utils import get_model_complexity_info

from mmcls.models import build_classifier


def parse_args():
    parser = argparse.ArgumentParser(description='Get model flops and params')
    parser.add_argument('config', help='config file path')
    parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[224, 224],
        help='input image size')
    args = parser.parse_args()
    return args


def main():
    args = parse_args()

    if len(args.shape) == 1:
        input_shape = (3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (3, ) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')

    cfg = Config.fromfile(args.config)
    model = build_classifier(cfg.model)
    model.eval()

    if hasattr(model, 'extract_feat'):
        model.forward = model.extract_feat
    else:
        raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.format(
                model.__class__.__name__))

    flops, params = get_model_complexity_info(model, input_shape)
    split_line = '=' * 30
    print(f'{split_line}\nInput shape: {input_shape}\n'
          f'Flops: {flops}\nParams: {params}\n{split_line}')
    print('!!!Please be cautious if you use the results in papers. '
          'You may need to check if all ops are supported and verify that the '
          'flops computation is correct.')


if __name__ == '__main__':
    main()
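Reassigning model.forward to model.extract_feat is what lets mmcv's counter drive the classifier with a bare image tensor. The counter itself is model-agnostic; a sketch on a plain torchvision ResNet-18 (torchvision is used here only to keep the example self-contained):

import torchvision
from mmcv.cnn.utils import get_model_complexity_info

model = torchvision.models.resnet18()
model.eval()
# returns human-readable strings by default; silence the per-layer table
flops, params = get_model_complexity_info(
    model, (3, 224, 224), print_per_layer_stat=False)
print(f'Flops: {flops}\nParams: {params}')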
openmmlab_test/mmclassification-speed-benchmark/tools/publish_model.py
import argparse
import subprocess

import torch


def parse_args():
    parser = argparse.ArgumentParser(
        description='Process a checkpoint to be published')
    parser.add_argument('in_file', help='input checkpoint filename')
    parser.add_argument('out_file', help='output checkpoint filename')
    args = parser.parse_args()
    return args


def process_checkpoint(in_file, out_file):
    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    torch.save(checkpoint, out_file)
    sha = subprocess.check_output(['sha256sum', out_file]).decode()
    if out_file.endswith('.pth'):
        out_file_name = out_file[:-4]
    else:
        out_file_name = out_file
    final_file = out_file_name + f'-{sha[:8]}.pth'
    subprocess.Popen(['mv', out_file, final_file])


def main():
    args = parse_args()
    process_checkpoint(args.in_file, args.out_file)


if __name__ == '__main__':
    main()
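Shelling out to sha256sum and mv ties the script to Unix. A portable sketch of the hash-suffix step with hashlib (matching the first-8-hex-characters convention above):

import hashlib

def sha256_prefix(path, n=8):
    # hash the file contents, as sha256sum does, and keep the first n hex chars
    h = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            h.update(chunk)
    return h.hexdigest()[:n]

# e.g. renames 'model.pth' to something like 'model-1a2b3c4d.pth'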
openmmlab_test/mmclassification-speed-benchmark/tools/slurm_test.sh
#!/usr/bin/env bash

set -x

PARTITION=$1
JOB_NAME=$2
CONFIG=$3
CHECKPOINT=$4
GPUS=${GPUS:-8}
GPUS_PER_NODE=${GPUS_PER_NODE:-8}
CPUS_PER_TASK=${CPUS_PER_TASK:-5}
PY_ARGS=${@:5}
SRUN_ARGS=${SRUN_ARGS:-""}

PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
srun -p ${PARTITION} \
    --job-name=${JOB_NAME} \
    --gres=gpu:${GPUS_PER_NODE} \
    --ntasks=${GPUS} \
    --ntasks-per-node=${GPUS_PER_NODE} \
    --cpus-per-task=${CPUS_PER_TASK} \
    --kill-on-bad-exit=1 \
    ${SRUN_ARGS} \
    python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS}
openmmlab_test/mmclassification-speed-benchmark/tools/slurm_train.sh
#!/usr/bin/env bash

set -x

PARTITION=$1
JOB_NAME=$2
CONFIG=$3
WORK_DIR=$4
GPUS=${GPUS:-8}
GPUS_PER_NODE=${GPUS_PER_NODE:-8}
CPUS_PER_TASK=${CPUS_PER_TASK:-5}
SRUN_ARGS=${SRUN_ARGS:-""}
PY_ARGS=${@:5}

PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
srun -p ${PARTITION} \
    --job-name=${JOB_NAME} \
    --gres=gpu:${GPUS_PER_NODE} \
    --ntasks=${GPUS} \
    --ntasks-per-node=${GPUS_PER_NODE} \
    --cpus-per-task=${CPUS_PER_TASK} \
    --kill-on-bad-exit=1 \
    ${SRUN_ARGS} \
    python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS}
openmmlab_test/mmclassification-speed-benchmark/tools/test.py
import argparse
import os
import warnings

import mmcv
import numpy as np
import torch
from mmcv import DictAction
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint

from mmcls.apis import multi_gpu_test, single_gpu_test
from mmcls.datasets import build_dataloader, build_dataset
from mmcls.models import build_classifier

# TODO import `wrap_fp16_model` from mmcv and delete them from mmcls
try:
    from mmcv.runner import wrap_fp16_model
except ImportError:
    warnings.warn('wrap_fp16_model from mmcls will be deprecated. '
                  'Please install mmcv>=1.1.4.')
    from mmcls.core import wrap_fp16_model


def parse_args():
    parser = argparse.ArgumentParser(description='mmcls test model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file')
    parser.add_argument(
        '--metrics',
        type=str,
        nargs='+',
        help='evaluation metrics, which depends on the dataset, e.g., '
        '"accuracy", "precision", "recall", "f1_score", "support" for single '
        'label dataset, and "mAP", "CP", "CR", "CF1", "OP", "OR", "OF1" for '
        'multi-label dataset')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--show-dir', help='directory where painted images will be saved')
    parser.add_argument(
        '--gpu_collect',
        action='store_true',
        help='whether to use gpu to collect results')
    parser.add_argument('--tmpdir', help='tmp dir for writing some results')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file.')
    parser.add_argument(
        '--metric-options',
        nargs='+',
        action=DictAction,
        default={},
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be parsed as a dict metric_options for dataset.evaluate()'
        ' function.')
    parser.add_argument(
        '--show-options',
        nargs='+',
        action=DictAction,
        help='custom options for show_result. key-value pair in xxx=yyy. '
        'Check available options in `model.show_result`.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument(
        '--device',
        choices=['cpu', 'cuda'],
        default='cuda',
        help='device used for testing')
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args


def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    if args.options is not None:
        cfg.merge_from_dict(args.options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    # the extra round_up data will be removed during gpu/cpu collect
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=cfg.data.samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False,
        round_up=True)

    # build the model and load checkpoint
    model = build_classifier(cfg.model)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')

    if 'CLASSES' in checkpoint.get('meta', {}):
        CLASSES = checkpoint['meta']['CLASSES']
    else:
        from mmcls.datasets import ImageNet
        warnings.simplefilter('once')
        warnings.warn('Class names are not saved in the checkpoint\'s '
                      'meta data, use imagenet by default.')
        CLASSES = ImageNet.CLASSES

    if not distributed:
        if args.device == 'cpu':
            model = model.cpu()
        else:
            model = MMDataParallel(model, device_ids=[0])
        model.CLASSES = CLASSES
        show_kwargs = {} if args.show_options is None else args.show_options
        outputs = single_gpu_test(model, data_loader, args.show,
                                  args.show_dir, **show_kwargs)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.metrics:
            results = dataset.evaluate(outputs, args.metrics,
                                       args.metric_options)
            for k, v in results.items():
                print(f'\n{k} : {v:.2f}')
        else:
            warnings.warn('Evaluation metrics are not specified.')
            scores = np.vstack(outputs)
            pred_score = np.max(scores, axis=1)
            pred_label = np.argmax(scores, axis=1)
            pred_class = [CLASSES[lb] for lb in pred_label]
            results = {
                'pred_score': pred_score,
                'pred_label': pred_label,
                'pred_class': pred_class
            }
            if not args.out:
                print('\nthe predicted result for the first element is '
                      f'pred_score = {pred_score[0]:.2f}, '
                      f'pred_label = {pred_label[0]} '
                      f'and pred_class = {pred_class[0]}. '
                      'Specify --out to save all results to files.')
    if args.out and rank == 0:
        print(f'\nwriting results to {args.out}')
        mmcv.dump(results, args.out)


if __name__ == '__main__':
    main()
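When no metrics are requested, the fallback summary above is plain numpy over the stacked (num_samples, num_classes) score matrix; the same operations work on any such array:

import numpy as np

scores = np.array([[0.1, 0.7, 0.2],
                   [0.5, 0.3, 0.2]])   # toy (num_samples, num_classes) scores
pred_score = np.max(scores, axis=1)    # confidence of the top class per sample
pred_label = np.argmax(scores, axis=1) # index of the top class per sample
print(pred_score, pred_label)  # [0.7 0.5] [1 0]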