Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
nni
Commits
17e267f8
Commit
17e267f8
authored
Nov 25, 2019
by
Cjkkkk
Committed by
chicm-ms
Nov 25, 2019
Browse files
Unit test for QAT_quantization (#1766)
parent
c2e77ded
Changes
1
Show whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
67 additions
and
0 deletions
+67
-0
src/sdk/pynni/tests/test_compressor.py
src/sdk/pynni/tests/test_compressor.py
+67
-0
No files found.
src/sdk/pynni/tests/test_compressor.py
View file @
17e267f8
...
@@ -4,6 +4,7 @@ import tensorflow as tf
...
@@ -4,6 +4,7 @@ import tensorflow as tf
import
torch
import
torch
import
torch.nn.functional
as
F
import
torch.nn.functional
as
F
import
nni.compression.torch
as
torch_compressor
import
nni.compression.torch
as
torch_compressor
import
math
if
tf
.
__version__
>=
'2.0'
:
if
tf
.
__version__
>=
'2.0'
:
import
nni.compression.tensorflow
as
tf_compressor
import
nni.compression.tensorflow
as
tf_compressor
...
@@ -59,6 +60,32 @@ w = np.array([[[[i+1]*3]*3]*5 for i in range(10)])
...
@@ -59,6 +60,32 @@ w = np.array([[[[i+1]*3]*3]*5 for i in range(10)])
class
CompressorTestCase
(
TestCase
):
class
CompressorTestCase
(
TestCase
):
def test_torch_quantizer_modules_detection(self):
    """Verify that QAT_Quantizer detects every module matched by the config.

    Two config entries are used: weight quantization for Conv2d/Linear
    layers and output quantization for ReLU, so all five named layers of
    TorchModel should be selected for compression.
    """
    weight_cfg = {
        'quant_types': ['weight'],
        'quant_bits': 8,
        'op_types': ['Conv2d', 'Linear']
    }
    output_cfg = {
        'quant_types': ['output'],
        'quant_bits': 8,
        'quant_start_step': 0,
        'op_types': ['ReLU']
    }
    model = TorchModel()
    # Replace the functional relu with a module so it is detectable by type.
    model.relu = torch.nn.ReLU()

    quantizer = torch_compressor.QAT_Quantizer(model, [weight_cfg, output_cfg])
    quantizer.compress()

    detected = [entry[0].name for entry in quantizer.get_modules_to_compress()]
    for layer_name in ("conv1", "conv2", "fc1", "fc2", "relu"):
        assert layer_name in detected
    # Exactly the five expected layers — nothing extra was picked up.
    assert len(detected) == 5
def
test_torch_level_pruner
(
self
):
def
test_torch_level_pruner
(
self
):
model
=
TorchModel
()
model
=
TorchModel
()
configure_list
=
[{
'sparsity'
:
0.8
,
'op_types'
:
[
'default'
]}]
configure_list
=
[{
'sparsity'
:
0.8
,
'op_types'
:
[
'default'
]}]
...
@@ -201,5 +228,45 @@ class CompressorTestCase(TestCase):
...
@@ -201,5 +228,45 @@ class CompressorTestCase(TestCase):
assert
all
(
mask1
.
numpy
()
==
np
.
array
([
0.
,
0.
,
0.
,
1.
,
1.
]))
assert
all
(
mask1
.
numpy
()
==
np
.
array
([
0.
,
0.
,
0.
,
1.
,
1.
]))
assert
all
(
mask2
.
numpy
()
==
np
.
array
([
0.
,
0.
,
0.
,
1.
,
1.
]))
assert
all
(
mask2
.
numpy
()
==
np
.
array
([
0.
,
0.
,
0.
,
1.
,
1.
]))
def test_torch_QAT_quantizer(self):
    """Check QAT weight quantization parameters and EMA range tracking.

    Exercises quantize_weight on a weight range that excludes zero and one
    that includes it, asserting the resulting scale / zero_point, then
    feeds activations through the ReLU to assert the biased EMA min/max
    statistics before and after a quantizer step.
    """
    model = TorchModel()
    # Use an nn.ReLU module so the quantizer can wrap its output.
    model.relu = torch.nn.ReLU()
    config_list = [
        {'quant_types': ['weight'], 'quant_bits': 8,
         'op_types': ['Conv2d', 'Linear']},
        {'quant_types': ['output'], 'quant_bits': 8, 'quant_start_step': 0,
         'op_types': ['ReLU']},
    ]
    quantizer = torch_compressor.QAT_Quantizer(model, config_list)
    quantizer.compress()

    tol = 1e-7

    # --- quantize_weight: range not including 0 (all-positive weights) ---
    w = torch.tensor([[1, 2], [3, 5]]).float()
    _ = quantizer.quantize_weight(w, config_list[0], model.conv2)
    assert math.isclose(model.conv2.scale, 5 / 255, abs_tol=tol)
    assert model.conv2.zero_point == 0

    # --- quantize_weight: range including 0 (min is negative) ---
    w = torch.tensor([[-1, 2], [3, 5]]).float()
    _ = quantizer.quantize_weight(w, config_list[0], model.conv2)
    assert math.isclose(model.conv2.scale, 6 / 255, abs_tol=tol)
    assert model.conv2.zero_point == 42

    # --- EMA tracking of the activation range through the wrapped ReLU ---
    _ = model.relu(torch.tensor([[-0.2, 0], [0.1, 0.2]]))
    assert math.isclose(model.relu.tracked_min_biased, 0, abs_tol=tol)
    assert math.isclose(model.relu.tracked_max_biased, 0.002, abs_tol=tol)

    quantizer.step()
    _ = model.relu(torch.tensor([[0.2, 0.4], [0.6, 0.8]]))
    assert math.isclose(model.relu.tracked_min_biased, 0.002, abs_tol=tol)
    assert math.isclose(model.relu.tracked_max_biased, 0.00998, abs_tol=tol)
# Allow running this test file directly as a script.
# NOTE(review): `main` is not defined in this view — presumably it is
# unittest's `main` imported at the top of the file; confirm against the
# full source.
if __name__ == '__main__':
    main()
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment