OpenDAS / nni / Commits / 9d0b6fa6

Commit 9d0b6fa6 (unverified)
Authored Nov 25, 2020 by chicm-ms; committed by GitHub on Nov 25, 2020
Parent: 055885d9

Pruning schedule supports fpgm (#3110)
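In short: every schedule-style pruner touched here (ADMMPruner, AutoCompressPruner, NetAdaptPruner, SensitivityPruner, SimulatedAnnealingPruner) now accepts `fpgm` as its `base_algo`, alongside the existing `level`, `l1` and `l2`. A minimal usage sketch against the API as it stands after this commit; the toy model and the constant evaluator are placeholders, not part of the change:

import torch.nn as nn
from nni.algorithms.compression.pytorch.pruning import SimulatedAnnealingPruner

# Toy model and a stand-in evaluator, purely for illustration.
model = nn.Sequential(nn.Conv2d(1, 8, 3), nn.ReLU(), nn.Conv2d(8, 16, 3))
evaluator = lambda m: 0.9  # a real evaluator would return validation accuracy

config_list = [{'sparsity': 0.5, 'op_types': ['Conv2d']}]

# base_algo='fpgm' passes config validation only after this commit;
# previously the schemas accepted only 'level', 'l1' and 'l2'.
pruner = SimulatedAnnealingPruner(model, config_list, evaluator=evaluator, base_algo='fpgm')
pruner.compress()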
Showing 7 changed files with 53 additions and 40 deletions (+53 −40)

  nni/algorithms/compression/pytorch/pruning/admm_pruner.py                 +10 −23
  nni/algorithms/compression/pytorch/pruning/auto_compress_pruner.py         +2  −2
  nni/algorithms/compression/pytorch/pruning/constants_pruner.py             +3  −2
  nni/algorithms/compression/pytorch/pruning/net_adapt_pruner.py             +3  −3
  nni/algorithms/compression/pytorch/pruning/sensitivity_pruner.py           +2  −2
  nni/algorithms/compression/pytorch/pruning/simulated_annealing_pruner.py   +3  −3
  test/ut/sdk/test_pruners.py                                               +30  −5
nni/algorithms/compression/pytorch/pruning/admm_pruner.py  (view file @ 9d0b6fa6)
@@ -4,6 +4,7 @@
 import logging
 import torch
 from schema import And, Optional
+import copy

 from nni.compression.pytorch.utils.config_validation import CompressorSchema
 from .constants import MASKER_DICT
@@ -53,7 +54,7 @@ class ADMMPruner(OneshotPruner):
     row : float
         Penalty parameters for ADMM training.
     base_algo : str
-        Base pruning algorithm. `level`, `l1` or `l2`, by default `l1`. Given the sparsity distribution among the ops,
+        Base pruning algorithm. `level`, `l1`, `l2` or `fpgm`, by default `l1`. Given the sparsity distribution among the ops,
         the assigned `base_algo` is used to decide which filters/channels/weights to prune.
     """
@@ -87,7 +88,7 @@ class ADMMPruner(OneshotPruner):
                 Optional('op_types'): [str],
                 Optional('op_names'): [str],
             }], model, _logger)
-        elif self._base_algo in ['l1', 'l2']:
+        elif self._base_algo in ['l1', 'l2', 'fpgm']:
             schema = CompressorSchema([{
                 'sparsity': And(float, lambda n: 0 < n < 1),
                 'op_types': ['Conv2d'],
@@ -96,7 +97,7 @@ class ADMMPruner(OneshotPruner):
         schema.validate(config_list)

-    def _projection(self, weight, sparsity):
+    def _projection(self, weight, sparsity, wrapper):
         '''
         Return the Euclidean projection of the weight matrix according to the pruning mode.

@@ -106,31 +107,17 @@ class ADMMPruner(OneshotPruner):
             original matrix
         sparsity : float
             the ratio of parameters which need to be set to zero
+        wrapper: PrunerModuleWrapper
+            layer wrapper of this layer

         Returns
         -------
         tensor
             the projected matrix
         '''
-        w_abs = weight.abs()
-        if self._base_algo == 'level':
-            k = int(weight.numel() * sparsity)
-            if k == 0:
-                mask_weight = torch.ones(weight.shape).type_as(weight)
-            else:
-                threshold = torch.topk(w_abs.view(-1), k, largest=False)[0].max()
-                mask_weight = torch.gt(w_abs, threshold).type_as(weight)
-        elif self._base_algo in ['l1', 'l2']:
-            filters = weight.size(0)
-            num_prune = int(filters * sparsity)
-            if filters < 2 or num_prune < 1:
-                mask_weight = torch.ones(weight.size()).type_as(weight).detach()
-            else:
-                w_abs_structured = w_abs.view(filters, -1).sum(dim=1)
-                threshold = torch.topk(w_abs_structured.view(-1), num_prune, largest=False)[0].max()
-                mask_weight = torch.gt(w_abs_structured, threshold)[:, None, None, None].expand_as(weight).type_as(weight)
-
-        return weight.data.mul(mask_weight)
+        wrapper_copy = copy.deepcopy(wrapper)
+        wrapper_copy.module.weight.data = weight
+        return weight.data.mul(self.masker.calc_mask(sparsity, wrapper_copy)['weight_mask'])

     def compress(self):
         """
@@ -179,7 +166,7 @@ class ADMMPruner(OneshotPruner):
             # U_i^{k+1} = U^k + W_i^{k+1} - Z_i^{k+1}
             for i, wrapper in enumerate(self.get_modules_wrapper()):
                 z = wrapper.module.weight.data + U[i]
-                Z[i] = self._projection(z, wrapper.config['sparsity'])
+                Z[i] = self._projection(z, wrapper.config['sparsity'], wrapper)
                 U[i] = U[i] + wrapper.module.weight.data - Z[i]

         # apply prune
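The rewritten `_projection` no longer hard-codes the `level` and `l1`/`l2` masking logic inline; it deep-copies the layer wrapper, substitutes the current ADMM iterate for the wrapper's weight, and asks the masker selected by `base_algo` for the mask. That indirection is what makes `fpgm` work here: FPGM keeps the filters far from the geometric median of all filters rather than those with large norms, so it cannot be reduced to the simple magnitude thresholds the old code used. A rough sketch of the FPGM criterion (an illustration of the idea, not NNI's actual masker code):

import torch

def fpgm_mask(weight: torch.Tensor, sparsity: float) -> torch.Tensor:
    # Prune the filters whose total distance to all other filters is
    # smallest, i.e. those nearest the geometric median and therefore
    # most redundant. `weight` is assumed to be a Conv2d kernel of
    # shape (out_channels, in_channels, kh, kw).
    filters = weight.size(0)
    num_prune = int(filters * sparsity)
    if filters < 2 or num_prune < 1:
        return torch.ones_like(weight)
    flat = weight.view(filters, -1)
    total_dist = torch.cdist(flat, flat, p=2).sum(dim=1)  # (filters,)
    prune_idx = torch.topk(total_dist, num_prune, largest=False)[1]
    mask = torch.ones(filters, dtype=weight.dtype, device=weight.device)
    mask[prune_idx] = 0.0
    return mask[:, None, None, None].expand_as(weight)

The deepcopy in `_projection` matters because `calc_mask` reads the weight through the wrapper; mutating a copy keeps the projection side-effect-free with respect to the live module.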
nni/algorithms/compression/pytorch/pruning/auto_compress_pruner.py  (view file @ 9d0b6fa6)
@@ -80,7 +80,7 @@ class AutoCompressPruner(Pruner):
     optimize_mode : str
         optimize mode, `maximize` or `minimize`, by default `maximize`.
     base_algo : str
-        Base pruning algorithm. `level`, `l1` or `l2`, by default `l1`. Given the sparsity distribution among the ops,
+        Base pruning algorithm. `level`, `l1`, `l2` or `fpgm`, by default `l1`. Given the sparsity distribution among the ops,
         the assigned `base_algo` is used to decide which filters/channels/weights to prune.
     start_temperature : float
         Start temperature of the simulated annealing process.

@@ -151,7 +151,7 @@ class AutoCompressPruner(Pruner):
                 Optional('op_types'): [str],
                 Optional('op_names'): [str],
             }], model, _logger)
-        elif self._base_algo in ['l1', 'l2']:
+        elif self._base_algo in ['l1', 'l2', 'fpgm']:
             schema = CompressorSchema([{
                 'sparsity': And(float, lambda n: 0 < n < 1),
                 'op_types': ['Conv2d'],
nni/algorithms/compression/pytorch/pruning/constants_pruner.py  (view file @ 9d0b6fa6)
@@ -2,10 +2,11 @@
 # Licensed under the MIT license.

-from .one_shot import LevelPruner, L1FilterPruner, L2FilterPruner
+from .one_shot import LevelPruner, L1FilterPruner, L2FilterPruner, FPGMPruner

 PRUNER_DICT = {
     'level': LevelPruner,
     'l1': L1FilterPruner,
-    'l2': L2FilterPruner
+    'l2': L2FilterPruner,
+    'fpgm': FPGMPruner
 }
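PRUNER_DICT maps each `base_algo` string to the one-shot pruner class that implements it, and the schedule-level pruners resolve their base algorithm through this dict. A hedged sketch of that lookup, mirroring how test_pruners.py drives the one-shot pruners (the toy model and sparsity value are illustrative):

import torch
import torch.nn as nn
from nni.algorithms.compression.pytorch.pruning.constants_pruner import PRUNER_DICT

# Toy model and optimizer, purely so the lookup can be exercised end to end.
model = nn.Sequential(nn.Conv2d(1, 8, 3), nn.ReLU(), nn.Conv2d(8, 8, 3))
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# After this commit, 'fpgm' resolves to FPGMPruner.
pruner_cls = PRUNER_DICT['fpgm']
pruner = pruner_cls(model, [{'sparsity': 0.5, 'op_types': ['Conv2d']}], optimizer)
pruner.compress()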
nni/algorithms/compression/pytorch/pruning/net_adapt_pruner.py  (view file @ 9d0b6fa6)
@@ -73,7 +73,7 @@ class NetAdaptPruner(Pruner):
     optimize_mode : str
         optimize mode, `maximize` or `minimize`, by default `maximize`.
     base_algo : str
-        Base pruning algorithm. `level`, `l1` or `l2`, by default `l1`. Given the sparsity distribution among the ops,
+        Base pruning algorithm. `level`, `l1`, `l2` or `fpgm`, by default `l1`. Given the sparsity distribution among the ops,
         the assigned `base_algo` is used to decide which filters/channels/weights to prune.
     sparsity_per_iteration : float
         sparsity to prune in each iteration.

@@ -125,7 +125,7 @@ class NetAdaptPruner(Pruner):
                 Optional('op_types'): [str],
                 Optional('op_names'): [str],
             }], model, _logger)
-        elif self._base_algo in ['l1', 'l2']:
+        elif self._base_algo in ['l1', 'l2', 'fpgm']:
             schema = CompressorSchema([{
                 'sparsity': And(float, lambda n: 0 < n < 1),
                 'op_types': ['Conv2d'],

@@ -149,7 +149,7 @@ class NetAdaptPruner(Pruner):
                 return config_list_updated

         # if op_name is not in self._config_list_generated, create a new json item
-        if self._base_algo in ['l1', 'l2']:
+        if self._base_algo in ['l1', 'l2', 'fpgm']:
             config_list_updated.append(
                 {'sparsity': sparsity, 'op_types': ['Conv2d'], 'op_names': [op_name]})
         elif self._base_algo == 'level':
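With `fpgm` added to the filter-level branch, NetAdaptPruner now generates the same per-layer config items for it as for `l1`/`l2`: each entry pins `op_types` to `Conv2d` and names a single layer. For a layer named `conv1` pruned to 50% sparsity, the appended item would look like this (values are illustrative):

{'sparsity': 0.5, 'op_types': ['Conv2d'], 'op_names': ['conv1']}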
nni/algorithms/compression/pytorch/pruning/sensitivity_pruner.py  (view file @ 9d0b6fa6)
@@ -68,7 +68,7 @@ class SensitivityPruner(Pruner):
         >>> loss.backward()
         >>> optimizer.step()
     base_algo: str
-        base pruning algorithm. `level`, `l1` or `l2`, by default `l1`.
+        base pruning algorithm. `level`, `l1`, `l2` or `fpgm`, by default `l1`.
     sparsity_proportion_calc: function
         This function generate the sparsity proportion between the conv layers according to the
         sensitivity analysis results. We provide a default function to quantify the sparsity

@@ -150,7 +150,7 @@ class SensitivityPruner(Pruner):
                 Optional('op_types'): [str],
                 Optional('op_names'): [str],
             }], model, _logger)
-        elif self.base_algo in ['l1', 'l2']:
+        elif self.base_algo in ['l1', 'l2', 'fpgm']:
             schema = CompressorSchema([{
                 'sparsity': And(float, lambda n: 0 < n < 1),
                 'op_types': ['Conv2d'],
nni/algorithms/compression/pytorch/pruning/simulated_annealing_pruner.py  (view file @ 9d0b6fa6)
@@ -54,7 +54,7 @@ class SimulatedAnnealingPruner(Pruner):
     optimize_mode : str
         Optimize mode, `maximize` or `minimize`, by default `maximize`.
     base_algo : str
-        Base pruning algorithm. `level`, `l1` or `l2`, by default `l1`. Given the sparsity distribution among the ops,
+        Base pruning algorithm. `level`, `l1`, `l2` or `fpgm`, by default `l1`. Given the sparsity distribution among the ops,
         the assigned `base_algo` is used to decide which filters/channels/weights to prune.
     start_temperature : float
         Start temperature of the simulated annealing process.

@@ -120,7 +120,7 @@ class SimulatedAnnealingPruner(Pruner):
                 Optional('op_types'): [str],
                 Optional('op_names'): [str],
             }], model, _logger)
-        elif self._base_algo in ['l1', 'l2']:
+        elif self._base_algo in ['l1', 'l2', 'fpgm']:
             schema = CompressorSchema([{
                 'sparsity': And(float, lambda n: 0 < n < 1),
                 'op_types': ['Conv2d'],

@@ -152,7 +152,7 @@ class SimulatedAnnealingPruner(Pruner):
         # a layer with more weights will have no less pruning rate
         for idx, wrapper in enumerate(self.get_modules_wrapper()):
             # L1Filter Pruner requires to specify op_types
-            if self._base_algo in ['l1', 'l2']:
+            if self._base_algo in ['l1', 'l2', 'fpgm']:
                 config_list.append(
                     {'sparsity': sparsities[idx], 'op_types': ['Conv2d'], 'op_names': [wrapper.name]})
             elif self._base_algo == 'level':
test/ut/sdk/test_pruners.py  (view file @ 9d0b6fa6)
@@ -151,12 +151,37 @@ prune_config = {
             lambda model: validate_sparsity(model.conv1, 0.5, model.bias)
         ]
     },
-    'autocompress': {
+    'autocompress_l1': {
         'pruner_class': AutoCompressPruner,
         'config_list': [{
             'sparsity': 0.5,
             'op_types': ['Conv2d'],
         }],
+        'base_algo': 'l1',
+        'trainer': lambda model, optimizer, criterion, epoch, callback: model,
+        'evaluator': lambda model: 0.9,
+        'dummy_input': torch.randn([64, 1, 28, 28]),
+        'validators': []
+    },
+    'autocompress_l2': {
+        'pruner_class': AutoCompressPruner,
+        'config_list': [{
+            'sparsity': 0.5,
+            'op_types': ['Conv2d'],
+        }],
+        'base_algo': 'l2',
+        'trainer': lambda model, optimizer, criterion, epoch, callback: model,
+        'evaluator': lambda model: 0.9,
+        'dummy_input': torch.randn([64, 1, 28, 28]),
+        'validators': []
+    },
+    'autocompress_fpgm': {
+        'pruner_class': AutoCompressPruner,
+        'config_list': [{
+            'sparsity': 0.5,
+            'op_types': ['Conv2d'],
+        }],
+        'base_algo': 'fpgm',
         'trainer': lambda model, optimizer, criterion, epoch, callback: model,
         'evaluator': lambda model: 0.9,
         'dummy_input': torch.randn([64, 1, 28, 28]),

@@ -181,7 +206,7 @@ class Model(nn.Module):
     def forward(self, x):
         return self.fc(self.pool(self.bn1(self.conv1(x))).view(x.size(0), -1))

-def pruners_test(pruner_names=['level', 'agp', 'slim', 'fpgm', 'l1', 'l2', 'taylorfo', 'mean_activation', 'apoz', 'netadapt', 'simulatedannealing', 'admm', 'autocompress'], bias=True):
+def pruners_test(pruner_names=['level', 'agp', 'slim', 'fpgm', 'l1', 'l2', 'taylorfo', 'mean_activation', 'apoz', 'netadapt', 'simulatedannealing', 'admm', 'autocompress_l1', 'autocompress_l2', 'autocompress_fpgm',], bias=True):
     for pruner_name in pruner_names:
         print('testing {}...'.format(pruner_name))
         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

@@ -203,8 +228,8 @@ def pruners_test(pruner_names=['level', 'agp', 'slim', 'fpgm', 'l1', 'l2', 'tayl
             pruner = prune_config[pruner_name]['pruner_class'](model, config_list, evaluator=prune_config[pruner_name]['evaluator'])
         elif pruner_name == 'admm':
             pruner = prune_config[pruner_name]['pruner_class'](model, config_list, trainer=prune_config[pruner_name]['trainer'])
-        elif pruner_name == 'autocompress':
-            pruner = prune_config[pruner_name]['pruner_class'](model, config_list, trainer=prune_config[pruner_name]['trainer'], evaluator=prune_config[pruner_name]['evaluator'], dummy_input=x)
+        elif pruner_name.startswith('autocompress'):
+            pruner = prune_config[pruner_name]['pruner_class'](model, config_list, trainer=prune_config[pruner_name]['trainer'], evaluator=prune_config[pruner_name]['evaluator'], dummy_input=x, base_algo=prune_config[pruner_name]['base_algo'])
         else:
             pruner = prune_config[pruner_name]['pruner_class'](model, config_list, optimizer)
         pruner.compress()

@@ -272,7 +297,7 @@ class PrunerTestCase(TestCase):
         pruners_test(bias=False)

     def test_agp_pruner(self):
-        for pruning_algorithm in ['l1', 'l2', 'taylorfo', 'apoz']:
+        for pruning_algorithm in ['l1', 'l2', 'fpgm', 'taylorfo', 'apoz']:
             _test_agp(pruning_algorithm)

         for pruning_algorithm in ['level']:
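With the defaults updated, a plain run of this test module now covers the three autocompress variants and the new `fpgm` AGP case. Assuming a standard pytest setup (the command below is an assumption, not part of the commit):

python -m pytest test/ut/sdk/test_pruners.py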