OpenDAS / nni · Commits · bb3d2986

Unverified commit bb3d2986, authored Feb 09, 2021 by J-shang, committed by GitHub on Feb 09, 2021

Fix pipeline (#3366)

Parent: 2cdba296

Showing 5 changed files, with 52 additions and 41 deletions (+52 -41):

- examples/model_compress/pruning/basic_pruners_torch.py (+26 -18)
- examples/model_compress/pruning/model_speedup.py (+3 -3)
- pipelines/full-test-linux.yml (+2 -2)
- pipelines/full-test-windows.yml (+1 -1)
- test/scripts/model_compression.sh (+20 -17)
examples/model_compress/pruning/basic_pruners_torch.py

```diff
@@ -13,7 +13,6 @@ import logging
 import argparse
 import os
 import time
-import argparse
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
```
```diff
@@ -35,6 +34,7 @@ from nni.algorithms.compression.pytorch.pruning import (
     L1FilterPruner,
     L2FilterPruner,
     AGPPruner,
+    ActivationMeanRankFilterPruner,
     ActivationAPoZRankFilterPruner
 )
```
```diff
@@ -49,6 +49,7 @@ str2pruner = {
     'slim': SlimPruner,
     'agp': AGPPruner,
     'fpgm': FPGMPruner,
+    'mean_activation': ActivationMeanRankFilterPruner,
     'apoz': ActivationAPoZRankFilterPruner
 }
```
```diff
@@ -68,7 +69,7 @@ def get_pruner(model, pruner_name, device, optimizer=None, dependency_aware=False
             'sparsity': args.sparsity,
             'op_types': ['default']
         }]
-    elif pruner_name == 'l1filter':
+    elif pruner_name in ['l1filter', 'mean_activation', 'apoz']:
         # Reproduced result in paper 'PRUNING FILTERS FOR EFFICIENT CONVNETS',
         # Conv_1, Conv_8, Conv_9, Conv_10, Conv_11, Conv_12 are pruned with 50% sparsity, as 'VGG-16-pruned-A'
         config_list = [{
```
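Widening this branch means the two activation-based pruners ('mean_activation', 'apoz') reuse the VGG-16-pruned-A configuration from the l1filter path, targeting the same Conv2d layers at 50% sparsity instead of carrying their own config.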
```diff
@@ -81,6 +82,15 @@ def get_pruner(model, pruner_name, device, optimizer=None, dependency_aware=False
             'sparsity': args.sparsity,
             'op_types': ['BatchNorm2d'],
         }]
+    elif pruner_name == 'agp':
+        config_list = [{
+            'initial_sparsity': 0.,
+            'final_sparsity': 0.8,
+            'start_epoch': 0,
+            'end_epoch': 10,
+            'frequency': 1,
+            'op_types': ['Conv2d']
+        }]
     else:
         config_list = [{
             'sparsity': args.sparsity,
```
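The new 'agp' branch configures iterative pruning: AGP ramps from initial_sparsity to final_sparsity between start_epoch and end_epoch, recomputing masks every frequency epochs. A minimal sketch of how a config_list in this format is typically consumed by the old-style AGPPruner interface that these keys belong to; the toy model, optimizer, and training loop below are illustrative assumptions, not code from this commit:

```python
import torch
import torch.nn as nn
from nni.algorithms.compression.pytorch.pruning import AGPPruner

# Toy stand-in model and optimizer; the real example builds VGG/LeNet
# and passes its own optimizer into get_pruner().
model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.ReLU(), nn.Conv2d(16, 32, 3))
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

config_list = [{
    'initial_sparsity': 0.,   # start fully dense
    'final_sparsity': 0.8,    # end with 80% of each Conv2d weight masked
    'start_epoch': 0,
    'end_epoch': 10,
    'frequency': 1,           # recompute masks every epoch
    'op_types': ['Conv2d']
}]

pruner = AGPPruner(model, config_list, optimizer)
model = pruner.compress()

for epoch in range(10):
    # ... one epoch of training on the masked model goes here ...
    pruner.update_epoch(epoch)  # advances sparsity along the AGP schedule
```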
```diff
@@ -150,13 +160,13 @@ def get_model_optimizer_scheduler(args, device, train_loader, test_loader, criterion
         if args.pretrained_model_dir is None:
             optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
             scheduler = MultiStepLR(
                 optimizer, milestones=[int(args.pretrain_epochs * 0.5), int(args.pretrain_epochs * 0.75)], gamma=0.1)
     elif args.model == 'vgg19':
         model = VGG(depth=19).to(device)
         if args.pretrained_model_dir is None:
             optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
             scheduler = MultiStepLR(
                 optimizer, milestones=[int(args.pretrain_epochs * 0.5), int(args.pretrain_epochs * 0.75)], gamma=0.1)
     else:
         raise ValueError("model not recognized")
```
```diff
@@ -183,9 +193,8 @@ def get_model_optimizer_scheduler(args, device, train_loader, test_loader, criterion
         # setup new opotimizer for fine-tuning
         optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
         scheduler = MultiStepLR(
             optimizer, milestones=[int(args.pretrain_epochs * 0.5), int(args.pretrain_epochs * 0.75)], gamma=0.1)

     print('Pretrained model acc:', best_acc)

     return model, optimizer, scheduler
```
```diff
@@ -253,7 +262,6 @@ def main(args):
     mask_path = os.path.join(args.experiment_data_dir, 'mask_{}_{}_{}.pth'.format(args.model, args.dataset, args.pruner))
     pruner = get_pruner(model, args.pruner, device, optimizer, args.dependency_aware)
     model = pruner.compress()
```
```diff
@@ -284,7 +292,7 @@ def main(args):
         args.pretrained_model_dir = model_path
         model, _, _ = get_model_optimizer_scheduler(args, device, train_loader, test_loader, criterion)
         model.eval()
         apply_compression_results(model, mask_path, device)

         # test model speed
```
```diff
@@ -316,7 +324,7 @@ if __name__ == '__main__':
     parser.add_argument('--data-dir', type=str, default='./data/',
                         help='dataset directory')
     parser.add_argument('--model', type=str, default='vgg16',
-                        choices=['LeNet', 'vgg16', 'vgg19', 'resnet18'],
+                        choices=['lenet', 'vgg16', 'vgg19', 'resnet18'],
                         help='model to use')
     parser.add_argument('--pretrained-model-dir', type=str, default=None,
                         help='path to pretrained model')
```
```diff
@@ -344,27 +352,27 @@ if __name__ == '__main__':
                         help='toggle dependency aware mode')
     parser.add_argument('--pruner', type=str, default='l1filter',
-                        choices=['level', 'l1filter', 'l2filter', 'slim', 'agp', 'fpgm', 'apoz'],
+                        choices=['level', 'l1filter', 'l2filter', 'slim', 'agp', 'fpgm', 'mean_activation', 'apoz'],
                         help='pruner to use')

     # fine-tuning
     parser.add_argument('--fine-tune-epochs', type=int, default=160,
                         help='epochs to fine tune')

     # speed-up
     parser.add_argument('--speed-up', action='store_true', default=False,
                         help='whether to speed-up the pruned model')

     parser.add_argument('--nni', action='store_true', default=False,
                         help="whether to tune the pruners using NNi tuners")

     args = parser.parse_args()

     if args.nni:
         params = nni.get_next_parameter()
         print(params)
         args.sparsity = params['sparsity']
         args.pruner = params['pruner']
         args.model = params['pruner']

     main(args)
```
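With --nni set, nni.get_next_parameter() pulls one trial's parameters from the tuner and the script overrides its CLI arguments with them. A hypothetical search space that would produce the 'sparsity' and 'pruner' keys consumed above, written as the Python equivalent of an NNI search-space JSON file (the concrete values are illustrative, not from this commit):

```python
# Hypothetical search_space.json contents for tuning the pruners with --nni;
# the keys must match what the script reads out of params.
search_space = {
    "sparsity": {"_type": "choice", "_value": [0.25, 0.5, 0.75]},
    "pruner": {"_type": "choice", "_value": ["l1filter", "l2filter", "fpgm"]},
}
```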
examples/model_compress/pruning/model_speedup.py

```diff
@@ -16,9 +16,9 @@ compare_results = True
 config = {
     'apoz': {
-        'model_name': 'lenet',
-        'input_shape': [64, 1, 28, 28],
-        'masks_file': './experiment_data/mask_lenet_mnist_apoz.pth'
+        'model_name': 'vgg16',
+        'input_shape': [64, 3, 32, 32],
+        'masks_file': './experiment_data/mask_vgg16_cifar10_apoz.pth'
     },
     'l1filter': {
         'model_name': 'vgg16',
```
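The apoz entry has to match the pruning test: model_speedup.py replays the saved masks, so 'input_shape' drives the dummy input traced through the graph and 'masks_file' must be the file basic_pruners_torch.py exported. Roughly how these fields are used, as a sketch following the NNI v2 speedup examples; the stand-in model is an assumption and the mask file must already exist on disk:

```python
import torch
import torch.nn as nn
from nni.compression.pytorch import apply_compression_results, ModelSpeedup

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Stand-in for the repo's VGG-16; the real script rebuilds the exact
# architecture that was pruned, which is what makes the masks line up.
model = nn.Sequential(nn.Conv2d(3, 64, 3, padding=1), nn.ReLU()).to(device)

masks_file = './experiment_data/mask_vgg16_cifar10_apoz.pth'
dummy_input = torch.randn(64, 3, 32, 32).to(device)  # from 'input_shape'

apply_compression_results(model, masks_file, device)  # fold masks into weights
m_speedup = ModelSpeedup(model, dummy_input, masks_file, device)
m_speedup.speedup_model()  # physically shrink the pruned layers
```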
pipelines/full-test-linux.yml

```diff
@@ -29,8 +29,8 @@ jobs:
   - script: |
       set -e
       python3 -m pip install scikit-learn==0.23.2
-      python3 -m pip install torchvision==0.4.2
-      python3 -m pip install torch==1.3.1
+      python3 -m pip install torchvision==0.6.1
+      python3 -m pip install torch==1.5.1
       python3 -m pip install keras==2.1.6
       python3 -m pip install tensorflow==2.3.1 tensorflow-estimator==2.3.0
       python3 -m pip install thop
```
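The two pins move together: torchvision 0.6.1 is the release built against torch 1.5.1 in PyTorch's compatibility matrix, so bumping one without the other would break the install.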
pipelines/full-test-windows.yml

```diff
@@ -27,7 +27,7 @@ jobs:
   - script: |
       python -m pip install scikit-learn==0.23.2
       python -m pip install keras==2.1.6
-      python -m pip install torchvision===0.4.1 torch===1.3.1 -f https://download.pytorch.org/whl/torch_stable.html
+      python -m pip install torchvision===0.6.1 torch===1.5.1 -f https://download.pytorch.org/whl/torch_stable.html
       python -m pip install tensorflow==2.3.1 tensorflow-estimator==2.3.0
     displayName: Install extra dependencies
```
test/scripts/model_compression.sh

```diff
@@ -6,30 +6,33 @@ echo ""
 echo "===========================Testing: pruning and speedup==========================="
 cd ${CWD}/../examples/model_compress
-for name in fpgm slim l1filter apoz
-do
-    echo "testing $name pruning and speedup..."
-    python3 model_prune_torch.py --pruner_name $name --pretrain_epochs 1 --prune_epochs 1
-    python3 model_speedup.py --example_name $name
-done
-
-for name in level mean_activation
-do
-    echo "testing $name pruning..."
-    python3 model_prune_torch.py --pruner_name $name --pretrain_epochs 1 --prune_epochs 1
-done
+echo "testing fpgm pruning and speedup..."
+python3 pruning/basic_pruners_torch.py --pruner fpgm --pretrain-epochs 1 --fine-tune-epochs 1 --model vgg16 --dataset cifar10
+python3 pruning/model_speedup.py --example_name fpgm
+
+echo "testing slim pruning and speedup..."
+python3 pruning/basic_pruners_torch.py --pruner slim --pretrain-epochs 1 --fine-tune-epochs 1 --model vgg19 --dataset cifar10 --sparsity 0.7
+python3 pruning/model_speedup.py --example_name slim
+
+echo "testing l1filter pruning and speedup..."
+python3 pruning/basic_pruners_torch.py --pruner l1filter --pretrain-epochs 1 --fine-tune-epochs 1 --model vgg16 --dataset cifar10
+python3 pruning/model_speedup.py --example_name l1filter
+
+echo "testing apoz pruning and speedup..."
+python3 pruning/basic_pruners_torch.py --pruner apoz --pretrain-epochs 1 --fine-tune-epochs 1 --model vgg16 --dataset cifar10
+python3 pruning/model_speedup.py --example_name apoz

 echo 'testing level pruner pruning'
-python3 model_prune_torch.py --pruner_name level --pretrain_epochs 1 --prune_epochs 1
+python3 pruning/basic_pruners_torch.py --pruner level --pretrain-epochs 1 --fine-tune-epochs 1 --model lenet --dataset mnist

 echo 'testing agp pruning'
-python3 model_prune_torch.py --pruner_name agp --pretrain_epochs 1 --prune_epochs 2
+python3 pruning/basic_pruners_torch.py --pruner agp --pretrain-epochs 1 --fine-tune-epochs 1 --model lenet --dataset mnist

 echo 'testing mean_activation pruning'
-python3 model_prune_torch.py --pruner_name mean_activation --pretrain_epochs 1 --prune_epochs 1
+python3 pruning/basic_pruners_torch.py --pruner mean_activation --pretrain-epochs 1 --fine-tune-epochs 1 --model vgg16 --dataset cifar10

 echo "testing lottery ticket pruning..."
-python3 lottery_torch_mnist_fc.py --train_epochs 1
+python3 pruning/lottery_torch_mnist_fc.py --train_epochs 1

 echo ""
 echo "===========================Testing: quantizers==========================="
```
```diff
@@ -43,4 +46,4 @@ echo "===========================Testing: quantizers==========================="
 #echo "testing BNN quantizer..."
 #python3 BNN_quantizer_cifar10.py

-rm -rf ./checkpoints/*
+rm -rf ./experiment_data/*
```