Unverified Commit 30df69bf authored by Ningxin Zheng, committed by GitHub

Fix integration test on speedup examples (#3945)

parent a7278d2d
@@ -37,7 +37,7 @@ Usage
     out = model(dummy_input)
     print('elapsed time: ', time.time() - start)
-For complete examples please refer to :githublink:`the code <examples/model_compress/pruning/model_speedup.py>`
+For complete examples please refer to :githublink:`the code <examples/model_compress/pruning/speedup/model_speedup.py>`
 NOTE: The current implementation supports PyTorch 1.3.1 or newer.
@@ -51,7 +51,7 @@ For PyTorch we can only replace modules, if functions in ``forward`` should be r
 Speedup Results of Examples
 ---------------------------
-The code of these experiments can be found :githublink:`here <examples/model_compress/pruning/model_speedup.py>`.
+The code of these experiments can be found :githublink:`here <examples/model_compress/pruning/speedup/model_speedup.py>`.
 slim pruner example
 ^^^^^^^^^^^^^^^^^^^
...
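For context, the hunk above edits the tail of the Usage snippet in the speedup documentation. A minimal sketch of that usage pattern, assuming the ModelSpeedup entry point from nni.compression.pytorch and using a placeholder model, mask path, and input shape (the diff only shows the final timing lines):

import time
import torch
import torchvision.models as models
from nni.compression.pytorch import ModelSpeedup  # import path assumed from the NNI compression API

device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model = models.vgg16().to(device)                    # stand-in for the pruned example model
masks_file = './experiment_data/mask.pth'            # hypothetical path to a pruner-produced mask
dummy_input = torch.randn(64, 3, 32, 32).to(device)  # placeholder input shape

# Replace masked modules with smaller dense ones, then time one forward pass;
# the timing lines are the ones shown in the hunk above.
ModelSpeedup(model, dummy_input, masks_file).speedup_model()
start = time.time()
out = model(dummy_input)
print('elapsed time: ', time.time() - start)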
@@ -7,7 +7,7 @@ import torch.nn.functional as F
 from torchvision import datasets, transforms
 import sys
-sys.path.append('../../models')
+sys.path.append('../models')
 from cifar10.vgg import VGG
 from mnist.lenet import LeNet
@@ -45,7 +45,7 @@ def model_inference(config):
     masks_file = config['masks_file']
     device = torch.device(
         'cuda') if torch.cuda.is_available() else torch.device('cpu')
     # device = torch.device(config['device'])
     if config['model_name'] == 'vgg16':
         model = VGG(depth=16)
...
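A note on the sys.path change above: sys.path.append with a relative path resolves against the process's current working directory, not against the script's own location. Because the integration test below cds into examples/model_compress/pruning and then launches speedup/model_speedup.py from there, '../models' points at examples/model_compress/models, where the cifar10.vgg and mnist.lenet modules live. A small illustrative snippet (standard library only):

import os
import sys

# Relative sys.path entries are resolved against os.getcwd() at import time,
# not against __file__; converting to an absolute path makes the dependency
# on the launch directory explicit (illustrative, not part of the patch).
models_dir = os.path.abspath('../models')
sys.path.append(models_dir)
print(models_dir)  # e.g. .../examples/model_compress/models when run from .../pruning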
@@ -8,19 +8,19 @@ cd ${CWD}/../examples/model_compress/pruning
 echo "testing fpgm pruning and speedup..."
 python3 basic_pruners_torch.py --pruner fpgm --pretrain-epochs 1 --fine-tune-epochs 1 --model vgg16 --dataset cifar10
-python3 model_speedup.py --example_name fpgm
+python3 speedup/model_speedup.py --example_name fpgm
 echo "testing slim pruning and speedup..."
 python3 basic_pruners_torch.py --pruner slim --pretrain-epochs 1 --fine-tune-epochs 1 --model vgg19 --dataset cifar10 --sparsity 0.7
-python3 model_speedup.py --example_name slim
+python3 speedup/model_speedup.py --example_name slim
 echo "testing l1filter pruning and speedup..."
 python3 basic_pruners_torch.py --pruner l1filter --pretrain-epochs 1 --fine-tune-epochs 1 --model vgg16 --dataset cifar10 --pretrained-model-dir experiment_data/pretrain_cifar10_vgg16.pth
-python3 model_speedup.py --example_name l1filter
+python3 speedup/model_speedup.py --example_name l1filter
 echo "testing apoz pruning and speedup..."
 python3 basic_pruners_torch.py --pruner apoz --pretrain-epochs 1 --fine-tune-epochs 1 --model vgg16 --dataset cifar10 --pretrained-model-dir experiment_data/pretrain_cifar10_vgg16.pth
-python3 model_speedup.py --example_name apoz
+python3 speedup/model_speedup.py --example_name apoz
 echo 'testing level pruner pruning'
 python3 basic_pruners_torch.py --pruner level --pretrain-epochs 1 --fine-tune-epochs 1 --model lenet --dataset mnist
...
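Each block in the test script above follows the same two-step pattern: run basic_pruners_torch.py to produce a pruned model and its mask, then invoke the relocated speedup/model_speedup.py with the matching --example_name. A hypothetical sketch of how an example name could map onto the config dict consumed by model_inference(config) from the earlier hunk; only the 'masks_file' and 'model_name' keys are visible in the diff, and the mask paths are placeholders:

# Hypothetical --example_name -> config mapping; model names follow the
# pruner runs in the script above, mask paths are placeholders.
EXAMPLE_CONFIGS = {
    'fpgm':     {'model_name': 'vgg16', 'masks_file': './experiment_data/mask_fpgm.pth'},
    'slim':     {'model_name': 'vgg19', 'masks_file': './experiment_data/mask_slim.pth'},
    'l1filter': {'model_name': 'vgg16', 'masks_file': './experiment_data/mask_l1filter.pth'},
    'apoz':     {'model_name': 'vgg16', 'masks_file': './experiment_data/mask_apoz.pth'},
}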