Unverified commit a911b856, authored by Yuge Zhang, committed by GitHub

Resolve conflicts for #4760 (#4762)

parent 14d2966b
"""
Fix a troublesome translation in Sphinx.
Related PR: https://github.com/sphinx-doc/sphinx/pull/10303
"""
import subprocess
from pathlib import Path
import sphinx
sphinx_path = Path(sphinx.__path__[0]) / 'locale/zh_CN/LC_MESSAGES'
po_content = (sphinx_path / 'sphinx.po').read_text()
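# The zh_CN translation of "alias of %s" has no space after the placeholder, which breaks
# the substituted cross-reference in the rendered docs (see the Sphinx PR above); insert one.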
po_content = po_content.replace('%s的别名', '%s 的别名')
(sphinx_path / 'sphinx.po').write_text(po_content)
# build po -> mo
subprocess.run(['msgfmt', '-c', str(sphinx_path / 'sphinx.po'), '-o', str(sphinx_path / 'sphinx.mo')], check=True)
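The msgfmt call above assumes GNU gettext is available on PATH. Where it is not, a rough Python-only fallback for the po -> mo step (a sketch, assuming the babel package is installed; not part of this commit) could look like:

from babel.messages.mofile import write_mo
from babel.messages.pofile import read_po
# Parse the patched catalog and compile it to the binary .mo format, reusing sphinx_path from above.
with open(sphinx_path / 'sphinx.po', 'rb') as f:
    catalog = read_po(f)
with open(sphinx_path / 'sphinx.mo', 'wb') as f:
    write_mo(f, catalog)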
@@ -15,7 +15,7 @@ import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torchvision import datasets, transforms
from nni.compression.pytorch.utils.counter import count_flops_params
from nni.compression.pytorch.utils import count_flops_params
from nni.compression.pytorch import ModelSpeedup
from nni.algorithms.compression.pytorch.pruning import L1FilterPruner
@@ -277,7 +277,7 @@ if __name__ == '__main__':
parser.add_argument('--sparsity', type=float, default=0.5,
help='target overall target sparsity')
parser.add_argument('--dependency-aware', action='store_true', default=False,
help='toggle dependency aware mode')
help='toggle dependency-aware mode')
# finetuning
parser.add_argument('--finetune-epochs', type=int, default=5,
@@ -290,8 +290,8 @@ if __name__ == '__main__':
# help='learning rate to finetune the model')
# speedup
# parser.add_argument('--speed-up', action='store_true', default=False,
# help='whether to speed-up the pruned model')
# parser.add_argument('--speedup', action='store_true', default=False,
# help='whether to speedup the pruned model')
# parser.add_argument('--nni', action='store_true', default=False,
# help="whether to tune the pruners using NNi tuners")
@@ -4,7 +4,7 @@
'''
NNI example for supported ActivationAPoZRank and ActivationMeanRank pruning algorithms.
In this example, we show the end-to-end pruning process: pre-training -> pruning -> fine-tuning.
Note that pruners use masks to simulate the real pruning. In order to obtain a real compressed model, model speed up is required.
Note that pruners use masks to simulate the real pruning. In order to obtain a real compressed model, model speedup is required.
'''
import argparse
@@ -16,11 +16,11 @@ from torch.optim.lr_scheduler import MultiStepLR
import nni
from nni.compression.pytorch import ModelSpeedup
from nni.compression.pytorch.utils.counter import count_flops_params
from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import ActivationAPoZRankPruner, ActivationMeanRankPruner
from nni.compression.pytorch.utils import count_flops_params
from nni.compression.pytorch.pruning import ActivationAPoZRankPruner, ActivationMeanRankPruner
from pathlib import Path
sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
sys.path.append(str(Path(__file__).absolute().parents[1] / 'models'))
from cifar10.vgg import VGG
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -4,7 +4,7 @@
'''
NNI example for supported ADMM pruning algorithms.
In this example, we show the end-to-end pruning process: pre-training -> pruning -> fine-tuning.
Note that pruners use masks to simulate the real pruning. In order to obtain a real compressed model, model speed up is required.
Note that pruners use masks to simulate the real pruning. In order to obtain a real compressed model, model speedup is required.
'''
import argparse
@@ -15,11 +15,12 @@ from torchvision import datasets, transforms
from torch.optim.lr_scheduler import MultiStepLR
import nni
from nni.compression.pytorch.utils.counter import count_flops_params
from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import ADMMPruner
from nni.compression.pytorch.speedup import ModelSpeedup
from nni.compression.pytorch.utils import count_flops_params
from nni.compression.pytorch.pruning import ADMMPruner
from pathlib import Path
sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
sys.path.append(str(Path(__file__).absolute().parents[1] / 'models'))
from cifar10.vgg import VGG
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -108,18 +109,17 @@ if __name__ == '__main__':
config_list = [{
'sparsity': 0.8,
'op_types': ['Conv2d'],
}, {
'sparsity': 0.92,
'op_types': ['Conv2d'],
}]
# make sure you have used nni.trace to wrap the optimizer class before initialize
traced_optimizer = nni.trace(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
pruner = ADMMPruner(model, config_list, trainer, traced_optimizer, criterion, iterations=2, training_epochs=2)
pruner = ADMMPruner(model, config_list, trainer, traced_optimizer, criterion, iterations=10, training_epochs=1, granularity='coarse-grained')
_, masks = pruner.compress()
pruner.show_pruned_weights()
# Fine-grained method does not need to speedup
pruner._unwrap_model()
ModelSpeedup(model, torch.randn([128, 3, 32, 32]).to(device), masks).speedup_model()
print('\n' + '=' * 50 + ' EVALUATE THE MODEL AFTER PRUNING ' + '=' * 50)
evaluator(model)
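The docstring note repeated across these examples ("pruners use masks to simulate the real pruning ... model speedup is required") describes the two-phase flow that the ADMM hunk above now follows: the pruner only attaches masks, and ModelSpeedup rewrites the network so the layers actually shrink. A minimal sketch of that flow with the new import paths (the toy model, the L1NormPruner choice, and all parameter values are illustrative, not taken from this commit):

import torch
import torch.nn as nn
from nni.compression.pytorch import ModelSpeedup
from nni.compression.pytorch.pruning import L1NormPruner

model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.ReLU(), nn.Conv2d(16, 32, 3))
config_list = [{'sparsity': 0.5, 'op_types': ['Conv2d']}]

pruner = L1NormPruner(model, config_list)
_, masks = pruner.compress()   # masks only zero out weights; tensor shapes are unchanged
pruner.show_pruned_weights()
pruner._unwrap_model()         # restore the original modules before speedup
ModelSpeedup(model, torch.rand(1, 3, 32, 32), masks).speedup_model()  # physically removes masked channels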
@@ -5,11 +5,11 @@ import torch
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import MultiStepLR
from nni.algorithms.compression.v2.pytorch.pruning import AMCPruner
from nni.compression.pytorch.utils.counter import count_flops_params
from nni.compression.pytorch.pruning import AMCPruner
from nni.compression.pytorch.utils import count_flops_params
from pathlib import Path
sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
sys.path.append(str(Path(__file__).absolute().parents[1] / 'models'))
from cifar10.vgg import VGG
@@ -5,10 +5,10 @@ import torch
from torchvision import datasets, transforms
import nni
from nni.algorithms.compression.v2.pytorch.pruning import AutoCompressPruner
from nni.compression.pytorch.pruning import AutoCompressPruner
from pathlib import Path
sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
sys.path.append(str(Path(__file__).absolute().parents[1] / 'models'))
from cifar10.vgg import VGG
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -4,7 +4,7 @@
'''
NNI example for supported fpgm pruning algorithms.
In this example, we show the end-to-end pruning process: pre-training -> pruning -> fine-tuning.
Note that pruners use masks to simulate the real pruning. In order to obtain a real compressed model, model speed up is required.
Note that pruners use masks to simulate the real pruning. In order to obtain a real compressed model, model speedup is required.
'''
import argparse
@@ -15,11 +15,11 @@ from torchvision import datasets, transforms
from torch.optim.lr_scheduler import MultiStepLR
from nni.compression.pytorch import ModelSpeedup
from nni.compression.pytorch.utils.counter import count_flops_params
from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import FPGMPruner
from nni.compression.pytorch.utils import count_flops_params
from nni.compression.pytorch.pruning import FPGMPruner
from pathlib import Path
sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
sys.path.append(str(Path(__file__).absolute().parents[1] / 'models'))
from cifar10.vgg import VGG
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -13,14 +13,14 @@ from tqdm import tqdm
import torch
from torchvision import datasets, transforms
from nni.algorithms.compression.v2.pytorch.pruning import (
from nni.compression.pytorch.pruning import (
LinearPruner,
AGPPruner,
LotteryTicketPruner
)
from pathlib import Path
sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
sys.path.append(str(Path(__file__).absolute().parents[1] / 'models'))
from cifar10.vgg import VGG
@@ -94,8 +94,8 @@ if __name__ == '__main__':
choices=['level', 'l1', 'l2', 'fpgm', 'slim', 'apoz',
'mean_activation', 'taylorfo', 'admm'],
help='algorithm to evaluate weights to prune')
parser.add_argument('--speed-up', type=bool, default=False,
help='Whether to speed-up the pruned model')
parser.add_argument('--speedup', type=bool, default=False,
help='Whether to speedup the pruned model')
parser.add_argument('--reset-weight', type=bool, default=True,
help='Whether to reset weight during each iteration')
@@ -120,8 +120,8 @@ if __name__ == '__main__':
'evaluator': None,
'finetuner': finetuner}
if args.speed_up:
kw_args['speed_up'] = args.speed_up
if args.speedup:
kw_args['speedup'] = args.speedup
kw_args['dummy_input'] = torch.rand(10, 3, 32, 32).to(device)
if args.pruner == 'linear':
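For orientation, the kw_args dictionary assembled above (evaluator, finetuner, and optionally speedup plus dummy_input) is what the iterative pruners imported at the top of this file consume. A rough sketch of how the 'linear' branch might hand it over; the pruning_algorithm and total_iteration keyword names are assumptions based on the NNI v2 iterative-pruner API and are not taken from this diff:

# Hypothetical continuation of the branch above; kw_args comes from the hunk shown.
pruner = LinearPruner(model, config_list, pruning_algorithm='l1',
                      total_iteration=10, **kw_args)
pruner.compress()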
@@ -23,7 +23,7 @@ from data import get_dataset
from utils import AverageMeter, accuracy, progress_bar
from pathlib import Path
sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
sys.path.append(str(Path(__file__).absolute().parents[3] / 'models'))
from mobilenet import MobileNet
from mobilenet_v2 import MobileNetV2
@@ -17,10 +17,10 @@ from torchvision import datasets, transforms
from nni.algorithms.compression.pytorch.pruning import L1FilterPruner, L2FilterPruner, FPGMPruner
from nni.algorithms.compression.pytorch.pruning import SimulatedAnnealingPruner, ADMMPruner, NetAdaptPruner, AutoCompressPruner
from nni.compression.pytorch import ModelSpeedup
from nni.compression.pytorch.utils.counter import count_flops_params
from nni.compression.pytorch.utils import count_flops_params
from pathlib import Path
sys.path.append(str(Path(__file__).absolute().parents[1] / 'models'))
sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
from mnist.lenet import LeNet
from cifar10.vgg import VGG
from cifar10.resnet import ResNet18, ResNet50
@@ -292,8 +292,8 @@ def main(args):
os.path.join(args.experiment_data_dir, 'model_masked.pth'), os.path.join(args.experiment_data_dir, 'mask.pth'))
print('Masked model saved to %s' % args.experiment_data_dir)
# model speed up
if args.speed_up:
# model speedup
if args.speedup:
if args.pruner != 'AutoCompressPruner':
if args.model == 'LeNet':
model = LeNet().to(device)
@@ -310,11 +310,11 @@ def main(args):
m_speedup = ModelSpeedup(model, dummy_input, masks_file, device)
m_speedup.speedup_model()
evaluation_result = evaluator(model)
print('Evaluation result (speed up model): %s' % evaluation_result)
print('Evaluation result (speedup model): %s' % evaluation_result)
result['performance']['speedup'] = evaluation_result
torch.save(model.state_dict(), os.path.join(args.experiment_data_dir, 'model_speed_up.pth'))
print('Speed up model saved to %s' % args.experiment_data_dir)
torch.save(model.state_dict(), os.path.join(args.experiment_data_dir, 'model_speedup.pth'))
print('Speedup model saved to %s' % args.experiment_data_dir)
flops, params, _ = count_flops_params(model, get_input_size(args.dataset))
result['flops']['speedup'] = flops
result['params']['speedup'] = params
@@ -402,9 +402,9 @@ if __name__ == '__main__':
parser.add_argument('--sparsity-per-iteration', type=float, default=0.05,
help='sparsity_per_iteration of NetAdaptPruner')
# speed-up
parser.add_argument('--speed-up', type=str2bool, default=False,
help='Whether to speed-up the pruned model')
# speedup
parser.add_argument('--speedup', type=str2bool, default=False,
help='Whether to speedup the pruned model')
# others
parser.add_argument('--log-interval', type=int, default=200,
@@ -4,7 +4,7 @@
'''
NNI example for supported basic pruning algorithms.
In this example, we show the end-to-end pruning process: pre-training -> pruning -> fine-tuning.
Note that pruners use masks to simulate the real pruning. In order to obtain a real compressed model, model speed up is required.
Note that pruners use masks to simulate the real pruning. In order to obtain a real compressed model, model speedup is required.
You can also try auto_pruners_torch.py to see the usage of some automatic pruning algorithms.
'''
@@ -18,12 +18,12 @@ from torch.optim.lr_scheduler import StepLR, MultiStepLR
from torchvision import datasets, transforms
from pathlib import Path
sys.path.append(str(Path(__file__).absolute().parents[1] / 'models'))
sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
from mnist.lenet import LeNet
from cifar10.vgg import VGG
from cifar10.resnet import ResNet18
from nni.compression.pytorch.utils.counter import count_flops_params
from nni.compression.pytorch.utils import count_flops_params
import nni
from nni.compression.pytorch import ModelSpeedup
@@ -292,7 +292,7 @@ def main(args):
if args.test_only:
test(args, model, device, criterion, test_loader)
if args.speed_up:
if args.speedup:
# Unwrap all modules to normal state
pruner._unwrap_model()
m_speedup = ModelSpeedup(model, dummy_input, mask_path, device)
@@ -356,7 +356,7 @@ if __name__ == '__main__':
parser.add_argument('--sparsity', type=float, default=0.5,
help='target overall target sparsity')
parser.add_argument('--dependency-aware', action='store_true', default=False,
help='toggle dependency aware mode')
help='toggle dependency-aware mode')
parser.add_argument('--global-sort', action='store_true', default=False,
help='toggle global sort mode')
parser.add_argument('--pruner', type=str, default='l1filter',
@@ -364,9 +364,9 @@ if __name__ == '__main__':
'fpgm', 'mean_activation', 'apoz', 'taylorfo'],
help='pruner to use')
# speed-up
parser.add_argument('--speed-up', action='store_true', default=False,
help='Whether to speed-up the pruned model')
# speedup
parser.add_argument('--speedup', action='store_true', default=False,
help='Whether to speedup the pruned model')
# fine-tuning
parser.add_argument('--fine-tune-epochs', type=int, default=160,