# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

from pathlib import Path

import torch
from torch.optim import Adam

import nni
from nni.compression.experiment.experiment import CompressionExperiment
from nni.compression.experiment.config import CompressionExperimentConfig, TaylorFOWeightPrunerConfig
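# vessel.py is expected to sit alongside this script (trial_code_directory is
# set to this file's parent below); it provides the model definition and the
# training/evaluation helpers used by the experiment.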
from vessel import LeNet, finetuner, evaluator, trainer, criterion, device


model = LeNet().to(device)

# pre-train the model before compression
finetuner(model)

# wrap the optimizer class with nni.trace so that NNI records its init
# arguments and can re-instantiate it inside each trial
optimizer = nni.trace(Adam)(model.parameters())

# dummy input with MNIST-shaped tensors (batch 16, 1 x 28 x 28), used to trace
# the model, e.g. for flops/params counting
dummy_input = torch.rand(16, 1, 28, 28).to(device)

# standard NNI experiment settings; search_space and trial_command do not need
# to be set here
config = CompressionExperimentConfig('local')
config.experiment_name = 'auto compression torch example'
config.trial_concurrency = 1
config.max_trial_number = 10
config.trial_code_directory = Path(__file__).parent
config.tuner.name = 'TPE'
config.tuner.class_args['optimize_mode'] = 'maximize'

# compression experiment specific settings
# a single float is interpreted as the upper limit on the remaining ratio for
# flops & params, and as the lower limit for the metric
config.compression_setting.flops = 0.2
config.compression_setting.params = 0.5
config.compression_setting.module_types = ['Conv2d', 'Linear']
config.compression_setting.exclude_module_names = ['fc2']
config.compression_setting.pruners = [TaylorFOWeightPrunerConfig()]
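# Roughly, the experiment builds a search space from the constraints above and
# the listed pruner configs; the tuner (TPE here) then proposes per-trial
# pruning settings, and each trial prunes, finetunes, and reports its metric.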

experiment = CompressionExperiment(config, model, finetuner, evaluator, dummy_input, trainer, optimizer, criterion, device)

# start the experiment and serve the NNI web UI on port 8080
experiment.run(8080)
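# while the experiment is running, progress and per-trial results can be
# inspected at http://localhost:8080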