Unverified Commit 8ac072ae authored by J-shang, committed by GitHub

[Bugbash] promote pruning v2 (#4733)

parent 1a3c019a
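The pattern repeated throughout this diff is a namespace promotion: the v2 pruning implementations move from `nni.algorithms.compression.v2.pytorch.pruning` to the stable `nni.compression.pytorch.pruning`, and the example scripts move up out of the `pruning/v2/` directory. A minimal sketch of the migration for user code (assuming a build that includes this commit):

# before this commit: the v2 staging path
from nni.algorithms.compression.v2.pytorch.pruning import L1NormPruner
# after this commit: the promoted path, re-exporting the same class
from nni.compression.pytorch.pruning import L1NormPruner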
......@@ -16,11 +16,11 @@ from torch.optim.lr_scheduler import MultiStepLR
import nni
from nni.compression.pytorch import ModelSpeedup
from nni.compression.pytorch.utils.counter import count_flops_params
from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import TaylorFOWeightPruner
from nni.compression.pytorch.utils import count_flops_params
from nni.compression.pytorch.pruning import TaylorFOWeightPruner
from pathlib import Path
sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
sys.path.append(str(Path(__file__).absolute().parents[1] / 'models'))
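# note: parents[1] instead of parents[2] because this example moved one directory up (out of pruning/v2/), so the shared 'models' folder is now one parent closer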
from cifar10.vgg import VGG
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
......
......@@ -67,7 +67,7 @@ config_list = [{
# %%
# Pruners usually require `model` and `config_list` as input arguments.
from nni.algorithms.compression.v2.pytorch.pruning import L1NormPruner
from nni.compression.pytorch.pruning import L1NormPruner
pruner = L1NormPruner(model, config_list)
# show the wrapped model structure; `PrunerModuleWrapper` has wrapped the layers configured in the config_list.
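A typical follow-up to the wrapping step, as a hedged sketch: it assumes the `ModelSpeedup` import shown earlier in this diff, the private `_unwrap_model` helper from the NNI quickstart, and a CIFAR-10-shaped dummy input, none of which appear in this tutorial excerpt.

# generate the masks, detach the wrappers, then physically shrink the model
_, masks = pruner.compress()
pruner._unwrap_model()
ModelSpeedup(model, torch.rand(10, 3, 32, 32), masks).speedup_model()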
......
......@@ -10,7 +10,7 @@ import torch
from schema import And, Optional
from nni.compression.pytorch.compressor import Pruner
from nni.compression.pytorch.utils.config_validation import PrunerSchema
from nni.compression.pytorch.utils.sensitivity_analysis import SensitivityAnalysis
from nni.compression.pytorch.utils import SensitivityAnalysis
from .constants_pruner import PRUNER_DICT
......
......@@ -12,7 +12,7 @@ from torch.nn import Module
from nni.algorithms.compression.v2.pytorch.base import Task, TaskResult
from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity, config_list_canonical
from nni.compression.pytorch.utils.counter import count_flops_params
from nni.compression.pytorch.utils import count_flops_params
from .iterative_pruner import IterativePruner, PRUNER_DICT
from .tools import TaskGenerator
......@@ -223,7 +223,7 @@ class AMCPruner(IterativePruner):
Examples
--------
>>> from nni.algorithms.compression.v2.pytorch.pruning import AMCPruner
>>> from nni.compression.pytorch.pruning import AMCPruner
>>> config_list = [{'op_types': ['Conv2d'], 'total_sparsity': 0.5, 'max_sparsity_per_layer': 0.8}]
>>> dummy_input = torch.rand(...).to(device)
>>> evaluator = ...
......@@ -231,7 +231,7 @@ class AMCPruner(IterativePruner):
>>> pruner = AMCPruner(400, model, config_list, dummy_input, evaluator, finetuner=finetuner)
>>> pruner.compress()
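>>> # 400 is total_episode, the first positional parameter of AMCPruner (see the signature below)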
The full script can be found :githublink:`here <examples/model_compress/pruning/v2/amc_pruning_torch.py>`.
The full script can be found :githublink:`here <examples/model_compress/pruning/amc_pruning_torch.py>`.
"""
def __init__(self, total_episode: int, model: Module, config_list: List[Dict], dummy_input: Tensor,
......
......@@ -120,7 +120,7 @@ class AutoCompressPruner(IterativePruner):
Examples
--------
>>> import nni
>>> from nni.algorithms.compression.v2.pytorch.pruning import AutoCompressPruner
>>> from nni.compression.pytorch.pruning import AutoCompressPruner
>>> model = ...
>>> config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
>>> # make sure you have used nni.trace to wrap the optimizer class before initializing it
......@@ -143,7 +143,7 @@ class AutoCompressPruner(IterativePruner):
>>> pruner.compress()
>>> _, model, masks, _, _ = pruner.get_best_result()
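>>> # assumed tuple layout: (best task id, best model, best masks, best score, best config list)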
The full script can be found :githublink:`here <examples/model_compress/pruning/v2/auto_compress_pruner.py>`.
The full script can be found :githublink:`here <examples/model_compress/pruning/auto_compress_pruner.py>`.
"""
def __init__(self, model: Module, config_list: List[Dict], total_iteration: int, admm_params: Dict,
......
......@@ -188,12 +188,12 @@ class LevelPruner(BasicPruner):
Examples
--------
>>> model = ...
>>> from nni.algorithms.compression.v2.pytorch.pruning import LevelPruner
>>> from nni.compression.pytorch.pruning import LevelPruner
>>> config_list = [{ 'sparsity': 0.8, 'op_types': ['default'] }]
>>> pruner = LevelPruner(model, config_list)
>>> masked_model, masks = pruner.compress()
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/level_pruning_torch.py <examples/model_compress/pruning/v2/level_pruning_torch.py>`
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/level_pruning_torch.py <examples/model_compress/pruning/level_pruning_torch.py>`
"""
def __init__(self, model: Module, config_list: List[Dict], mode: str = "normal", balance_gran: Optional[List] = None):
......@@ -360,12 +360,12 @@ class L2NormPruner(NormPruner):
Examples
--------
>>> model = ...
>>> from nni.algorithms.compression.v2.pytorch.pruning import L2NormPruner
>>> from nni.compression.pytorch.pruning import L2NormPruner
>>> config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
>>> pruner = L2NormPruner(model, config_list)
>>> masked_model, masks = pruner.compress()
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/norm_pruning_torch.py <examples/model_compress/pruning/v2/norm_pruning_torch.py>`
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/norm_pruning_torch.py <examples/model_compress/pruning/norm_pruning_torch.py>`
"""
def __init__(self, model: Module, config_list: List[Dict],
......@@ -410,12 +410,12 @@ class FPGMPruner(BasicPruner):
Examples
--------
>>> model = ...
>>> from nni.algorithms.compression.v2.pytorch.pruning import FPGMPruner
>>> from nni.compression.pytorch.pruning import FPGMPruner
>>> config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
>>> pruner = FPGMPruner(model, config_list)
>>> masked_model, masks = pruner.compress()
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/fpgm_pruning_torch.py <examples/model_compress/pruning/v2/fpgm_pruning_torch.py>`
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/fpgm_pruning_torch.py <examples/model_compress/pruning/fpgm_pruning_torch.py>`
"""
def __init__(self, model: Module, config_list: List[Dict],
......@@ -506,7 +506,7 @@ class SlimPruner(BasicPruner):
Examples
--------
>>> import nni
>>> from nni.algorithms.compression.v2.pytorch.pruning import SlimPruner
>>> from nni.compression.pytorch.pruning import SlimPruner
>>> model = ...
>>> # make sure you have used nni.trace to wrap the optimizer class before initializing it
>>> traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
......@@ -516,7 +516,7 @@ class SlimPruner(BasicPruner):
>>> pruner = SlimPruner(model, config_list, trainer, traced_optimizer, criterion, training_epochs=1)
>>> masked_model, masks = pruner.compress()
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/slim_pruning_torch.py <examples/model_compress/pruning/v2/slim_pruning_torch.py>`
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/slim_pruning_torch.py <examples/model_compress/pruning/slim_pruning_torch.py>`
"""
def __init__(self, model: Module, config_list: List[Dict], trainer: Callable[[Module, Optimizer, Callable], None],
......@@ -764,7 +764,7 @@ class ActivationAPoZRankPruner(ActivationPruner):
Examples
--------
>>> import nni
>>> from nni.algorithms.compression.v2.pytorch.pruning import ActivationAPoZRankPruner
>>> from nni.compression.pytorch.pruning import ActivationAPoZRankPruner
>>> model = ...
>>> # make sure you have used nni.trace to wrap the optimizer class before initializing it
>>> traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
......@@ -774,7 +774,7 @@ class ActivationAPoZRankPruner(ActivationPruner):
>>> pruner = ActivationAPoZRankPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)
>>> masked_model, masks = pruner.compress()
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/activation_pruning_torch.py <examples/model_compress/pruning/v2/activation_pruning_torch.py>`
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/activation_pruning_torch.py <examples/model_compress/pruning/activation_pruning_torch.py>`
"""
def _activation_trans(self, output: Tensor) -> Tensor:
# return a mask that is one at the positions where `output` is zero, and zero elsewhere.
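# a one-line sketch of that transform (an assumed implementation, not the verbatim body):
# return torch.eq(output, torch.zeros_like(output)).type_as(output)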
......@@ -847,7 +847,7 @@ class ActivationMeanRankPruner(ActivationPruner):
Examples
--------
>>> import nni
>>> from nni.algorithms.compression.v2.pytorch.pruning import ActivationMeanRankPruner
>>> from nni.compression.pytorch.pruning import ActivationMeanRankPruner
>>> model = ...
>>> # make sure you have used nni.trace to wrap the optimizer class before initializing it
>>> traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
......@@ -857,7 +857,7 @@ class ActivationMeanRankPruner(ActivationPruner):
>>> pruner = ActivationMeanRankPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)
>>> masked_model, masks = pruner.compress()
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/activation_pruning_torch.py <examples/model_compress/pruning/v2/activation_pruning_torch.py>`
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/activation_pruning_torch.py <examples/model_compress/pruning/activation_pruning_torch.py>`
"""
def _activation_trans(self, output: Tensor) -> Tensor:
# return `output` (the raw activation) directly.
......@@ -940,7 +940,7 @@ class TaylorFOWeightPruner(BasicPruner):
Examples
--------
>>> import nni
>>> from nni.algorithms.compression.v2.pytorch.pruning import TaylorFOWeightPruner
>>> from nni.compression.pytorch.pruning import TaylorFOWeightPruner
>>> model = ...
>>> # make sure you have used nni.trace to wrap the optimizer class before initializing it
>>> traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
......@@ -950,7 +950,7 @@ class TaylorFOWeightPruner(BasicPruner):
>>> pruner = TaylorFOWeightPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)
>>> masked_model, masks = pruner.compress()
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/taylorfo_pruning_torch.py <examples/model_compress/pruning/v2/taylorfo_pruning_torch.py>`
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/taylorfo_pruning_torch.py <examples/model_compress/pruning/taylorfo_pruning_torch.py>`
"""
def __init__(self, model: Module, config_list: List[Dict], trainer: Callable[[Module, Optimizer, Callable], None],
......@@ -1082,7 +1082,7 @@ class ADMMPruner(BasicPruner):
Examples
--------
>>> import nni
>>> from nni.algorithms.compression.v2.pytorch.pruning import ADMMPruner
>>> from nni.compression.pytorch.pruning import ADMMPruner
>>> model = ...
>>> # make sure you have used nni.trace to wrap the optimizer class before initializing it
>>> traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
......@@ -1092,7 +1092,7 @@ class ADMMPruner(BasicPruner):
>>> pruner = ADMMPruner(model, config_list, trainer, traced_optimizer, criterion, iterations=10, training_epochs=1)
>>> masked_model, masks = pruner.compress()
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/admm_pruning_torch.py <examples/model_compress/pruning/v2/admm_pruning_torch.py>`
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/admm_pruning_torch.py <examples/model_compress/pruning/admm_pruning_torch.py>`
"""
def __init__(self, model: Module, config_list: List[Dict], trainer: Callable[[Module, Optimizer, Callable], None],
......
......@@ -105,14 +105,14 @@ class LinearPruner(IterativePruner):
Examples
--------
>>> from nni.algorithms.compression.v2.pytorch.pruning import LinearPruner
>>> from nni.compression.pytorch.pruning import LinearPruner
>>> config_list = [{'sparsity': 0.8, 'op_types': ['Conv2d']}]
>>> finetuner = ...
>>> pruner = LinearPruner(model, config_list, pruning_algorithm='l1', total_iteration=10, finetuner=finetuner)
>>> pruner.compress()
>>> _, model, masks, _, _ = pruner.get_best_result()
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/iterative_pruning_torch.py <examples/model_compress/pruning/v2/iterative_pruning_torch.py>`
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/iterative_pruning_torch.py <examples/model_compress/pruning/iterative_pruning_torch.py>`
"""
def __init__(self, model: Module, config_list: List[Dict], pruning_algorithm: str,
......@@ -170,14 +170,14 @@ class AGPPruner(IterativePruner):
Examples
--------
>>> from nni.algorithms.compression.v2.pytorch.pruning import AGPPruner
>>> from nni.compression.pytorch.pruning import AGPPruner
>>> config_list = [{'sparsity': 0.8, 'op_types': ['Conv2d']}]
>>> finetuner = ...
>>> pruner = AGPPruner(model, config_list, pruning_algorithm='l1', total_iteration=10, finetuner=finetuner)
>>> pruner.compress()
>>> _, model, masks, _, _ = pruner.get_best_result()
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/iterative_pruning_torch.py <examples/model_compress/pruning/v2/iterative_pruning_torch.py>`
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/iterative_pruning_torch.py <examples/model_compress/pruning/iterative_pruning_torch.py>`
"""
def __init__(self, model: Module, config_list: List[Dict], pruning_algorithm: str,
......@@ -248,14 +248,14 @@ class LotteryTicketPruner(IterativePruner):
Examples
--------
>>> from nni.algorithms.compression.v2.pytorch.pruning import LotteryTicketPruner
>>> from nni.compression.pytorch.pruning import LotteryTicketPruner
>>> config_list = [{'sparsity': 0.8, 'op_types': ['Conv2d']}]
>>> finetuner = ...
>>> pruner = LotteryTicketPruner(model, config_list, pruning_algorithm='l1', total_iteration=10, finetuner=finetuner, reset_weight=True)
>>> pruner.compress()
>>> _, model, masks, _, _ = pruner.get_best_result()
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/iterative_pruning_torch.py <examples/model_compress/pruning/v2/iterative_pruning_torch.py>`
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/iterative_pruning_torch.py <examples/model_compress/pruning/iterative_pruning_torch.py>`
"""
......@@ -325,7 +325,7 @@ class SimulatedAnnealingPruner(IterativePruner):
Examples
--------
>>> from nni.algorithms.compression.v2.pytorch.pruning import SimulatedAnnealingPruner
>>> from nni.compression.pytorch.pruning import SimulatedAnnealingPruner
>>> model = ...
>>> config_list = [{'sparsity': 0.8, 'op_types': ['Conv2d']}]
>>> evaluator = ...
......@@ -334,7 +334,7 @@ class SimulatedAnnealingPruner(IterativePruner):
>>> pruner.compress()
>>> _, model, masks, _, _ = pruner.get_best_result()
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/simulated_anealing_pruning_torch.py <examples/model_compress/pruning/v2/simulated_anealing_pruning_torch.py>`
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/simulated_anealing_pruning_torch.py <examples/model_compress/pruning/simulated_anealing_pruning_torch.py>`
"""
def __init__(self, model: Module, config_list: List[Dict], evaluator: Callable[[Module], float], start_temperature: float = 100,
......
......@@ -90,8 +90,8 @@ class MovementPruner(BasicPruner):
The following figure from the paper illustrates weight pruning with movement pruning.
.. image:: ../../img/movement_pruning.png
:target: ../../img/movement_pruning.png
.. image:: ../../../img/movement_pruning.png
:target: ../../../img/movement_pruning.png
:alt:
For more details, please refer to `Movement Pruning: Adaptive Sparsity by Fine-Tuning <https://arxiv.org/abs/2005.07683>`__.
......@@ -146,7 +146,7 @@ class MovementPruner(BasicPruner):
Examples
--------
>>> import nni
>>> from nni.algorithms.compression.v2.pytorch.pruning import MovementPruner
>>> from nni.compression.pytorch.pruning import MovementPruner
>>> model = ...
>>> # make sure you have used nni.trace to wrap the optimizer class before initializing it
>>> traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
......@@ -156,7 +156,7 @@ class MovementPruner(BasicPruner):
>>> pruner = MovementPruner(model, config_list, trainer, traced_optimizer, criterion, 10, 3000, 27000)
>>> masked_model, masks = pruner.compress()
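>>> # the trailing ints are assumed to map to training_epochs=10, warm_up_step=3000, cool_down_beginning_step=27000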
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/v2/movement_pruning_glue.py <examples/model_compress/pruning/v2/movement_pruning_glue.py>`
For a detailed example, please refer to :githublink:`examples/model_compress/pruning/movement_pruning_glue.py <examples/model_compress/pruning/movement_pruning_glue.py>`
"""
def __init__(self, model: Module, config_list: List[Dict], trainer: Callable[[Module, Optimizer, Callable], None],
traced_optimizer: Traceable, criterion: Callable[[Tensor, Tensor], Tensor], training_epochs: int, warm_up_step: int,
......
......@@ -11,7 +11,7 @@ from torch import Tensor
from torch.nn import Module
from nni.algorithms.compression.v2.pytorch.utils import config_list_canonical
from nni.compression.pytorch.utils.counter import count_flops_params
from nni.compression.pytorch.utils import count_flops_params
_logger = logging.getLogger(__name__)
......
......@@ -11,7 +11,7 @@ import torch
import numpy as np
import torch.nn as nn
from nni.compression.pytorch.utils.counter import count_flops_params
from nni.compression.pytorch.utils import count_flops_params
LUT_FILE = "lut.npy"
LUT_JSON_FILE = "lut.txt"
......
......@@ -3,4 +3,4 @@
from .speedup import ModelSpeedup
from .compressor import Compressor, Pruner, Quantizer
from .pruning import apply_compression_results
from .utils.apply_compression import apply_compression_results
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .apply_compression import apply_compression_results
from nni.algorithms.compression.v2.pytorch.pruning import *
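The star import above is the mechanism behind the promotion: the new package simply re-exports the v2 classes, so both import paths name the same objects. A quick equivalence check, as a sketch:

# both paths should resolve to the very same class object
from nni.compression.pytorch.pruning import L1NormPruner as promoted
from nni.algorithms.compression.v2.pytorch.pruning import L1NormPruner as original
assert promoted is original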
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .counter import count_flops_params
from .mask_conflict import ChannelMaskConflict, GroupMaskConflict
from .utils import *
from .sensitivity_analysis import SensitivityAnalysis
from .shape_dependency import *
from .shape_dependency import ReshapeDependency
def not_safe_to_prune(model, dummy_input):
"""
......
......@@ -10,7 +10,7 @@ from nni.algorithms.compression.v2.pytorch.base import PrunerModuleWrapper as Pr
from .utils import get_module_by_name
__all__ = ['ChannelDependency', 'GroupDependency',
__all__ = ['ChannelDependency', 'GroupDependency', 'ReshapeDependency',
'InputChannelDependency', 'AttentionWeightDependency']
......
......@@ -73,7 +73,7 @@ def check_rest_server(rest_port):
return False, response
else:
time.sleep(1)
return False, response
return False, response
def check_rest_server_quick(rest_port):
'''Check whether the RESTful server is ready; check only once.'''
......
......@@ -4,7 +4,7 @@ CWD=${PWD}
echo ""
echo "===========================Testing: pruning and speedup==========================="
cd ${CWD}/../examples/model_compress/pruning
cd ${CWD}/../examples/model_compress/pruning/legacy
echo "testing fpgm pruning and speedup..."
python3 basic_pruners_torch.py --pruner fpgm --pretrain-epochs 1 --fine-tune-epochs 1 --model vgg16 --dataset cifar10
......
......@@ -12,7 +12,7 @@ import numpy as np
from nni.algorithms.compression.pytorch.pruning import L1FilterPruner
from nni.compression.pytorch.utils.shape_dependency import ChannelDependency
from nni.compression.pytorch.utils.mask_conflict import fix_mask_conflict
from nni.compression.pytorch.utils.counter import count_flops_params
from nni.compression.pytorch.utils import count_flops_params
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
prefix = 'analysis_test'
......
......@@ -9,7 +9,7 @@ import torch
import torch.nn.functional as F
import nni
from nni.algorithms.compression.v2.pytorch.pruning import (
from nni.compression.pytorch.pruning import (
LinearPruner,
AGPPruner,
LotteryTicketPruner,
......
......@@ -9,7 +9,7 @@ import torch
import torch.nn.functional as F
import nni
from nni.algorithms.compression.v2.pytorch.pruning import (
from nni.compression.pytorch.pruning import (
LevelPruner,
L1NormPruner,
L2NormPruner,
......