"examples/model_compress/pruning/fpgm_pruning_torch.py" did not exist on "e8b88a79f201f2bd8bbf7abad96de9d7307d8d7f"
Unverified commit 6b8efe3e, authored by J-shang and committed by GitHub

align nni.trace (#4464)

parent 90f96ef5
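This commit replaces the compression-specific trace_parameters helper with the general-purpose nni.trace wrapper everywhere an optimizer class must be wrapped before it is instantiated. A minimal before/after sketch of the migration (model, trainer, and criterion are placeholder names, not taken from the diff):

.. code-block:: python

    import nni
    import torch
    from nni.algorithms.compression.v2.pytorch.pruning import TaylorFOWeightPruner

    # before this commit (helper now removed):
    #   from nni.algorithms.compression.v2.pytorch.utils import trace_parameters
    #   traced_optimizer = trace_parameters(torch.optim.Adam)(model.parameters())

    # after this commit: wrap the optimizer class with nni.trace, then
    # instantiate it; the pruner can later re-construct the optimizer from
    # the recorded constructor arguments
    traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters(), lr=1e-3)

    config_list = [{'sparsity': 0.8, 'op_types': ['Conv2d']}]
    pruner = TaylorFOWeightPruner(model, config_list, trainer, traced_optimizer,
                                  criterion, training_batches=20)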
@@ -155,11 +155,11 @@ Usage
.. code-block:: python
import nni
from nni.algorithms.compression.v2.pytorch.pruning import SlimPruner
from nni.algorithms.compression.v2.pytorch.utils import trace_parameters
# make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
traced_optimizer = trace_parameters(torch.optim.Adam)(model.parameters())
# make sure you have used nni.trace to wrap the optimizer class before initializing it
traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
config_list = [{ 'sparsity': 0.8, 'op_types': ['BatchNorm2d'] }]
pruner = SlimPruner(model, config_list, trainer, traced_optimizer, criterion, training_epochs=1)
@@ -192,11 +192,11 @@ Usage
.. code-block:: python
import nni
from nni.algorithms.compression.v2.pytorch.pruning import ActivationAPoZRankPruner
from nni.algorithms.compression.v2.pytorch.utils import trace_parameters
# make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
traced_optimizer = trace_parameters(torch.optim.Adam)(model.parameters())
# make sure you have used nni.trace to wrap the optimizer class before initializing it
traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
pruner = ActivationAPoZRankPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)
@@ -225,11 +225,11 @@ Usage
.. code-block:: python
import nni
from nni.algorithms.compression.v2.pytorch.pruning import ActivationMeanRankPruner
from nni.algorithms.compression.v2.pytorch.utils import trace_parameters
# make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
traced_optimizer = trace_parameters(torch.optim.Adam)(model.parameters())
# make sure you have used nni.trace to wrap the optimizer class before initializing it
traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
pruner = ActivationMeanRankPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)
@@ -262,11 +262,11 @@ Usage
.. code-block:: python
import nni
from nni.algorithms.compression.v2.pytorch.pruning import TaylorFOWeightPruner
from nni.algorithms.compression.v2.pytorch.utils import trace_parameters
# make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
traced_optimizer = trace_parameters(torch.optim.Adam)(model.parameters())
# make sure you have used nni.trace to wrap the optimizer class before initializing it
traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
pruner = TaylorFOWeightPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)
@@ -300,11 +300,11 @@ Usage
.. code-block:: python
import nni
from nni.algorithms.compression.v2.pytorch.pruning import ADMMPruner
from nni.algorithms.compression.v2.pytorch.utils import trace_parameters
# make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
traced_optimizer = trace_parameters(torch.optim.Adam)(model.parameters())
# make sure you have used nni.trace to wrap the optimizer class before initializing it
traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
pruner = ADMMPruner(model, config_list, trainer, traced_optimizer, criterion, iterations=10, training_epochs=1)
@@ -341,11 +341,11 @@ Usage
.. code-block:: python
import nni
from nni.algorithms.compression.v2.pytorch.pruning import MovementPruner
from nni.algorithms.compression.v2.pytorch.utils import trace_parameters
# make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
traced_optimizer = trace_parameters(torch.optim.Adam)(model.parameters())
# make sure you have used nni.trace to wrap the optimizer class before initializing it
traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
config_list = [{'op_types': ['Linear'], 'op_partial_names': ['bert.encoder'], 'sparsity': 0.9}]
pruner = MovementPruner(model, config_list, trainer, traced_optimizer, criterion, 10, 3000, 27000)
@@ -526,11 +526,11 @@ Usage
.. code-block:: python
import nni
from nni.algorithms.compression.v2.pytorch.pruning import AutoCompressPruner
from nni.algorithms.compression.v2.pytorch.utils import trace_parameters
# make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
traced_optimizer = trace_parameters(torch.optim.Adam)(model.parameters())
# make sure you have used nni.trace to wrap the optimizer class before initializing it
traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
admm_params = {
......
@@ -14,10 +14,10 @@ import torch
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import MultiStepLR
import nni
from nni.compression.pytorch import ModelSpeedup
from nni.compression.pytorch.utils.counter import count_flops_params
from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import ActivationAPoZRankPruner, ActivationMeanRankPruner
from nni.algorithms.compression.v2.pytorch.utils import trace_parameters
from pathlib import Path
sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
@@ -114,8 +114,8 @@ if __name__ == '__main__':
'op_types': ['Conv2d'],
}]
# make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
traced_optimizer = trace_parameters(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
# make sure you have used nni.trace to wrap the optimizer class before initializing it
traced_optimizer = nni.trace(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
if 'apoz' in args.pruner:
pruner = ActivationAPoZRankPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)
else:
......
@@ -14,9 +14,9 @@ import torch
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import MultiStepLR
import nni
from nni.compression.pytorch.utils.counter import count_flops_params
from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import ADMMPruner
from nni.algorithms.compression.v2.pytorch.utils import trace_parameters
from pathlib import Path
sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
@@ -113,8 +113,8 @@ if __name__ == '__main__':
'op_types': ['Conv2d'],
}]
# make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
traced_optimizer = trace_parameters(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
# make sure you have used nni.trace to wrap the optimizer class before initializing it
traced_optimizer = nni.trace(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
pruner = ADMMPruner(model, config_list, trainer, traced_optimizer, criterion, iterations=2, training_epochs=2)
_, masks = pruner.compress()
pruner.show_pruned_weights()
......
@@ -4,8 +4,8 @@ from tqdm import tqdm
import torch
from torchvision import datasets, transforms
import nni
from nni.algorithms.compression.v2.pytorch.pruning import AutoCompressPruner
from nni.algorithms.compression.v2.pytorch.utils import trace_parameters
from pathlib import Path
sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
@@ -77,8 +77,8 @@ if __name__ == '__main__':
config_list = [{'op_types': ['Conv2d'], 'total_sparsity': 0.8}]
dummy_input = torch.rand(10, 3, 32, 32).to(device)
# make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
traced_optimizer = trace_parameters(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
# make sure you have used nni.trace to wrap the optimizer class before initializing it
traced_optimizer = nni.trace(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
admm_params = {
'trainer': trainer,
'traced_optimizer': traced_optimizer,
......
@@ -13,8 +13,8 @@ from transformers import (
set_seed
)
import nni
from nni.algorithms.compression.v2.pytorch.pruning import MovementPruner
from nni.algorithms.compression.v2.pytorch.utils import trace_parameters
task_to_keys = {
@@ -110,8 +110,8 @@ if __name__ == '__main__':
config_list = [{'op_types': ['Linear'], 'op_partial_names': ['bert.encoder'], 'sparsity': 0.9}]
p_trainer = functools.partial(trainer, train_dataloader=train_dataloader)
# make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
traced_optimizer = trace_parameters(Adam)(model.parameters(), lr=2e-5)
# make sure you have used nni.trace to wrap the optimizer class before initializing it
traced_optimizer = nni.trace(Adam)(model.parameters(), lr=2e-5)
pruner = MovementPruner(model, config_list, p_trainer, traced_optimizer, criterion, training_epochs=10,
warm_up_step=3000, cool_down_beginning_step=27000)
......
@@ -14,10 +14,10 @@ import torch
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import MultiStepLR
import nni
from nni.compression.pytorch import ModelSpeedup
from nni.compression.pytorch.utils.counter import count_flops_params
from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import SlimPruner
from nni.algorithms.compression.v2.pytorch.utils import trace_parameters
from pathlib import Path
sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
@@ -112,8 +112,8 @@ if __name__ == '__main__':
'max_sparsity_per_layer': 0.9
}]
# make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
traced_optimizer = trace_parameters(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
# make sure you have used nni.trace to wrap the optimizer class before initializing it
traced_optimizer = nni.trace(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
pruner = SlimPruner(model, config_list, trainer, traced_optimizer, criterion, training_epochs=1, scale=0.0001, mode='global')
_, masks = pruner.compress()
pruner.show_pruned_weights()
......
@@ -14,10 +14,10 @@ import torch
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import MultiStepLR
import nni
from nni.compression.pytorch import ModelSpeedup
from nni.compression.pytorch.utils.counter import count_flops_params
from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import TaylorFOWeightPruner
from nni.algorithms.compression.v2.pytorch.utils import trace_parameters
from pathlib import Path
sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
@@ -111,8 +111,8 @@ if __name__ == '__main__':
'op_types': ['Conv2d'],
}]
# make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
traced_optimizer = trace_parameters(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
# make sure you have used nni.trace to wrap the optimizer class before initializing it
traced_optimizer = nni.trace(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
pruner = TaylorFOWeightPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)
_, masks = pruner.compress()
pruner.show_pruned_weights()
......
@@ -59,8 +59,8 @@ class AutoCompressPruner(IterativePruner):
A callable function used to train the model or just run inference. Takes model, optimizer, criterion as input.
The model will be trained or inferenced for `training_epochs` epochs.
- traced_optimizer : nni.common.serializer.Traceable(torch.optim.Optimizer)
The traced optimizer instance which the optimizer class is wrapped by nni.algorithms.compression.v2.pytorch.utils.trace_parameters.
E.g. traced_optimizer = nni.algorithms.compression.v2.pytorch.utils.trace_parameters(torch.nn.Adam)(model.parameters()).
The traced optimizer instance, whose optimizer class is wrapped by nni.trace.
E.g. traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters()).
- criterion : Callable[[Tensor, Tensor], Tensor].
The criterion function used in trainer. Takes model output and target value as input, and returns the loss.
- iterations : int.
......
@@ -379,8 +379,8 @@ class SlimPruner(BasicPruner):
optimizer.step()
model.train(mode=training)
traced_optimizer : nni.common.serializer.Traceable(torch.optim.Optimizer)
The traced optimizer instance which the optimizer class is wrapped by nni.algorithms.compression.v2.pytorch.utils.trace_parameters.
E.g. traced_optimizer = nni.algorithms.compression.v2.pytorch.utils.trace_parameters(torch.nn.Adam)(model.parameters()).
The traced optimizer instance, whose optimizer class is wrapped by nni.trace.
E.g. traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters()).
criterion : Callable[[Tensor, Tensor], Tensor]
The criterion function used in trainer. Takes model output and target value as input, and returns the loss.
training_epochs : int
@@ -484,8 +484,8 @@ class ActivationPruner(BasicPruner):
optimizer.step()
model.train(mode=training)
traced_optimizer : nni.common.serializer.Traceable(torch.optim.Optimizer)
The traced optimizer instance which the optimizer class is wrapped by nni.algorithms.compression.v2.pytorch.utils.trace_parameters.
E.g. traced_optimizer = nni.algorithms.compression.v2.pytorch.utils.trace_parameters(torch.nn.Adam)(model.parameters()).
The traced optimizer instance, whose optimizer class is wrapped by nni.trace.
E.g. traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters()).
criterion : Callable[[Tensor, Tensor], Tensor]
The criterion function used in trainer. Takes model output and target value as input, and returns the loss.
training_batches
@@ -628,8 +628,8 @@ class TaylorFOWeightPruner(BasicPruner):
optimizer.step()
model.train(mode=training)
traced_optimizer : nni.common.serializer.Traceable(torch.optim.Optimizer)
The traced optimizer instance which the optimizer class is wrapped by nni.algorithms.compression.v2.pytorch.utils.trace_parameters.
E.g. traced_optimizer = nni.algorithms.compression.v2.pytorch.utils.trace_parameters(torch.nn.Adam)(model.parameters()).
The traced optimizer instance, whose optimizer class is wrapped by nni.trace.
E.g. traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters()).
criterion : Callable[[Tensor, Tensor], Tensor]
The criterion function used in trainer. Takes model output and target value as input, and returns the loss.
training_batches : int
@@ -761,8 +761,8 @@ class ADMMPruner(BasicPruner):
optimizer.step()
model.train(mode=training)
traced_optimizer : nni.common.serializer.Traceable(torch.optim.Optimizer)
The traced optimizer instance which the optimizer class is wrapped by nni.algorithms.compression.v2.pytorch.utils.trace_parameters.
E.g. traced_optimizer = nni.algorithms.compression.v2.pytorch.utils.trace_parameters(torch.nn.Adam)(model.parameters()).
The traced optimizer instance, whose optimizer class is wrapped by nni.trace.
E.g. traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters()).
criterion : Callable[[Tensor, Tensor], Tensor]
The criterion function used in trainer. Takes model output and target value as input, and returns the loss.
iterations : int
......
@@ -157,8 +157,8 @@ class MovementPruner(BasicPruner):
optimizer.step()
model.train(mode=training)
traced_optimizer : nni.common.serializer.Traceable(torch.optim.Optimizer)
The traced optimizer instance which the optimizer class is wrapped by nni.algorithms.compression.v2.pytorch.utils.trace_parameters.
E.g. traced_optimizer = nni.algorithms.compression.v2.pytorch.utils.trace_parameters(torch.nn.Adam)(model.parameters()).
The traced optimizer instance, whose optimizer class is wrapped by nni.trace.
E.g. traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters()).
criterion : Callable[[Tensor, Tensor], Tensor]
The criterion function used in trainer. Takes model output and target value as input, and returns the loss.
training_epochs : int
......
@@ -12,14 +12,9 @@ from torch.optim.lr_scheduler import _LRScheduler
from nni.common.serializer import _trace_cls
from nni.common.serializer import Traceable
__all__ = ['OptimizerConstructHelper', 'LRSchedulerConstructHelper', 'trace_parameters']
__all__ = ['OptimizerConstructHelper', 'LRSchedulerConstructHelper']
def trace_parameters(base, kw_only=True):
if not isinstance(base, type):
raise Exception('Only class can be traced by this function.')
return _trace_cls(base, kw_only, call_super=False)
class ConstructHelper:
def __init__(self, callable_obj: Callable, *args, **kwargs):
assert callable(callable_obj), '`callable_obj` must be a callable object.'
@@ -86,7 +81,7 @@ class OptimizerConstructHelper(ConstructHelper):
@staticmethod
def from_trace(model: Module, optimizer_trace: Traceable):
assert isinstance(optimizer_trace, Traceable), \
'Please use nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize the optimizer.'
'Please use nni.trace to wrap the optimizer class before initializing the optimizer.'
assert isinstance(optimizer_trace, Optimizer), \
'It is not an instance of torch.optim.Optimizer.'
return OptimizerConstructHelper(model,
@@ -118,7 +113,7 @@ class LRSchedulerConstructHelper(ConstructHelper):
@staticmethod
def from_trace(lr_scheduler_trace: Traceable):
assert isinstance(lr_scheduler_trace, Traceable), \
'Please use nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the lr scheduler class before initialize the scheduler.'
'Please use nni.trace to wrap the lr scheduler class before initializing the scheduler.'
assert isinstance(lr_scheduler_trace, _LRScheduler), \
'It is not an instance of torch.optim.lr_scheduler._LRScheduler.'
return LRSchedulerConstructHelper(lr_scheduler_trace.trace_symbol,
......
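The helper can be deleted because trace_parameters was only a thin wrapper around nni.common.serializer._trace_cls, and the public nni.trace records the equivalent construction information that OptimizerConstructHelper.from_trace consumes. A sketch of what a traced optimizer exposes (the attribute names follow nni.common.serializer.Traceable as used above; the toy model and printed values are illustrative only):

.. code-block:: python

    import nni
    import torch

    # a toy model so the optimizer has parameters to bind
    model = torch.nn.Linear(4, 2)
    traced_optimizer = nni.trace(torch.optim.SGD)(model.parameters(), lr=0.1)

    # nni.trace records how the instance was constructed; the construct
    # helpers above read these fields to rebuild the optimizer later
    print(traced_optimizer.trace_symbol)  # the wrapped class, torch.optim.SGD
    print(traced_optimizer.trace_kwargs)  # recorded init arguments, e.g. lr=0.1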
@@ -7,6 +7,7 @@ import unittest
import torch
import torch.nn.functional as F
import nni
from nni.algorithms.compression.v2.pytorch.pruning import (
LinearPruner,
AGPPruner,
@@ -15,8 +16,7 @@ from nni.algorithms.compression.v2.pytorch.pruning import (
AutoCompressPruner,
AMCPruner
)
from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact, trace_parameters
from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact
class TorchModel(torch.nn.Module):
@@ -53,7 +53,7 @@ def trainer(model, optimizer, criterion):
def get_optimizer(model):
return trace_parameters(torch.optim.SGD)(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
return nni.trace(torch.optim.SGD)(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
criterion = torch.nn.CrossEntropyLoss()
......
@@ -6,6 +6,7 @@ import unittest
import torch
import torch.nn.functional as F
import nni
from nni.algorithms.compression.v2.pytorch.pruning import (
LevelPruner,
L1NormPruner,
@@ -18,7 +19,7 @@ from nni.algorithms.compression.v2.pytorch.pruning import (
ADMMPruner,
MovementPruner
)
from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact, trace_parameters
from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact
class TorchModel(torch.nn.Module):
@@ -55,7 +56,7 @@ def trainer(model, optimizer, criterion):
def get_optimizer(model):
return trace_parameters(torch.optim.SGD)(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
return nni.trace(torch.optim.SGD)(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
criterion = torch.nn.CrossEntropyLoss()
......
@@ -6,6 +6,7 @@ import unittest
import torch
import torch.nn.functional as F
import nni
from nni.algorithms.compression.v2.pytorch.base import Pruner
from nni.algorithms.compression.v2.pytorch.pruning.tools import (
WeightDataCollector,
@@ -24,7 +25,7 @@ from nni.algorithms.compression.v2.pytorch.pruning.tools import (
GlobalSparsityAllocator
)
from nni.algorithms.compression.v2.pytorch.pruning.tools.base import HookCollectorInfo
from nni.algorithms.compression.v2.pytorch.utils import get_module_by_name, trace_parameters
from nni.algorithms.compression.v2.pytorch.utils import get_module_by_name
from nni.algorithms.compression.v2.pytorch.utils.constructor_helper import OptimizerConstructHelper
@@ -62,7 +63,7 @@ def trainer(model, optimizer, criterion):
def get_optimizer(model):
return trace_parameters(torch.optim.SGD)(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
return nni.trace(torch.optim.SGD)(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
criterion = torch.nn.CrossEntropyLoss()
......