Unverified Commit 6b8efe3e authored by J-shang, committed by GitHub

align nni.trace (#4464)

parent 90f96ef5
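
In short, this commit replaces the compression-specific `trace_parameters` helper with the general-purpose `nni.trace` wherever a traced optimizer (or LR scheduler) is constructed. A minimal before/after sketch of the call pattern (the toy model is hypothetical, for illustration only):

.. code-block:: python

   import nni
   import torch

   model = torch.nn.Linear(4, 2)  # hypothetical toy model

   # before this commit (compression-specific helper):
   #   from nni.algorithms.compression.v2.pytorch.utils import trace_parameters
   #   traced_optimizer = trace_parameters(torch.optim.Adam)(model.parameters())

   # after this commit: nni.trace wraps the optimizer class so NNI records
   # how the instance was constructed and can re-create it later
   traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters(), lr=1e-3)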
@@ -155,11 +155,11 @@ Usage

 .. code-block:: python

+   import nni
    from nni.algorithms.compression.v2.pytorch.pruning import SlimPruner
-   from nni.algorithms.compression.v2.pytorch.utils import trace_parameters

-   # make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
-   traced_optimizer = trace_parameters(torch.optim.Adam)(model.parameters())
+   # make sure you have used nni.trace to wrap the optimizer class before initializing it
+   traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
    config_list = [{ 'sparsity': 0.8, 'op_types': ['BatchNorm2d'] }]
    pruner = SlimPruner(model, config_list, trainer, traced_optimizer, criterion, training_epochs=1)

@@ -192,11 +192,11 @@ Usage

 .. code-block:: python

+   import nni
    from nni.algorithms.compression.v2.pytorch.pruning import ActivationAPoZRankPruner
-   from nni.algorithms.compression.v2.pytorch.utils import trace_parameters

-   # make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
-   traced_optimizer = trace_parameters(torch.optim.Adam)(model.parameters())
+   # make sure you have used nni.trace to wrap the optimizer class before initializing it
+   traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
    config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
    pruner = ActivationAPoZRankPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)

@@ -225,11 +225,11 @@ Usage

 .. code-block:: python

+   import nni
    from nni.algorithms.compression.v2.pytorch.pruning import ActivationMeanRankPruner
-   from nni.algorithms.compression.v2.pytorch.utils import trace_parameters

-   # make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
-   traced_optimizer = trace_parameters(torch.optim.Adam)(model.parameters())
+   # make sure you have used nni.trace to wrap the optimizer class before initializing it
+   traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
    config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
    pruner = ActivationMeanRankPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)

@@ -262,11 +262,11 @@ Usage

 .. code-block:: python

+   import nni
    from nni.algorithms.compression.v2.pytorch.pruning import TaylorFOWeightPruner
-   from nni.algorithms.compression.v2.pytorch.utils import trace_parameters

-   # make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
-   traced_optimizer = trace_parameters(torch.optim.Adam)(model.parameters())
+   # make sure you have used nni.trace to wrap the optimizer class before initializing it
+   traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
    config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
    pruner = TaylorFOWeightPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)

@@ -300,11 +300,11 @@ Usage

 .. code-block:: python

+   import nni
    from nni.algorithms.compression.v2.pytorch.pruning import ADMMPruner
-   from nni.algorithms.compression.v2.pytorch.utils import trace_parameters

-   # make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
-   traced_optimizer = trace_parameters(torch.optim.Adam)(model.parameters())
+   # make sure you have used nni.trace to wrap the optimizer class before initializing it
+   traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
    config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
    pruner = ADMMPruner(model, config_list, trainer, traced_optimizer, criterion, iterations=10, training_epochs=1)

@@ -341,11 +341,11 @@ Usage

 .. code-block:: python

+   import nni
    from nni.algorithms.compression.v2.pytorch.pruning import MovementPruner
-   from nni.algorithms.compression.v2.pytorch.utils import trace_parameters

-   # make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
-   traced_optimizer = trace_parameters(torch.optim.Adam)(model.parameters())
+   # make sure you have used nni.trace to wrap the optimizer class before initializing it
+   traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
    config_list = [{'op_types': ['Linear'], 'op_partial_names': ['bert.encoder'], 'sparsity': 0.9}]
    pruner = MovementPruner(model, config_list, trainer, traced_optimizer, criterion, 10, 3000, 27000)

@@ -526,11 +526,11 @@ Usage

 .. code-block:: python

+   import nni
    from nni.algorithms.compression.v2.pytorch.pruning import AutoCompressPruner
-   from nni.algorithms.compression.v2.pytorch.utils import trace_parameters

-   # make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
-   traced_optimizer = trace_parameters(torch.optim.Adam)(model.parameters())
+   # make sure you have used nni.trace to wrap the optimizer class before initializing it
+   traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())
    config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
    admm_params = {
...
@@ -14,10 +14,10 @@ import torch
 from torchvision import datasets, transforms
 from torch.optim.lr_scheduler import MultiStepLR

+import nni
 from nni.compression.pytorch import ModelSpeedup
 from nni.compression.pytorch.utils.counter import count_flops_params
 from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import ActivationAPoZRankPruner, ActivationMeanRankPruner
-from nni.algorithms.compression.v2.pytorch.utils import trace_parameters

 from pathlib import Path
 sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
@@ -114,8 +114,8 @@ if __name__ == '__main__':
         'op_types': ['Conv2d'],
     }]
-    # make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
-    traced_optimizer = trace_parameters(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
+    # make sure you have used nni.trace to wrap the optimizer class before initializing it
+    traced_optimizer = nni.trace(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
     if 'apoz' in args.pruner:
         pruner = ActivationAPoZRankPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)
     else:
...
@@ -14,9 +14,9 @@ import torch
 from torchvision import datasets, transforms
 from torch.optim.lr_scheduler import MultiStepLR

+import nni
 from nni.compression.pytorch.utils.counter import count_flops_params
 from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import ADMMPruner
-from nni.algorithms.compression.v2.pytorch.utils import trace_parameters

 from pathlib import Path
 sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
@@ -113,8 +113,8 @@ if __name__ == '__main__':
         'op_types': ['Conv2d'],
     }]
-    # make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
-    traced_optimizer = trace_parameters(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
+    # make sure you have used nni.trace to wrap the optimizer class before initializing it
+    traced_optimizer = nni.trace(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
     pruner = ADMMPruner(model, config_list, trainer, traced_optimizer, criterion, iterations=2, training_epochs=2)
     _, masks = pruner.compress()
     pruner.show_pruned_weights()
...
@@ -4,8 +4,8 @@ from tqdm import tqdm
 import torch
 from torchvision import datasets, transforms

+import nni
 from nni.algorithms.compression.v2.pytorch.pruning import AutoCompressPruner
-from nni.algorithms.compression.v2.pytorch.utils import trace_parameters

 from pathlib import Path
 sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
@@ -77,8 +77,8 @@ if __name__ == '__main__':
     config_list = [{'op_types': ['Conv2d'], 'total_sparsity': 0.8}]
     dummy_input = torch.rand(10, 3, 32, 32).to(device)
-    # make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
-    traced_optimizer = trace_parameters(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
+    # make sure you have used nni.trace to wrap the optimizer class before initializing it
+    traced_optimizer = nni.trace(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
     admm_params = {
         'trainer': trainer,
         'traced_optimizer': traced_optimizer,
...
@@ -13,8 +13,8 @@ from transformers import (
     set_seed
 )

+import nni
 from nni.algorithms.compression.v2.pytorch.pruning import MovementPruner
-from nni.algorithms.compression.v2.pytorch.utils import trace_parameters

 task_to_keys = {
@@ -110,8 +110,8 @@ if __name__ == '__main__':
     config_list = [{'op_types': ['Linear'], 'op_partial_names': ['bert.encoder'], 'sparsity': 0.9}]
     p_trainer = functools.partial(trainer, train_dataloader=train_dataloader)
-    # make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
-    traced_optimizer = trace_parameters(Adam)(model.parameters(), lr=2e-5)
+    # make sure you have used nni.trace to wrap the optimizer class before initializing it
+    traced_optimizer = nni.trace(Adam)(model.parameters(), lr=2e-5)
     pruner = MovementPruner(model, config_list, p_trainer, traced_optimizer, criterion, training_epochs=10,
                             warm_up_step=3000, cool_down_beginning_step=27000)
...
@@ -14,10 +14,10 @@ import torch
 from torchvision import datasets, transforms
 from torch.optim.lr_scheduler import MultiStepLR

+import nni
 from nni.compression.pytorch import ModelSpeedup
 from nni.compression.pytorch.utils.counter import count_flops_params
 from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import SlimPruner
-from nni.algorithms.compression.v2.pytorch.utils import trace_parameters

 from pathlib import Path
 sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
@@ -112,8 +112,8 @@ if __name__ == '__main__':
         'max_sparsity_per_layer': 0.9
     }]
-    # make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
-    traced_optimizer = trace_parameters(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
+    # make sure you have used nni.trace to wrap the optimizer class before initializing it
+    traced_optimizer = nni.trace(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
     pruner = SlimPruner(model, config_list, trainer, traced_optimizer, criterion, training_epochs=1, scale=0.0001, mode='global')
     _, masks = pruner.compress()
     pruner.show_pruned_weights()
...
@@ -14,10 +14,10 @@ import torch
 from torchvision import datasets, transforms
 from torch.optim.lr_scheduler import MultiStepLR

+import nni
 from nni.compression.pytorch import ModelSpeedup
 from nni.compression.pytorch.utils.counter import count_flops_params
 from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import TaylorFOWeightPruner
-from nni.algorithms.compression.v2.pytorch.utils import trace_parameters

 from pathlib import Path
 sys.path.append(str(Path(__file__).absolute().parents[2] / 'models'))
@@ -111,8 +111,8 @@ if __name__ == '__main__':
         'op_types': ['Conv2d'],
     }]
-    # make sure you have used nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize
-    traced_optimizer = trace_parameters(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
+    # make sure you have used nni.trace to wrap the optimizer class before initializing it
+    traced_optimizer = nni.trace(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
     pruner = TaylorFOWeightPruner(model, config_list, trainer, traced_optimizer, criterion, training_batches=20)
     _, masks = pruner.compress()
     pruner.show_pruned_weights()
...
@@ -59,8 +59,8 @@ class AutoCompressPruner(IterativePruner):
         A callable function used to train model or just inference. Take model, optimizer, criterion as input.
         The model will be trained or inferenced `training_epochs` epochs.
     - traced_optimizer : nni.common.serializer.Traceable(torch.optim.Optimizer)
-        The traced optimizer instance which the optimizer class is wrapped by nni.algorithms.compression.v2.pytorch.utils.trace_parameters.
-        E.g. traced_optimizer = nni.algorithms.compression.v2.pytorch.utils.trace_parameters(torch.nn.Adam)(model.parameters()).
+        The traced optimizer instance, created by wrapping the optimizer class with nni.trace.
+        E.g. traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters()).
     - criterion : Callable[[Tensor, Tensor], Tensor].
         The criterion function used in trainer. Take model output and target value as input, and return the loss.
     - iterations : int.
...
@@ -379,8 +379,8 @@ class SlimPruner(BasicPruner):
             optimizer.step()
         model.train(mode=training)
     traced_optimizer : nni.common.serializer.Traceable(torch.optim.Optimizer)
-        The traced optimizer instance which the optimizer class is wrapped by nni.algorithms.compression.v2.pytorch.utils.trace_parameters.
-        E.g. traced_optimizer = nni.algorithms.compression.v2.pytorch.utils.trace_parameters(torch.nn.Adam)(model.parameters()).
+        The traced optimizer instance, created by wrapping the optimizer class with nni.trace.
+        E.g. traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters()).
     criterion : Callable[[Tensor, Tensor], Tensor]
         The criterion function used in trainer. Take model output and target value as input, and return the loss.
     training_epochs : int
@@ -484,8 +484,8 @@ class ActivationPruner(BasicPruner):
             optimizer.step()
         model.train(mode=training)
     traced_optimizer : nni.common.serializer.Traceable(torch.optim.Optimizer)
-        The traced optimizer instance which the optimizer class is wrapped by nni.algorithms.compression.v2.pytorch.utils.trace_parameters.
-        E.g. traced_optimizer = nni.algorithms.compression.v2.pytorch.utils.trace_parameters(torch.nn.Adam)(model.parameters()).
+        The traced optimizer instance, created by wrapping the optimizer class with nni.trace.
+        E.g. traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters()).
     criterion : Callable[[Tensor, Tensor], Tensor]
         The criterion function used in trainer. Take model output and target value as input, and return the loss.
     training_batches
@@ -628,8 +628,8 @@ class TaylorFOWeightPruner(BasicPruner):
             optimizer.step()
         model.train(mode=training)
     traced_optimizer : nni.common.serializer.Traceable(torch.optim.Optimizer)
-        The traced optimizer instance which the optimizer class is wrapped by nni.algorithms.compression.v2.pytorch.utils.trace_parameters.
-        E.g. traced_optimizer = nni.algorithms.compression.v2.pytorch.utils.trace_parameters(torch.nn.Adam)(model.parameters()).
+        The traced optimizer instance, created by wrapping the optimizer class with nni.trace.
+        E.g. traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters()).
     criterion : Callable[[Tensor, Tensor], Tensor]
         The criterion function used in trainer. Take model output and target value as input, and return the loss.
     training_batches : int
@@ -761,8 +761,8 @@ class ADMMPruner(BasicPruner):
             optimizer.step()
         model.train(mode=training)
     traced_optimizer : nni.common.serializer.Traceable(torch.optim.Optimizer)
-        The traced optimizer instance which the optimizer class is wrapped by nni.algorithms.compression.v2.pytorch.utils.trace_parameters.
-        E.g. traced_optimizer = nni.algorithms.compression.v2.pytorch.utils.trace_parameters(torch.nn.Adam)(model.parameters()).
+        The traced optimizer instance, created by wrapping the optimizer class with nni.trace.
+        E.g. traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters()).
     criterion : Callable[[Tensor, Tensor], Tensor]
         The criterion function used in trainer. Take model output and target value as input, and return the loss.
     iterations : int
...
@@ -157,8 +157,8 @@ class MovementPruner(BasicPruner):
             optimizer.step()
         model.train(mode=training)
     traced_optimizer : nni.common.serializer.Traceable(torch.optim.Optimizer)
-        The traced optimizer instance which the optimizer class is wrapped by nni.algorithms.compression.v2.pytorch.utils.trace_parameters.
-        E.g. traced_optimizer = nni.algorithms.compression.v2.pytorch.utils.trace_parameters(torch.nn.Adam)(model.parameters()).
+        The traced optimizer instance, created by wrapping the optimizer class with nni.trace.
+        E.g. traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters()).
     criterion : Callable[[Tensor, Tensor], Tensor]
         The criterion function used in trainer. Take model output and target value as input, and return the loss.
     training_epochs : int
...
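
All of these docstrings describe the same contract for `trainer` and `criterion`; a minimal, self-contained sketch of callables that satisfy it (the linear model and random data are hypothetical, for illustration only):

.. code-block:: python

   import nni
   import torch
   import torch.nn.functional as F

   model = torch.nn.Linear(4, 2)  # hypothetical toy model
   data = [(torch.randn(8, 4), torch.randint(0, 2, (8,))) for _ in range(4)]

   def trainer(model, optimizer, criterion):
       # one pass over the data; the pruner decides how often this runs
       model.train()
       for x, y in data:
           optimizer.zero_grad()
           loss = criterion(model(x), y)
           loss.backward()
           optimizer.step()

   def criterion(output, target):
       # takes model output and target value, returns the loss, as documented
       return F.cross_entropy(output, target)

   traced_optimizer = nni.trace(torch.optim.Adam)(model.parameters())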
@@ -12,14 +12,9 @@ from torch.optim.lr_scheduler import _LRScheduler
 from nni.common.serializer import _trace_cls
 from nni.common.serializer import Traceable

-__all__ = ['OptimizerConstructHelper', 'LRSchedulerConstructHelper', 'trace_parameters']
+__all__ = ['OptimizerConstructHelper', 'LRSchedulerConstructHelper']

-def trace_parameters(base, kw_only=True):
-    if not isinstance(base, type):
-        raise Exception('Only class can be traced by this function.')
-    return _trace_cls(base, kw_only, call_super=False)

 class ConstructHelper:
     def __init__(self, callable_obj: Callable, *args, **kwargs):
         assert callable(callable_obj), '`callable_obj` must be a callable object.'
@@ -86,7 +81,7 @@ class OptimizerConstructHelper(ConstructHelper):
     @staticmethod
     def from_trace(model: Module, optimizer_trace: Traceable):
         assert isinstance(optimizer_trace, Traceable), \
-            'Please use nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the optimizer class before initialize the optimizer.'
+            'Please use nni.trace to wrap the optimizer class before initializing the optimizer.'
         assert isinstance(optimizer_trace, Optimizer), \
             'It is not an instance of torch.optim.Optimizer.'
         return OptimizerConstructHelper(model,
@@ -118,7 +113,7 @@ class LRSchedulerConstructHelper(ConstructHelper):
     @staticmethod
     def from_trace(lr_scheduler_trace: Traceable):
         assert isinstance(lr_scheduler_trace, Traceable), \
-            'Please use nni.algorithms.compression.v2.pytorch.utils.trace_parameters to wrap the lr scheduler class before initialize the scheduler.'
+            'Please use nni.trace to wrap the lr scheduler class before initializing the scheduler.'
         assert isinstance(lr_scheduler_trace, _LRScheduler), \
             'It is not an instance of torch.optim.lr_scheduler._LRScheduler.'
         return LRSchedulerConstructHelper(lr_scheduler_trace.trace_symbol,
...
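
Why the replacement works: an object wrapped by `nni.trace` is still an instance of its base class, and it additionally carries the construction record that `OptimizerConstructHelper.from_trace` consumes to rebuild the optimizer. A small sketch (`trace_symbol` appears in the diff above; `trace_kwargs` is assumed from the same nni.common.serializer interface):

.. code-block:: python

   import nni
   import torch

   model = torch.nn.Linear(4, 2)  # hypothetical toy model
   traced = nni.trace(torch.optim.SGD)(model.parameters(), lr=0.1, momentum=0.9)

   # the traced instance still satisfies the isinstance check above
   assert isinstance(traced, torch.optim.Optimizer)

   # and it records how it was built, so the helper can re-construct it
   print(traced.trace_symbol)   # the wrapped class, i.e. torch.optim.SGD
   print(traced.trace_kwargs)   # assumed: e.g. {'lr': 0.1, 'momentum': 0.9}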
@@ -7,6 +7,7 @@ import unittest
 import torch
 import torch.nn.functional as F

+import nni
 from nni.algorithms.compression.v2.pytorch.pruning import (
     LinearPruner,
     AGPPruner,
@@ -15,8 +16,7 @@ from nni.algorithms.compression.v2.pytorch.pruning import (
     AutoCompressPruner,
     AMCPruner
 )
-from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact, trace_parameters
+from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact

 class TorchModel(torch.nn.Module):
@@ -53,7 +53,7 @@ def trainer(model, optimizer, criterion):
 def get_optimizer(model):
-    return trace_parameters(torch.optim.SGD)(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
+    return nni.trace(torch.optim.SGD)(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)

 criterion = torch.nn.CrossEntropyLoss()
...
@@ -6,6 +6,7 @@ import unittest
 import torch
 import torch.nn.functional as F

+import nni
 from nni.algorithms.compression.v2.pytorch.pruning import (
     LevelPruner,
     L1NormPruner,
@@ -18,7 +19,7 @@ from nni.algorithms.compression.v2.pytorch.pruning import (
     ADMMPruner,
     MovementPruner
 )
-from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact, trace_parameters
+from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact

 class TorchModel(torch.nn.Module):
@@ -55,7 +56,7 @@ def trainer(model, optimizer, criterion):
 def get_optimizer(model):
-    return trace_parameters(torch.optim.SGD)(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
+    return nni.trace(torch.optim.SGD)(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)

 criterion = torch.nn.CrossEntropyLoss()
...
@@ -6,6 +6,7 @@ import unittest
 import torch
 import torch.nn.functional as F

+import nni
 from nni.algorithms.compression.v2.pytorch.base import Pruner
 from nni.algorithms.compression.v2.pytorch.pruning.tools import (
     WeightDataCollector,
@@ -24,7 +25,7 @@ from nni.algorithms.compression.v2.pytorch.pruning.tools import (
     GlobalSparsityAllocator
 )
 from nni.algorithms.compression.v2.pytorch.pruning.tools.base import HookCollectorInfo
-from nni.algorithms.compression.v2.pytorch.utils import get_module_by_name, trace_parameters
+from nni.algorithms.compression.v2.pytorch.utils import get_module_by_name
 from nni.algorithms.compression.v2.pytorch.utils.constructor_helper import OptimizerConstructHelper
@@ -62,7 +63,7 @@ def trainer(model, optimizer, criterion):
 def get_optimizer(model):
-    return trace_parameters(torch.optim.SGD)(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
+    return nni.trace(torch.optim.SGD)(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)

 criterion = torch.nn.CrossEntropyLoss()
...