Unverified Commit bc0f8f33 authored by liuzhe-lz, committed by GitHub

Refactor code hierarchy part 3: Unit test (#3037)

parent 80b6cb3b
...@@ -5,6 +5,10 @@
/test/model_path/
/test/temp.json
/test/ut/sdk/*.pth
/test/ut/tools/annotation/_generated/
/ts/nni_manager/exp_profile.json
/ts/nni_manager/metrics.json
/ts/nni_manager/trial_jobs.json
# Logs
......
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

# pylint: skip-file

from .__init__ import *

import sys
import ast
import json
import os
import shutil
import tempfile
from unittest import TestCase, main, skipIf


class AnnotationTestCase(TestCase):

    @classmethod
    def setUpClass(cls):
        os.chdir('nni_annotation')
        if os.path.isdir('_generated'):
            shutil.rmtree('_generated')

    def test_search_space_generator(self):
        shutil.copytree('testcase/annotated', '_generated/annotated')
        search_space = generate_search_space('_generated/annotated')
        with open('testcase/searchspace.json') as f:
            self.assertEqual(search_space, json.load(f))

    @skipIf(sys.version_info.major == 3 and sys.version_info.minor > 7, "skip for python3.8 temporarily")
    def test_code_generator(self):
        code_dir = expand_annotations('testcase/usercode', '_generated/usercode', nas_mode='classic_mode')
        self.assertEqual(code_dir, '_generated/usercode')
        self._assert_source_equal('testcase/annotated/nas.py', '_generated/usercode/nas.py')
        self._assert_source_equal('testcase/annotated/mnist.py', '_generated/usercode/mnist.py')
        self._assert_source_equal('testcase/annotated/dir/simple.py', '_generated/usercode/dir/simple.py')
        with open('testcase/usercode/nonpy.txt') as src, open('_generated/usercode/nonpy.txt') as dst:
            assert src.read() == dst.read()

    def test_annotation_detecting(self):
        dir_ = 'testcase/usercode/non_annotation'
        code_dir = expand_annotations(dir_, tempfile.mkdtemp())
        self.assertEqual(code_dir, dir_)

    def _assert_source_equal(self, src1, src2):
        with open(src1) as f1, open(src2) as f2:
            ast1 = ast.dump(ast.parse(f1.read()))
            ast2 = ast.dump(ast.parse(f2.read()))
            self.assertEqual(ast1, ast2)


if __name__ == '__main__':
    main()
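For context on what these helpers operate on: `generate_search_space` scans NNI annotation comments in user code and collects them into a search space, while `expand_annotations` rewrites the annotated files into plain Python. Below is a minimal sketch of such an annotated snippet; the syntax follows the NNI annotation documentation of this release, and the variable name and choices are illustrative only, not taken from the test fixtures.

```python
# Hypothetical annotated user code (illustrative only, not one of the files under testcase/).
# The bare string literal right above an assignment is an NNI annotation: expand_annotations
# replaces it with a call that asks the tuner for a value, and generate_search_space gathers
# the declared choices into the search space JSON compared against testcase/searchspace.json.

'''@nni.variable(nni.choice(0.1, 0.01, 0.001), name=learning_rate)'''
learning_rate = 0.1

'''@nni.report_final_result(accuracy)'''
```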
{"xOpEwA5w": {"port": 8080, "startTime": "1970/01/1 01:01:01", "endTime": "1970-01-2 01:01:01", "status": "RUNNING", "fileName": "aGew0x", "platform": "local", "experimentName": "example_sklearn-classification"}}
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import json
import os
import random
import shutil
import string
import sys
import time
import unittest
from argparse import Namespace
from datetime import datetime

from tools.nni_trial_tool.base_channel import CommandType
from tools.nni_trial_tool.file_channel import (FileChannel, command_path,
                                               manager_commands_file_name)

sys.path.append("..")

runner_file_name = "commands/runner_commands.txt"
manager_file_name = "commands/manager_commands.txt"


class FileChannelTest(unittest.TestCase):

    def setUp(self):
        self.args = Namespace()
        self.args.node_count = 1
        self.args.node_id = None
        if os.path.exists(command_path):
            shutil.rmtree(command_path)

    def test_send(self):
        fc = None
        try:
            fc = FileChannel(self.args)
            fc.send(CommandType.ReportGpuInfo, "command1")
            fc.send(CommandType.ReportGpuInfo, "command2")
            self.check_timeout(2, lambda: os.path.exists(runner_file_name))
            self.assertTrue(os.path.exists(runner_file_name))
            with open(runner_file_name, "rb") as runner:
                lines = runner.readlines()
                self.assertListEqual(lines, [b'GI00000000000010"command1"\n', b'GI00000000000010"command2"\n'])
        finally:
            if fc is not None:
                fc.close()

    def test_send_multi_node(self):
        fc1 = None
        fc2 = None
        try:
            runner1_file_name = "commands/runner_commands_1.txt"
            self.args.node_id = 1
            fc1 = FileChannel(self.args)
            fc1.send(CommandType.ReportGpuInfo, "command1")
            # wait so the command has enough time to be written before the channel is closed.
            runner2_file_name = "commands/runner_commands_2.txt"
            self.args.node_id = 2
            fc2 = FileChannel(self.args)
            fc2.send(CommandType.ReportGpuInfo, "command1")
            self.check_timeout(2, lambda: os.path.exists(runner1_file_name) and os.path.exists(runner2_file_name))
            self.assertTrue(os.path.exists(runner1_file_name))
            with open(runner1_file_name, "rb") as runner:
                lines1 = runner.readlines()
            self.assertTrue(os.path.exists(runner2_file_name))
            with open(runner2_file_name, "rb") as runner:
                lines2 = runner.readlines()
            self.assertListEqual(lines1, [b'GI00000000000010"command1"\n'])
            self.assertListEqual(lines2, [b'GI00000000000010"command1"\n'])
        finally:
            if fc1 is not None:
                fc1.close()
            if fc2 is not None:
                fc2.close()

    def test_receive(self):
        fc = None
        manager_file = None
        try:
            fc = FileChannel(self.args)
            message = fc.receive()
            self.assertEqual(message, (None, None))
            os.mkdir(command_path)
            manager_file = open(manager_file_name, "wb")
            manager_file.write(b'TR00000000000009"manager"\n')
            manager_file.flush()
            self.check_timeout(2, lambda: fc.received())
            message = fc.receive()
            self.assertEqual(message, (CommandType.NewTrialJob, "manager"))
            manager_file.write(b'TR00000000000010"manager2"\n')
            manager_file.flush()
            self.check_timeout(2, lambda: fc.received())
            message = fc.receive()
            self.assertEqual(message, (CommandType.NewTrialJob, "manager2"))
        finally:
            if fc is not None:
                fc.close()
            if manager_file is not None:
                manager_file.close()

    def check_timeout(self, timeout, callback):
        interval = 0.01
        start = datetime.now().timestamp()
        count = int(timeout / interval)
        for x in range(count):
            if callback():
                break
            time.sleep(interval)
        print("checked {} times, {:3F} seconds".format(x, datetime.now().timestamp() - start))


if __name__ == '__main__':
    unittest.main()
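The byte strings asserted in these tests imply a simple line-oriented framing for file-channel commands: a two-character command code (apparently `GI` for `ReportGpuInfo` and `TR` for `NewTrialJob`), a zero-padded 14-digit length of the JSON-encoded payload, the payload itself, and a newline. The sketch below is inferred purely from the expected values above, not taken from the `FileChannel` implementation:

```python
import json

def frame_command(code, payload):
    """Build one command line in the format the assertions above expect."""
    body = json.dumps(payload)  # e.g. "command1" -> '"command1"' (10 characters)
    return '{}{:014d}{}\n'.format(code, len(body), body).encode()

# These reproduce the exact byte strings checked in test_send and test_receive.
assert frame_command('GI', 'command1') == b'GI00000000000010"command1"\n'
assert frame_command('TR', 'manager') == b'TR00000000000009"manager"\n'
```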
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import os
import shutil
import random
import string
import unittest
import json
import sys

from pyhdfs import HdfsClient

from tools.nni_trial_tool.hdfsClientUtility import copyFileToHdfs, copyDirectoryToHdfs

sys.path.append("..")


class HDFSClientUtilityTest(unittest.TestCase):
    '''Unit test for hdfsClientUtility.py'''

    def setUp(self):
        self.hdfs_file_path = '../../.vscode/hdfsInfo.json'
        self.hdfs_config = None
        try:
            with open(self.hdfs_file_path, 'r') as file:
                self.hdfs_config = json.load(file)
        except Exception as exception:
            print(exception)
        self.hdfs_client = HdfsClient(hosts='{0}:{1}'.format(self.hdfs_config['host'], '50070'), user_name=self.hdfs_config['userName'])

    def get_random_name(self, length):
        return ''.join(random.sample(string.ascii_letters + string.digits, length))

    def test_copy_file_run(self):
        '''test copyFileToHdfs'''
        file_name = self.get_random_name(8)
        file_content = 'hello world!'
        with open('./{}'.format(file_name), 'w') as file:
            file.write(file_content)
        result = copyFileToHdfs('./{}'.format(file_name), '/{0}/{1}'.format(self.hdfs_config['userName'], file_name), self.hdfs_client)
        self.assertTrue(result)
        file_list = self.hdfs_client.listdir('/{0}'.format(self.hdfs_config['userName']))
        self.assertIn(file_name, file_list)
        hdfs_file_name = self.get_random_name(8)
        self.hdfs_client.copy_to_local('/{0}/{1}'.format(self.hdfs_config['userName'], file_name), './{}'.format(hdfs_file_name))
        self.assertTrue(os.path.exists('./{}'.format(hdfs_file_name)))
        with open('./{}'.format(hdfs_file_name), 'r') as file:
            content = file.readline()
        self.assertEqual(file_content, content)
        # clean up
        os.remove('./{}'.format(file_name))
        os.remove('./{}'.format(hdfs_file_name))
        self.hdfs_client.delete('/{0}/{1}'.format(self.hdfs_config['userName'], file_name))

    def test_copy_directory_run(self):
        '''test copyDirectoryToHdfs'''
        directory_name = self.get_random_name(8)
        file_name_list = [self.get_random_name(8), self.get_random_name(8)]
        file_content = 'hello world!'
        os.makedirs('./{}'.format(directory_name))
        for file_name in file_name_list:
            with open('./{0}/{1}'.format(directory_name, file_name), 'w') as file:
                file.write(file_content)
        result = copyDirectoryToHdfs('./{}'.format(directory_name),
                                     '/{0}/{1}'.format(self.hdfs_config['userName'], directory_name), self.hdfs_client)
        self.assertTrue(result)
        directory_list = self.hdfs_client.listdir('/{0}'.format(self.hdfs_config['userName']))
        self.assertIn(directory_name, directory_list)
        sub_file_list = self.hdfs_client.listdir('/{0}/{1}'.format(self.hdfs_config['userName'], directory_name))
        for file_name in file_name_list:
            self.assertIn(file_name, sub_file_list)
        # clean up
        self.hdfs_client.delete('/{0}/{1}/{2}'.format(self.hdfs_config['userName'], directory_name, file_name))
        self.hdfs_client.delete('/{0}/{1}'.format(self.hdfs_config['userName'], directory_name))
        shutil.rmtree('./{}'.format(directory_name))


if __name__ == '__main__':
    unittest.main()
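`setUp` reads the HDFS connection settings from `../../.vscode/hdfsInfo.json` and only uses the `host` and `userName` fields (the WebHDFS port is hard-coded to 50070). The test therefore assumes a local config file of roughly the shape sketched below; the values are placeholders:

```python
# Assumed contents of ../../.vscode/hdfsInfo.json (placeholders, not real hosts);
# json.load() in setUp returns a dict like this, of which only 'host' and 'userName' are used.
hdfs_config = {
    "host": "your-namenode-host",
    "userName": "your-hdfs-user",
}
```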
...@@ -7,13 +7,13 @@ It's convenient to implement auto model pruning with NNI compression and NNI tun
You can easily compress a model with NNI compression. Take pruning for example, you can prune a pretrained model with LevelPruner like this
```python
-from nni.compression.torch import LevelPruner
+from nni.algorithms.compression.pytorch.pruning import LevelPruner
config_list = [{ 'sparsity': 0.8, 'op_types': ['default'] }]
pruner = LevelPruner(model, config_list)
pruner.compress()
```
-The 'default' op_type stands for the module types defined in [default_layers.py](https://github.com/microsoft/nni/blob/v1.9/src/sdk/pynni/nni/compression/torch/default_layers.py) for pytorch.
+The 'default' op_type stands for the module types defined in [default_layers.py](https://github.com/microsoft/nni/blob/v1.9/src/sdk/pynni/nni/compression/pytorch/default_layers.py) for pytorch.
Therefore ```{ 'sparsity': 0.8, 'op_types': ['default'] }``` means that **all layers with specified op_types will be compressed with the same 0.8 sparsity**. When ```pruner.compress()``` is called, the model is compressed with masks; after that you can fine-tune the model normally, and **pruned weights won't be updated** because they have been masked.
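To make the fine-tuning step concrete, here is a minimal sketch of the usual flow after `compress()`. It assumes the standard pruner interface of this NNI release (`export_model` saving weights and masks); the training loop and `train_one_epoch`, `optimizer`, `criterion` are placeholders, not names from the example above.

```python
pruner = LevelPruner(model, config_list)
model = pruner.compress()

# Fine-tune as usual; weights that the pruner has masked stay pruned during training.
for epoch in range(10):
    train_one_epoch(model, optimizer, criterion)   # placeholder training function

# Export the fine-tuned weights and the binary masks for later speedup/inference.
pruner.export_model(model_path='pruned_model.pth', mask_path='mask.pth')
```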
...@@ -71,7 +71,7 @@ Then we need to modify our codes for few lines
```python
import nni
-from nni.compression.torch import *
+from nni.algorithms.compression.pytorch.pruning import *
params = nni.get_parameters()
conv0_sparsity = params['prune_method']['conv0_sparsity']
conv1_sparsity = params['prune_method']['conv1_sparsity']
......
...@@ -7,7 +7,7 @@
## Sensitivity Utilities
```eval_rst
-.. autoclass:: nni.compression.torch.utils.sensitivity_analysis.SensitivityAnalysis
+.. autoclass:: nni.compression.pytorch.utils.sensitivity_analysis.SensitivityAnalysis
    :members:
```
...@@ -15,19 +15,19 @@
## Topology Utilities
```eval_rst
-.. autoclass:: nni.compression.torch.utils.shape_dependency.ChannelDependency
+.. autoclass:: nni.compression.pytorch.utils.shape_dependency.ChannelDependency
    :members:
-.. autoclass:: nni.compression.torch.utils.shape_dependency.GroupDependency
+.. autoclass:: nni.compression.pytorch.utils.shape_dependency.GroupDependency
    :members:
-.. autoclass:: nni.compression.torch.utils.mask_conflict.CatMaskPadding
+.. autoclass:: nni.compression.pytorch.utils.mask_conflict.CatMaskPadding
    :members:
-.. autoclass:: nni.compression.torch.utils.mask_conflict.GroupMaskConflict
+.. autoclass:: nni.compression.pytorch.utils.mask_conflict.GroupMaskConflict
    :members:
-.. autoclass:: nni.compression.torch.utils.mask_conflict.ChannelMaskConflict
+.. autoclass:: nni.compression.pytorch.utils.mask_conflict.ChannelMaskConflict
    :members:
```
...@@ -35,6 +35,6 @@
## Model FLOPs/Parameters Counter
```eval_rst
-.. autofunction:: nni.compression.torch.utils.counter.count_flops_params
+.. autofunction:: nni.compression.pytorch.utils.counter.count_flops_params
```
\ No newline at end of file
...@@ -13,7 +13,7 @@ First, we provide a sensitivity analysis tool (**SensitivityAnalysis**) for user
The following codes show the basic usage of the SensitivityAnalysis.
```python
-from nni.compression.torch.utils.sensitivity_analysis import SensitivityAnalysis
+from nni.compression.pytorch.utils.sensitivity_analysis import SensitivityAnalysis
def val(model):
    model.eval()
...@@ -88,7 +88,7 @@ If the layers have channel dependency are assigned with different sparsities (he
#### Usage
```python
-from nni.compression.torch.utils.shape_dependency import ChannelDependency
+from nni.compression.pytorch.utils.shape_dependency import ChannelDependency
data = torch.ones(1, 3, 224, 224).cuda()
channel_depen = ChannelDependency(net, data)
channel_depen.export('dependency.csv')
...@@ -116,7 +116,7 @@ Set 12,layer4.1.conv1
When the masks of different layers in a model have conflict (for example, assigning different sparsities for the layers that have channel dependency), we can fix the mask conflict by MaskConflict. Specifically, the MaskConflict loads the masks exported by the pruners(L1FilterPruner, etc), and check if there is mask conflict, if so, MaskConflict sets the conflicting masks to the same value.
```
-from nni.compression.torch.utils.mask_conflict import fix_mask_conflict
+from nni.compression.pytorch.utils.mask_conflict import fix_mask_conflict
fixed_mask = fix_mask_conflict('./resnet18_mask', net, data)
```
...@@ -125,10 +125,10 @@ We provide a model counter for calculating the model FLOPs and parameters. This
### Usage
```
-from nni.compression.torch.utils.counter import count_flops_params
+from nni.compression.pytorch.utils.counter import count_flops_params
# Given input size (1, 1, 28, 28)
flops, params = count_flops_params(model, (1, 1, 28, 28))
# Format output size to M (i.e., 10^6)
print(f'FLOPs: {flops/1e6:.3f}M, Params: {params/1e6:.3f}M')
```
\ No newline at end of file
...@@ -29,7 +29,7 @@ class MyMasker(WeightMasker):
        return {'weight_mask': mask}
```
-You can reference nni provided [weight masker](https://github.com/microsoft/nni/blob/v1.9/src/sdk/pynni/nni/compression/torch/pruning/structured_pruning.py) implementations to implement your own weight masker.
+You can reference nni provided [weight masker](https://github.com/microsoft/nni/blob/v1.9/src/sdk/pynni/nni/compression/pytorch/pruning/structured_pruning.py) implementations to implement your own weight masker.
A basic `pruner` looks like this:
...@@ -54,17 +54,17 @@ class MyPruner(Pruner):
```
-Reference nni provided [pruner](https://github.com/microsoft/nni/blob/v1.9/src/sdk/pynni/nni/compression/torch/pruning/one_shot.py) implementations to implement your own pruner class.
+Reference nni provided [pruner](https://github.com/microsoft/nni/blob/v1.9/src/sdk/pynni/nni/compression/pytorch/pruning/one_shot.py) implementations to implement your own pruner class.
***
## Customize a new quantization algorithm
-To write a new quantization algorithm, you can write a class that inherits `nni.compression.torch.Quantizer`. Then, override the member functions with the logic of your algorithm. The member function to override is `quantize_weight`. `quantize_weight` directly returns the quantized weights rather than mask, because for quantization the quantized weights cannot be obtained by applying mask.
+To write a new quantization algorithm, you can write a class that inherits `nni.compression.pytorch.Quantizer`. Then, override the member functions with the logic of your algorithm. The member function to override is `quantize_weight`. `quantize_weight` directly returns the quantized weights rather than mask, because for quantization the quantized weights cannot be obtained by applying mask.
```python
-from nni.compression.torch import Quantizer
+from nni.compression.pytorch import Quantizer
class YourQuantizer(Quantizer):
    def __init__(self, model, config_list):
...@@ -140,7 +140,7 @@ class YourQuantizer(Quantizer):
Sometimes it's necessary for a quantization operation to have a customized backward function, such as [Straight-Through Estimator](https://stackoverflow.com/questions/38361314/the-concept-of-straight-through-estimator-ste), user can customize a backward function as follow:
```python
-from nni.compression.torch.compressor import Quantizer, QuantGrad, QuantType
+from nni.compression.pytorch.compressor import Quantizer, QuantGrad, QuantType
class ClipGrad(QuantGrad):
    @staticmethod
......
...@@ -29,7 +29,7 @@ In this section, we will show how to enable the dependency-aware mode for the fi
To enable the dependency-aware mode for `L1FilterPruner`:
```python
-from nni.compression.torch import L1FilterPruner
+from nni.algorithms.compression.pytorch.pruning import L1FilterPruner
config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
# dummy_input is necessary for the dependency_aware mode
dummy_input = torch.ones(1, 3, 224, 224).cuda()
...@@ -54,4 +54,4 @@ We trained a Mobilenet_v2 model on the cifar10 dataset and prune the model based
![](../../img/mobilev2_l1_cifar.jpg)
In the figure, the `Dependency-aware` represents the L1FilterPruner with dependency-aware mode enabled. `L1 Filter` is the normal `L1FilterPruner` without the dependency-aware mode, and the `No-Dependency` means pruner only prunes the layers that has no channel dependency with other layers. As we can see in the figure, when the dependency-aware mode enabled, the pruner can bring higher accuracy under the same Flops.
\ No newline at end of file
...@@ -15,7 +15,7 @@ There are 3 major components/classes in NNI model compression framework: `Compre
Compressor is the base class for pruner and quantizer, it provides a unified interface for pruner and quantizer for end users, so that pruner and quantizer can be used in the same way. For example, to use a pruner:
```python
-from nni.compression.torch import LevelPruner
+from nni.algorithms.compression.pytorch.pruning import LevelPruner
# load a pretrained model or train a model before using a pruner
...@@ -34,7 +34,7 @@ model = pruner.compress()
To use a quantizer:
```python
-from nni.compression.torch import DoReFaQuantizer
+from nni.algorithms.compression.pytorch.pruning import DoReFaQuantizer
configure_list = [{
    'quant_types': ['weight'],
......
...@@ -21,7 +21,7 @@ For each module, we should prepare four functions, three for shape inference and
## Usage
```python
-from nni.compression.torch import ModelSpeedup
+from nni.compression.pytorch import ModelSpeedup
# model: the model you want to speed up
# dummy_input: dummy input of the model, given to `jit.trace`
# masks_file: the mask file created by pruning algorithms
......
...@@ -37,7 +37,7 @@ We first sort the weights in the specified layer by their absolute values. And t
Tensorflow code
```python
-from nni.compression.tensorflow import LevelPruner
+from nni.algorithms.compression.tensorflow.pruning import LevelPruner
config_list = [{ 'sparsity': 0.8, 'op_types': ['default'] }]
pruner = LevelPruner(model, config_list)
pruner.compress()
...@@ -45,7 +45,7 @@ pruner.compress()
PyTorch code
```python
-from nni.compression.torch import LevelPruner
+from nni.algorithms.compression.pytorch.pruning import LevelPruner
config_list = [{ 'sparsity': 0.8, 'op_types': ['default'] }]
pruner = LevelPruner(model, config_list)
pruner.compress()
...@@ -56,13 +56,13 @@ pruner.compress()
##### PyTorch
```eval_rst
-.. autoclass:: nni.compression.torch.LevelPruner
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.LevelPruner
```
##### Tensorflow
```eval_rst
-.. autoclass:: nni.compression.tensorflow.LevelPruner
+.. autoclass:: nni.algorithms.compression.tensorflow.pruning.LevelPruner
```
...@@ -79,7 +79,7 @@ This is an one-shot pruner, In ['Learning Efficient Convolutional Networks throu
PyTorch code
```python
-from nni.compression.torch import SlimPruner
+from nni.algorithms.compression.pytorch.pruning import SlimPruner
config_list = [{ 'sparsity': 0.8, 'op_types': ['BatchNorm2d'] }]
pruner = SlimPruner(model, config_list)
pruner.compress()
...@@ -90,7 +90,7 @@ pruner.compress()
##### PyTorch
```eval_rst
-.. autoclass:: nni.compression.torch.SlimPruner
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.SlimPruner
```
### Reproduced Experiment
...@@ -122,7 +122,7 @@ We also provide a dependency-aware mode for this pruner to get better speedup fr
PyTorch code
```python
-from nni.compression.torch import FPGMPruner
+from nni.algorithms.compression.pytorch.pruning import FPGMPruner
config_list = [{
    'sparsity': 0.5,
    'op_types': ['Conv2d']
...@@ -135,7 +135,7 @@ pruner.compress()
##### PyTorch
```eval_rst
-.. autoclass:: nni.compression.torch.FPGMPruner
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.FPGMPruner
```
## L1Filter Pruner
...@@ -163,7 +163,7 @@ In addition, we also provide a dependency-aware mode for the L1FilterPruner. For
PyTorch code
```python
-from nni.compression.torch import L1FilterPruner
+from nni.algorithms.compression.pytorch.pruning import L1FilterPruner
config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
pruner = L1FilterPruner(model, config_list)
pruner.compress()
...@@ -173,7 +173,7 @@ pruner.compress()
##### PyTorch
```eval_rst
-.. autoclass:: nni.compression.torch.L1FilterPruner
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.L1FilterPruner
```
### Reproduced Experiment
...@@ -200,7 +200,7 @@ We also provide a dependency-aware mode for this pruner to get better speedup fr
PyTorch code
```python
-from nni.compression.torch import L2FilterPruner
+from nni.algorithms.compression.pytorch.pruning import L2FilterPruner
config_list = [{ 'sparsity': 0.8, 'op_types': ['Conv2d'] }]
pruner = L2FilterPruner(model, config_list)
pruner.compress()
...@@ -211,7 +211,7 @@ pruner.compress()
##### PyTorch
```eval_rst
-.. autoclass:: nni.compression.torch.L2FilterPruner
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.L2FilterPruner
```
***
...@@ -231,7 +231,7 @@ We also provide a dependency-aware mode for this pruner to get better speedup fr
PyTorch code
```python
-from nni.compression.torch import ActivationAPoZRankFilterPruner
+from nni.algorithms.compression.pytorch.pruning import ActivationAPoZRankFilterPruner
config_list = [{
    'sparsity': 0.5,
    'op_types': ['Conv2d']
...@@ -250,7 +250,7 @@ You can view [example](https://github.com/microsoft/nni/blob/v1.9/examples/model
##### PyTorch
```eval_rst
-.. autoclass:: nni.compression.torch.ActivationAPoZRankFilterPruner
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.ActivationAPoZRankFilterPruner
```
***
...@@ -266,7 +266,7 @@ We also provide a dependency-aware mode for this pruner to get better speedup fr
PyTorch code
```python
-from nni.compression.torch import ActivationMeanRankFilterPruner
+from nni.algorithms.compression.pytorch.pruning import ActivationMeanRankFilterPruner
config_list = [{
    'sparsity': 0.5,
    'op_types': ['Conv2d']
...@@ -284,7 +284,7 @@ You can view [example](https://github.com/microsoft/nni/blob/v1.9/examples/model
##### PyTorch
```eval_rst
-.. autoclass:: nni.compression.torch.ActivationMeanRankFilterPruner
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.ActivationMeanRankFilterPruner
```
***
...@@ -304,7 +304,7 @@ We also provide a dependency-aware mode for this pruner to get better speedup fr
PyTorch code
```python
-from nni.compression.torch import TaylorFOWeightFilterPruner
+from nni.algorithms.compression.pytorch.pruning import TaylorFOWeightFilterPruner
config_list = [{
    'sparsity': 0.5,
    'op_types': ['Conv2d']
...@@ -318,7 +318,7 @@ pruner.compress()
##### PyTorch
```eval_rst
-.. autoclass:: nni.compression.torch.TaylorFOWeightFilterPruner
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.TaylorFOWeightFilterPruner
```
***
...@@ -338,7 +338,7 @@ You can prune all weight from 0% to 80% sparsity in 10 epoch with the code below
PyTorch code
```python
-from nni.compression.torch import AGPPruner
+from nni.algorithms.compression.pytorch.pruning import AGPPruner
config_list = [{
    'initial_sparsity': 0,
    'final_sparsity': 0.8,
...@@ -383,7 +383,7 @@ You can view [example](https://github.com/microsoft/nni/blob/v1.9/examples/model
##### PyTorch
```eval_rst
-.. autoclass:: nni.compression.torch.AGPPruner
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.AGPPruner
```
***
...@@ -401,7 +401,7 @@ For more details, please refer to [NetAdapt: Platform-Aware Neural Network Adapt
PyTorch code
```python
-from nni.compression.torch import NetAdaptPruner
+from nni.algorithms.compression.pytorch.pruning import NetAdaptPruner
config_list = [{
    'sparsity': 0.5,
    'op_types': ['Conv2d']
...@@ -417,7 +417,7 @@ You can view [example](https://github.com/microsoft/nni/blob/v1.9/examples/model
##### PyTorch
```eval_rst
-.. autoclass:: nni.compression.torch.NetAdaptPruner
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.NetAdaptPruner
```
...@@ -440,7 +440,7 @@ For more details, please refer to [AutoCompress: An Automatic DNN Structured Pru
PyTorch code
```python
-from nni.compression.torch import SimulatedAnnealingPruner
+from nni.algorithms.compression.pytorch.pruning import SimulatedAnnealingPruner
config_list = [{
    'sparsity': 0.5,
    'op_types': ['Conv2d']
...@@ -456,7 +456,7 @@ You can view [example](https://github.com/microsoft/nni/blob/v1.9/examples/model
##### PyTorch
```eval_rst
-.. autoclass:: nni.compression.torch.SimulatedAnnealingPruner
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.SimulatedAnnealingPruner
```
...@@ -473,7 +473,7 @@ For more details, please refer to [AutoCompress: An Automatic DNN Structured Pru
PyTorch code
```python
-from nni.compression.torch import ADMMPruner
+from nni.algorithms.compression.pytorch.pruning import ADMMPruner
config_list = [{
    'sparsity': 0.5,
    'op_types': ['Conv2d']
...@@ -492,7 +492,7 @@ You can view [example](https://github.com/microsoft/nni/blob/v1.9/examples/model
##### PyTorch
```eval_rst
-.. autoclass:: nni.compression.torch.AutoCompressPruner
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.AutoCompressPruner
```
## AMC Pruner
...@@ -511,7 +511,7 @@ For more details, please refer to [AMC: AutoML for Model Compression and Acceler
PyTorch code
```python
-from nni.compression.torch import AMCPruner
+from nni.algorithms.compression.pytorch.pruning import AMCPruner
config_list = [{
    'op_types': ['Conv2d', 'Linear']
}]
...@@ -526,7 +526,7 @@ You can view [example](https://github.com/microsoft/nni/blob/v1.9/examples/model
##### PyTorch
```eval_rst
-.. autoclass:: nni.compression.torch.AMCPruner
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.AMCPruner
```
### Reproduced Experiment
...@@ -554,7 +554,7 @@ For more details, please refer to [A Systematic DNN Weight Pruning Framework usi
PyTorch code
```python
-from nni.compression.torch import ADMMPruner
+from nni.algorithms.compression.pytorch.pruning import ADMMPruner
config_list = [{
    'sparsity': 0.8,
    'op_types': ['Conv2d'],
...@@ -575,7 +575,7 @@ You can view [example](https://github.com/microsoft/nni/blob/v1.9/examples/model
##### PyTorch
```eval_rst
-.. autoclass:: nni.compression.torch.ADMMPruner
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.ADMMPruner
```
...@@ -595,7 +595,7 @@ If the configured final sparsity is P (e.g., 0.8) and there are n times iterativ
PyTorch code
```python
-from nni.compression.torch import LotteryTicketPruner
+from nni.algorithms.compression.pytorch.pruning import LotteryTicketPruner
config_list = [{
    'prune_iterations': 5,
    'sparsity': 0.8,
...@@ -619,7 +619,7 @@ The above configuration means that there are 5 times of iterative pruning. As th
##### PyTorch
```eval_rst
-.. autoclass:: nni.compression.torch.LotteryTicketPruner
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.LotteryTicketPruner
```
### Reproduced Experiment
...@@ -643,7 +643,7 @@ For more details, please refer to [Learning both Weights and Connections for Eff
PyTorch code
```python
-from nni.compression.torch import SensitivityPruner
+from nni.algorithms.compression.pytorch.pruning import SensitivityPruner
config_list = [{
    'sparsity': 0.5,
    'op_types': ['Conv2d']
...@@ -659,5 +659,5 @@ pruner.compress(eval_args=[model], finetune_args=[model])
##### PyTorch
```eval_rst
-.. autoclass:: nni.compression.torch.SensitivityPruner
+.. autoclass:: nni.algorithms.compression.pytorch.pruning.SensitivityPruner
```
...@@ -13,7 +13,7 @@ We provide Naive Quantizer to quantizer weight to default 8 bits, you can use it
### Usage
pytorch
```python
-model = nni.compression.torch.NaiveQuantizer(model).compress()
+model = nni.algorithms.compression.pytorch.quantization.NaiveQuantizer(model).compress()
```
***
...@@ -31,7 +31,7 @@ You can quantize your model to 8 bits with the code below before your training c
PyTorch code
```python
-from nni.compression.torch import QAT_Quantizer
+from nni.algorithms.compression.pytorch.quantization import QAT_Quantizer
model = Mnist()
config_list = [{
...@@ -79,7 +79,7 @@ To implement DoReFa Quantizer, you can add code below before your training code
PyTorch code
```python
-from nni.compression.torch import DoReFaQuantizer
+from nni.algorithms.compression.pytorch.quantization import DoReFaQuantizer
config_list = [{
    'quant_types': ['weight'],
    'quant_bits': 8,
...@@ -110,7 +110,7 @@ In [Binarized Neural Networks: Training Deep Neural Networks with Weights and Ac
PyTorch code
```python
-from nni.compression.torch import BNNQuantizer
+from nni.algorithms.compression.pytorch.quantization import BNNQuantizer
model = VGG_Cifar10(num_classes=10)
configure_list = [{
...@@ -146,4 +146,4 @@ We implemented one of the experiments in [Binarized Neural Networks: Training De
| VGGNet | 86.93% |
The experiments code can be found at [examples/model_compress/BNN_quantizer_cifar10.py]( https://github.com/microsoft/nni/tree/v1.9/examples/model_compress/BNN_quantizer_cifar10.py)
\ No newline at end of file
...@@ -49,7 +49,7 @@ The complete code of model compression examples can be found [here](https://gith
Masks do not provide real speedup of your model. The model should be speeded up based on the exported masks, thus, we provide an API to speed up your model as shown below. After invoking `apply_compression_results` on your model, your model becomes a smaller one with shorter inference latency.
```python
-from nni.compression.torch import apply_compression_results
+from nni.compression.pytorch import apply_compression_results
apply_compression_results(model, 'mask_vgg19_cifar10.pth')
```
...@@ -62,7 +62,7 @@ The example code for users to apply model compression on a user model can be fou
PyTorch code
```python
-from nni.compression.torch import LevelPruner
+from nni.algorithms.compression.pytorch.pruning import LevelPruner
config_list = [{ 'sparsity': 0.8, 'op_types': ['default'] }]
pruner = LevelPruner(model, config_list)
pruner.compress()
...@@ -71,14 +71,14 @@ pruner.compress()
Tensorflow code
```python
-from nni.compression.tensorflow import LevelPruner
+from nni.algorithms.compression.tensorflow.pruning import LevelPruner
config_list = [{ 'sparsity': 0.8, 'op_types': ['default'] }]
pruner = LevelPruner(tf.get_default_graph(), config_list)
pruner.compress()
```
-You can use other compression algorithms in the package of `nni.compression`. The algorithms are implemented in both PyTorch and TensorFlow (partial support on TensorFlow), under `nni.compression.torch` and `nni.compression.tensorflow` respectively. You can refer to [Pruner](./Pruner.md) and [Quantizer](./Quantizer.md) for detail description of supported algorithms. Also if you want to use knowledge distillation, you can refer to [KDExample](../TrialExample/KDExample.md)
+You can use other compression algorithms in the package of `nni.compression`. The algorithms are implemented in both PyTorch and TensorFlow (partial support on TensorFlow), under `nni.compression.pytorch` and `nni.compression.tensorflow` respectively. You can refer to [Pruner](./Pruner.md) and [Quantizer](./Quantizer.md) for detail description of supported algorithms. Also if you want to use knowledge distillation, you can refer to [KDExample](../TrialExample/KDExample.md)
A compression algorithm is first instantiated with a `config_list` passed in. The specification of this `config_list` will be described later.
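For reference, a `config_list` is simply a list of dicts, each of which selects a set of operations and the compression options to apply to them. The sketch below uses only keys that appear in the examples on this page plus `op_names`; the layer name `fc1` is a placeholder, not taken from any example above:

```python
# A config_list is a list of rules; each rule selects layers and sets compression options for them.
config_list = [
    {'sparsity': 0.8, 'op_types': ['Conv2d']},   # match layers by module type
    {'sparsity': 0.5, 'op_names': ['fc1']},      # 'op_names' targets specific layers ('fc1' is a placeholder)
]
```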
......
# CDARTS
## Introduction
CDARTS builds a cyclic feedback mechanism between the search and evaluation networks. First, the search network generates an initial topology for evaluation, so that the weights of the evaluation network can be optimized. Second, the architecture topology in the search network is further optimized by the label supervision in classification, as well as the regularization from the evaluation network through feature distillation. Repeating the above cycle results in a joint optimization of the search and evaluation networks, and thus enables the evolution of the topology to fit the final evaluation network.
In implementation of `CdartsTrainer`, it first instantiates two models and two mutators (one for each). The first model is the so-called "search network", which is mutated with a `RegularizedDartsMutator` -- a mutator with subtle differences with `DartsMutator`. The second model is the "evaluation network", which is mutated with a discrete mutator that leverages the previous search network mutator, to sample a single path each time. Trainers train models and mutators alternatively. Users can refer to [references](#reference) if they are interested in more details on these trainers and mutators.
## Reproduction Results
This is CDARTS based on the NNI platform, which currently supports CIFAR10 search and retrain. ImageNet search and retrain should also be supported, and we provide corresponding interfaces. Our reproduced results on NNI are slightly lower than the paper, but much higher than the original DARTS. Here we show the results of three independent experiments on CIFAR10.
| Runs | Paper | NNI |
| ---- |:-------------:| :-----:|
| 1 | 97.52 | 97.44 |
| 2 | 97.53 | 97.48 |
| 3 | 97.58 | 97.56 |
## Examples
[Example code](https://github.com/microsoft/nni/tree/v1.9/examples/nas/cdarts)
```bash
# In case NNI code is not cloned. If the code is cloned already, ignore this line and enter code folder.
git clone https://github.com/Microsoft/nni.git
# install apex for distributed training.
git clone https://github.com/NVIDIA/apex
cd apex
python setup.py install --cpp_ext --cuda_ext
# search the best architecture
cd examples/nas/cdarts
bash run_search_cifar.sh
# train the best architecture.
bash run_retrain_cifar.sh
```
## Reference
### PyTorch
```eval_rst
-.. autoclass:: nni.nas.pytorch.cdarts.CdartsTrainer
+.. autoclass:: nni.algorithms.nas.pytorch.cdarts.CdartsTrainer
    :members:
-.. autoclass:: nni.nas.pytorch.cdarts.RegularizedDartsMutator
+.. autoclass:: nni.algorithms.nas.pytorch.cdarts.RegularizedDartsMutator
    :members:
-.. autoclass:: nni.nas.pytorch.cdarts.DartsDiscreteMutator
+.. autoclass:: nni.algorithms.nas.pytorch.cdarts.DartsDiscreteMutator
    :members:
-.. autoclass:: nni.nas.pytorch.cdarts.RegularizedMutatorParallel
+.. autoclass:: nni.algorithms.nas.pytorch.cdarts.RegularizedMutatorParallel
    :members:
```
...@@ -40,10 +40,10 @@ python3 retrain.py --arc-checkpoint ./checkpoints/epoch_49.json
### PyTorch
```eval_rst
-.. autoclass:: nni.nas.pytorch.darts.DartsTrainer
+.. autoclass:: nni.algorithms.nas.pytorch.darts.DartsTrainer
    :members:
-.. autoclass:: nni.nas.pytorch.darts.DartsMutator
+.. autoclass:: nni.algorithms.nas.pytorch.darts.DartsMutator
    :members:
```
......
...@@ -34,9 +34,9 @@ python3 search.py -h
### PyTorch
```eval_rst
-.. autoclass:: nni.nas.pytorch.enas.EnasTrainer
+.. autoclass:: nni.algorithms.nas.pytorch.enas.EnasTrainer
    :members:
-.. autoclass:: nni.nas.pytorch.enas.EnasMutator
+.. autoclass:: nni.algorithms.nas.pytorch.enas.EnasMutator
    :members:
```
...@@ -27,7 +27,7 @@ def metrics_fn(output, target):
    # metrics function receives output and target and computes a dict of metrics
    return {"acc1": top1_accuracy(output, target)}
-from nni.nas.pytorch import enas
+from nni.algorithms.nas.pytorch import enas
trainer = enas.EnasTrainer(model,
                           loss=criterion,
                           metrics=metrics_fn,
......