"docs/source/en/_toctree.yml" did not exist on "b9b7039f0e326f57be233cdcbcf4cda325100649"
Unverified Commit 0fb78620 authored by SparkSnail, committed by GitHub

Merge pull request #238 from microsoft/master

merge master
parents 3ee09617 b8d19e45
@@ -116,6 +116,10 @@ Copy-Item config -Destination .\dist\ -Recurse -Force
 cd ..\webui
 cmd /c $NNI_YARN
 cmd /c $NNI_YARN build
+# Building NasUI
+cd ..\nasui
+cmd /c $NNI_YARN
+cmd /c $NNI_YARN build
 cd ..\..
@@ -130,4 +134,7 @@ $PKG_JSON = $NNI_PKG_FOLDER + "\package.json"
 (Get-Content $PKG_JSON).replace($NNI_VERSION_TEMPLATE, $NNI_VERSION_VALUE) | Set-Content $PKG_JSON
 cmd /c $NNI_YARN --prod --cwd $NNI_PKG_FOLDER
 $NNI_PKG_FOLDER_STATIC = $NNI_PKG_FOLDER + "\static"
+$NASUI_PKG_FOLDER = $NNI_PKG_FOLDER + "\nasui"
 Copy-Item "src\webui\build" $NNI_PKG_FOLDER_STATIC -Recurse
+Copy-Item "src\nasui\build" $NASUI_PKG_FOLDER -Recurse
+Copy-Item "src\nasui\server.js" $NASUI_PKG_FOLDER -Recurse
@@ -65,7 +65,8 @@
     "node.extend": "^1.1.7",
     "hoek": "^4.2.1",
     "js-yaml": "^3.13.1",
-    "npm": "^6.13.4"
+    "npm": "^6.13.4",
+    "acorn": ">=7.1.1"
   },
   "engines": {
     "node": ">=10.0.0"
...
@@ -92,6 +92,7 @@ export class PAIJobInfoCollector {
                     paiTrialJob.status = 'SUCCEEDED';
                     break;
                 case 'STOPPED':
+                case 'STOPPING':
                     if (paiTrialJob.isEarlyStopped !== undefined) {
                         paiTrialJob.status = paiTrialJob.isEarlyStopped === true ?
                             'EARLY_STOPPED' : 'USER_CANCELED';
...
@@ -380,9 +380,10 @@ acorn-jsx@^5.1.0:
   version "5.1.0"
   resolved "https://registry.yarnpkg.com/acorn-jsx/-/acorn-jsx-5.1.0.tgz#294adb71b57398b0680015f0a38c563ee1db5384"

-acorn@^7.1.0:
-  version "7.1.0"
-  resolved "https://registry.yarnpkg.com/acorn/-/acorn-7.1.0.tgz#949d36f2c292535da602283586c2477c57eb2d6c"
+acorn@>=7.1.1, acorn@^7.1.0:
+  version "7.1.1"
+  resolved "https://registry.yarnpkg.com/acorn/-/acorn-7.1.1.tgz#e35668de0b402f359de515c5482a1ab9f89a69bf"
+  integrity sha512-add7dgA5ppRPxCFJoAGfMDi7PIBXq1RtGo7BhbLaxwrXPOmw8gq48Y9ozT01hUKy9byMjlR20EJhu5zlkErEkg==

 agent-base@4, agent-base@^4.3.0:
   version "4.3.0"
...
@@ -70,7 +70,7 @@ class ModelSpeedup:
     This class is to speedup the model with provided weight mask
     """

-    def __init__(self, model, dummy_input, masks_file):
+    def __init__(self, model, dummy_input, masks_file, map_location=None):
         """
         Parameters
         ----------
@@ -80,10 +80,12 @@ class ModelSpeedup:
             The dummy input for ```jit.trace```, users should put it on right device before pass in
         masks_file : str
             The path of user provided mask file
+        map_location : str
+            the device on which masks are placed, same as map_location in ```torch.load```
         """
         self.bound_model = model
         self.dummy_input = dummy_input
-        self.masks = torch.load(masks_file)
+        self.masks = torch.load(masks_file, map_location)
         self.is_training = model.training
         # to obtain forward graph, model should be in ```eval``` mode
...
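The new map_location argument matters when masks were exported on a GPU but the speedup runs on a CPU-only machine. A minimal usage sketch, assuming ModelSpeedup and its speedup_model() method are importable as in NNI's speedup examples; the model and mask path below are hypothetical:

import torch
import torch.nn as nn
from nni.compression.torch import ModelSpeedup  # import path is an assumption

class Net(nn.Module):  # tiny stand-in model for illustration
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(1, 8, kernel_size=3)

    def forward(self, x):
        return self.conv(x)

model = Net()
dummy_input = torch.randn(1, 1, 28, 28)
# 'masks.pth' may have been saved on a CUDA device; remap it onto the CPU
speedup = ModelSpeedup(model, dummy_input, 'masks.pth', map_location='cpu')
speedup.speedup_model()  # method name assumed from NNI's speedup examples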
@@ -7,3 +7,4 @@ from .weight_rank_filter_pruners import *
 from .activation_rank_filter_pruners import *
 from .quantizers import *
 from .apply_compression import apply_compression_results
+from .gradient_rank_filter_pruners import *
@@ -3,6 +3,8 @@
 import logging
 import torch
+from schema import And, Optional
+from .utils import CompressorSchema
 from .compressor import Pruner

 __all__ = ['ActivationAPoZRankFilterPruner', 'ActivationMeanRankFilterPruner']
@@ -35,13 +37,9 @@ class ActivationRankFilterPruner(Pruner):
         super().__init__(model, config_list, optimizer)
         self.set_wrappers_attribute("if_calculated", False)
-        self.set_wrappers_attribute("collected_activation", [])
         self.statistics_batch_num = statistics_batch_num
+        self.hook_id = self._add_activation_collector()

-        def collector(module_, input_, output):
-            if len(module_.collected_activation) < self.statistics_batch_num:
-                module_.collected_activation.append(self.activation(output.detach().cpu()))
-        self.add_activation_collector(collector)
         assert activation in ['relu', 'relu6']
         if activation == 'relu':
             self.activation = torch.nn.functional.relu
@@ -50,27 +48,57 @@ class ActivationRankFilterPruner(Pruner):
         else:
             self.activation = None

+    def _add_activation_collector(self):
+        def collector(collected_activation):
+            def hook(module_, input_, output):
+                collected_activation.append(self.activation(output.detach().cpu()))
+            return hook
+        self.collected_activation = {}
+        self._fwd_hook_id += 1
+        self._fwd_hook_handles[self._fwd_hook_id] = []
+        for wrapper_idx, wrapper in enumerate(self.get_modules_wrapper()):
+            self.collected_activation[wrapper_idx] = []
+            handle = wrapper.register_forward_hook(collector(self.collected_activation[wrapper_idx]))
+            self._fwd_hook_handles[self._fwd_hook_id].append(handle)
+        return self._fwd_hook_id
+
+    def validate_config(self, model, config_list):
+        """
+        Parameters
+        ----------
+        model : torch.nn.module
+            Model to be pruned
+        config_list : list
+            Supported keys for each list item:
+            - sparsity : percentage of convolutional filters to be pruned.
+        """
+        schema = CompressorSchema([{
+            'sparsity': And(float, lambda n: 0 < n < 1),
+            Optional('op_types'): [str],
+            Optional('op_names'): [str]
+        }], model, logger)
+        schema.validate(config_list)
     def get_mask(self, base_mask, activations, num_prune):
         raise NotImplementedError('{} get_mask is not implemented'.format(self.__class__.__name__))

-    def calc_mask(self, wrapper, **kwargs):
+    def calc_mask(self, wrapper, wrapper_idx, **kwargs):
         """
         Calculate the mask of given layer.
         Filters with the smallest importance criterion which is calculated from the activation are masked.

         Parameters
         ----------
-        layer : LayerInfo
-            the layer to instrument the compression operation
-        config : dict
-            layer's pruning config
+        wrapper : Module
+            the layer to instrument the compression operation

         Returns
         -------
         dict
             dictionary for storing masks
         """
         weight = wrapper.module.weight.data
         op_type = wrapper.type
         config = wrapper.config
@@ -80,21 +108,27 @@ class ActivationRankFilterPruner(Pruner):
         if wrapper.if_calculated:
             return None
         mask_weight = torch.ones(weight.size()).type_as(weight).detach()
         if hasattr(wrapper.module, 'bias') and wrapper.module.bias is not None:
             mask_bias = torch.ones(wrapper.module.bias.size()).type_as(wrapper.module.bias).detach()
         else:
             mask_bias = None
         mask = {'weight_mask': mask_weight, 'bias_mask': mask_bias}
         try:
             filters = weight.size(0)
             num_prune = int(filters * config.get('sparsity'))
-            if filters < 2 or num_prune < 1 or len(wrapper.collected_activation) < self.statistics_batch_num:
+            acts = self.collected_activation[wrapper_idx]
+            if filters < 2 or num_prune < 1 or len(acts) < self.statistics_batch_num:
                 return mask
-            mask = self.get_mask(mask, wrapper.collected_activation, num_prune)
+            mask = self.get_mask(mask, acts, num_prune)
         finally:
-            if len(wrapper.collected_activation) == self.statistics_batch_num:
+            if len(acts) >= self.statistics_batch_num:
                 wrapper.if_calculated = True
+                if self.hook_id in self._fwd_hook_handles:
+                    self.remove_activation_collector(self.hook_id)
         return mask
@@ -128,7 +162,7 @@ class ActivationAPoZRankFilterPruner(ActivationRankFilterPruner):
     def get_mask(self, base_mask, activations, num_prune):
         """
         Calculate the mask of given layer.
-        Filters with the smallest APoZ(average percentage of zeros) of output activations are masked.
+        Filters with the largest APoZ(average percentage of zeros) of output activations are masked.

         Parameters
         ----------
...
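After this refactor, the forward hooks registered by _add_activation_collector fill self.collected_activation during ordinary forward passes, and calc_mask drops the hooks once statistics_batch_num batches have been observed. A minimal sketch of the intended flow, reusing the tiny Net from the sketch above; the constructor keywords and export path are assumptions:

import torch
from nni.compression.torch import ActivationAPoZRankFilterPruner  # export path assumed

model = Net()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
config_list = [{'sparsity': 0.5, 'op_types': ['Conv2d']}]
pruner = ActivationAPoZRankFilterPruner(model, config_list, optimizer, statistics_batch_num=1)
model = pruner.compress()

# each forward pass feeds the collector hooks
model(torch.randn(4, 1, 28, 28))
# recompute masks from the collected activations; once enough batches are
# seen, the hooks are removed via remove_activation_collector
pruner.update_mask()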
@@ -3,13 +3,14 @@
 import logging
 import torch
-from .compressor import Pruner

 logger = logging.getLogger('torch apply compression')

-def apply_compression_results(model, masks_file):
+def apply_compression_results(model, masks_file, map_location=None):
     """
     Apply the masks from ```masks_file``` to the model
+    Note: this API is for inference only, because it simply multiplies weights with
+    their corresponding masks when it is called.

     Parameters
     ----------
@@ -17,54 +18,12 @@ def apply_compression_results(model, masks_file):
         The model to be compressed
     masks_file : str
         The path of the mask file
+    map_location : str
+        the device on which masks are placed, same as map_location in ```torch.load```
     """
-    apply_comp = ApplyCompression(model, masks_file)
-    apply_comp.compress()
-
-class ApplyCompression(Pruner):
-    """
-    This class is not to generate masks, but applying existing masks
-    """
-
-    def __init__(self, model, masks_file):
-        """
-        Parameters
-        ----------
-        model : torch.nn.module
-            Model to be masked
-        masks_file : str
-            The path of user provided mask file
-        """
-        self.bound_model = model
-        self.masks = torch.load(masks_file)
-        for module_name in self.masks:
-            print('module_name: ', module_name)
-        config_list = self._build_config()
-        super().__init__(model, config_list)
-
-    def _build_config(self):
-        op_names = []
-        for module_name in self.masks:
-            op_names.append(module_name)
-        return [{'sparsity': 1, 'op_types': ['default', 'BatchNorm2d'], 'op_names': op_names}]
-
-    def calc_mask(self, layer, config, **kwargs):
-        """
-        Directly return the corresponding mask
-
-        Parameters
-        ----------
-        layer : LayerInfo
-            The layer to be pruned
-        config : dict
-            Pruning configurations for this weight
-        kwargs : dict
-            Auxiliary information
-
-        Returns
-        -------
-        dict
-            Mask of the layer
-        """
-        assert layer.name in self.masks
-        return self.masks[layer.name]
+    masks = torch.load(masks_file, map_location)
+    for name, module in model.named_modules():
+        if name in masks:
+            module.weight.data = module.weight.data.mul_(masks[name]['weight'])
+            if hasattr(module, 'bias') and module.bias is not None and 'bias' in masks[name]:
+                module.bias.data = module.bias.data.mul_(masks[name]['bias'])
\ No newline at end of file
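Since the helper now just multiplies each masked module's weight (and bias) in place, applying a compression result at inference time reduces to the sketch below; the checkpoint and mask paths are placeholders, and the import path follows the __init__.py hunk above:

import torch
from nni.compression.torch import apply_compression_results

model = Net()  # tiny model from the earlier sketch
model.load_state_dict(torch.load('model.pth', map_location='cpu'))  # hypothetical checkpoint
# masks exported by a pruner (e.g. via export_model); remapped like torch.load
apply_compression_results(model, 'masks.pth', map_location='cpu')
model.eval()  # masks are baked into the weights, so this is valid for inference only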
@@ -40,6 +40,9 @@ class Compressor:
         optimizer: pytorch optimizer
             optimizer used to train the model
         """
+        assert isinstance(model, torch.nn.Module)
+        self.validate_config(model, config_list)
+
         self.bound_model = model
         self.config_list = config_list
         self.optimizer = optimizer
@@ -54,9 +57,17 @@ class Compressor:
         for layer, config in self._detect_modules_to_compress():
             wrapper = self._wrap_modules(layer, config)
             self.modules_wrapper.append(wrapper)
+        if not self.modules_wrapper:
+            _logger.warning('Nothing is configured to compress, please check your model and config_list')
         self._wrap_model()

+    def validate_config(self, model, config_list):
+        """
+        subclass can optionally implement this method to check if config_list is valid
+        """
+        pass
+
     def _detect_modules_to_compress(self):
         """
         detect all modules should be compressed, and save the result in `self.modules_to_compress`.
@@ -65,6 +76,8 @@ class Compressor:
         if self.modules_to_compress is None:
             self.modules_to_compress = []
             for name, module in self.bound_model.named_modules():
+                if module == self.bound_model:
+                    continue
                 layer = LayerInfo(name, module)
                 config = self.select_config(layer)
                 if config is not None:
@@ -301,8 +314,8 @@ class Pruner(Compressor):
         return self.bound_model

     def update_mask(self):
-        for wrapper in self.get_modules_wrapper():
-            masks = self.calc_mask(wrapper)
+        for wrapper_idx, wrapper in enumerate(self.get_modules_wrapper()):
+            masks = self.calc_mask(wrapper, wrapper_idx=wrapper_idx)
             if masks is not None:
                 for k in masks:
                     assert hasattr(wrapper, k), "there is no attribute '%s' in wrapper" % k
...
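Because Compressor.__init__ now calls self.validate_config before anything is wrapped, a custom compressor only needs to override that hook. A sketch following the CompressorSchema pattern this commit introduces everywhere else; the subclass itself and the import paths are assumptions:

import logging
from schema import And, Optional
from nni.compression.torch.compressor import Pruner       # path assumed from this diff
from nni.compression.torch.utils import CompressorSchema  # path assumed from this diff

logger = logging.getLogger('my_pruner')

class MyPruner(Pruner):  # hypothetical subclass for illustration
    def validate_config(self, model, config_list):
        # runs inside Compressor.__init__, so a bad config_list
        # raises schema.SchemaError before any module is wrapped
        schema = CompressorSchema([{
            'sparsity': And(float, lambda n: 0 < n < 1),
            Optional('op_types'): [str],
            Optional('op_names'): [str]
        }], model, logger)
        schema.validate(config_list)

    def calc_mask(self, wrapper, **kwargs):
        return None  # keep all weights; real pruners return mask dicts here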
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import logging
import torch
from .compressor import Pruner

__all__ = ['TaylorFOWeightFilterPruner']

logger = logging.getLogger('torch gradient rank filter pruners')

class GradientRankFilterPruner(Pruner):
    """
    A structured pruning base class that prunes the filters with the smallest
    importance criterion in convolution layers (using gradient values)
    to achieve a preset level of network sparsity.
    """

    def __init__(self, model, config_list, optimizer, statistics_batch_num=1):
        """
        Parameters
        ----------
        model : torch.nn.module
            Model to be pruned
        config_list : list
            Supported keys for each list item:
            - sparsity : percentage of convolutional filters to be pruned.
        optimizer : torch.optim.Optimizer
            Optimizer used to train the model
        statistics_batch_num : int
            Num of batches for calculating contribution
        """
        super().__init__(model, config_list, optimizer)
        self.set_wrappers_attribute("if_calculated", False)
        self.set_wrappers_attribute("contribution", None)
        self.statistics_batch_num = statistics_batch_num
        self.iterations = 0
        self.old_step = self.optimizer.step
        self.patch_optimizer(self.calc_contributions)

    def calc_contributions(self):
        raise NotImplementedError('{} calc_contributions is not implemented'.format(self.__class__.__name__))

    def get_mask(self, base_mask, contribution, num_prune):
        raise NotImplementedError('{} get_mask is not implemented'.format(self.__class__.__name__))

    def calc_mask(self, wrapper, **kwargs):
        """
        Calculate the mask of given layer.
        Filters with the smallest importance criterion, which is calculated from the gradient, are masked.

        Parameters
        ----------
        wrapper : Module
            the layer to instrument the compression operation

        Returns
        -------
        dict
            dictionary for storing masks
        """
        weight = wrapper.module.weight.data
        op_type = wrapper.type
        config = wrapper.config
        assert 0 <= config.get('sparsity') < 1, "sparsity must be in the range [0, 1)"
        assert op_type in config.get('op_types')
        if wrapper.if_calculated:
            return None
        mask_weight = torch.ones(weight.size()).type_as(weight).detach()
        if hasattr(wrapper.module, 'bias') and wrapper.module.bias is not None:
            mask_bias = torch.ones(wrapper.module.bias.size()).type_as(wrapper.module.bias).detach()
        else:
            mask_bias = None
        mask = {'weight_mask': mask_weight, 'bias_mask': mask_bias}
        try:
            filters = weight.size(0)
            num_prune = int(filters * config.get('sparsity'))
            if filters < 2 or num_prune < 1 or self.iterations < self.statistics_batch_num:
                return mask
            mask = self.get_mask(mask, wrapper.contribution, num_prune)
        finally:
            if self.iterations >= self.statistics_batch_num:
                wrapper.if_calculated = True
        return mask

class TaylorFOWeightFilterPruner(GradientRankFilterPruner):
    """
    A structured pruning algorithm that prunes the filters with the smallest
    importance approximations based on the first-order Taylor expansion on the weight.
    Molchanov, Pavlo and Mallya, Arun and Tyree, Stephen and Frosio, Iuri and Kautz, Jan,
    "Importance Estimation for Neural Network Pruning", CVPR 2019.
    http://jankautz.com/publications/Importance4NNPruning_CVPR19.pdf
    """

    def __init__(self, model, config_list, optimizer, statistics_batch_num=1):
        """
        Parameters
        ----------
        model : torch.nn.module
            Model to be pruned
        config_list : list
            Supported keys for each list item:
            - sparsity : percentage of convolutional filters to be pruned.
        optimizer : torch.optim.Optimizer
            Optimizer used to train the model
        statistics_batch_num : int
            Num of batches for activation statistics
        """
        super().__init__(model, config_list, optimizer, statistics_batch_num)

    def get_mask(self, base_mask, contribution, num_prune):
        """
        Calculate the mask of given layer.
        Filters with the smallest importance approximations are masked.

        Parameters
        ----------
        base_mask : dict
            The basic mask with the same shape of weight, all items in the basic mask are 1.
        contribution : torch.Tensor
            Layer's importance approximations
        num_prune : int
            Num of filters to prune

        Returns
        -------
        dict
            dictionary for storing masks
        """
        prune_indices = torch.argsort(contribution)[:num_prune]
        for idx in prune_indices:
            base_mask['weight_mask'][idx] = 0.
            if base_mask['bias_mask'] is not None:
                base_mask['bias_mask'][idx] = 0.
        return base_mask

    def calc_contributions(self):
        """
        Calculate the estimated importance of filters as a sum of individual contributions
        based on the first-order Taylor expansion.
        """
        if self.iterations >= self.statistics_batch_num:
            return
        for wrapper in self.get_modules_wrapper():
            filters = wrapper.module.weight.size(0)
            contribution = (wrapper.module.weight * wrapper.module.weight.grad).data.pow(2).view(filters, -1).sum(dim=1)
            if wrapper.contribution is None:
                wrapper.contribution = contribution
            else:
                wrapper.contribution += contribution
        self.iterations += 1
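Since the constructor patches optimizer.step, the Taylor contributions (w * grad)^2 accumulate during ordinary training steps. A minimal end-to-end sketch, reusing the tiny Net from the first sketch; the export path and export_model signature are assumptions:

import torch
from nni.compression.torch import TaylorFOWeightFilterPruner  # exported via the __init__.py hunk above

model = Net()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
config_list = [{'sparsity': 0.5, 'op_types': ['Conv2d']}]
pruner = TaylorFOWeightFilterPruner(model, config_list, optimizer, statistics_batch_num=1)
model = pruner.compress()

for _ in range(2):  # a couple of batches suffices for statistics_batch_num=1
    loss = model(torch.randn(4, 1, 28, 28)).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()  # patched: also accumulates per-filter (w * grad)^2 contributions

pruner.update_mask()  # recompute masks from the accumulated contributions
pruner.export_model('pruned.pth', 'masks.pth')  # hypothetical paths; signature assumed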
@@ -4,7 +4,9 @@
 import copy
 import logging
 import torch
+from schema import And, Optional
 from .compressor import Pruner
+from .utils import CompressorSchema

 __all__ = ['LevelPruner', 'AGP_Pruner', 'SlimPruner', 'LotteryTicketPruner']
@@ -31,6 +33,23 @@ class LevelPruner(Pruner):
         super().__init__(model, config_list, optimizer)
         self.set_wrappers_attribute("if_calculated", False)

+    def validate_config(self, model, config_list):
+        """
+        Parameters
+        ----------
+        model : torch.nn.module
+            Model to be pruned
+        config_list : list
+            List of pruning configs
+        """
+        schema = CompressorSchema([{
+            'sparsity': And(float, lambda n: 0 < n < 1),
+            Optional('op_types'): [str],
+            Optional('op_names'): [str]
+        }], model, logger)
+        schema.validate(config_list)
+
     def calc_mask(self, wrapper, **kwargs):
         """
         Calculate the mask of given layer
@@ -90,6 +109,27 @@ class AGP_Pruner(Pruner):
         self.now_epoch = 0
         self.set_wrappers_attribute("if_calculated", False)

+    def validate_config(self, model, config_list):
+        """
+        Parameters
+        ----------
+        model : torch.nn.module
+            Model to be pruned
+        config_list : list
+            List of pruning configs
+        """
+        schema = CompressorSchema([{
+            'initial_sparsity': And(float, lambda n: 0 <= n <= 1),
+            'final_sparsity': And(float, lambda n: 0 <= n <= 1),
+            'start_epoch': And(int, lambda n: n >= 0),
+            'end_epoch': And(int, lambda n: n >= 0),
+            'frequency': And(int, lambda n: n > 0),
+            Optional('op_types'): [str],
+            Optional('op_names'): [str]
+        }], model, logger)
+        schema.validate(config_list)
+
     def calc_mask(self, wrapper, **kwargs):
         """
         Calculate the mask of given layer.
@@ -208,6 +248,24 @@ class SlimPruner(Pruner):
         self.global_threshold = torch.topk(all_bn_weights.view(-1), k, largest=False)[0].max()
         self.set_wrappers_attribute("if_calculated", False)

+    def validate_config(self, model, config_list):
+        """
+        Parameters
+        ----------
+        model : torch.nn.module
+            Model to be pruned
+        config_list : list
+            Supported keys for each list item:
+            - sparsity : percentage of convolutional filters to be pruned.
+        """
+        schema = CompressorSchema([{
+            'sparsity': And(float, lambda n: 0 < n < 1),
+            'op_types': ['BatchNorm2d'],
+            Optional('op_names'): [str]
+        }], model, logger)
+        schema.validate(config_list)
+
     def calc_mask(self, wrapper, **kwargs):
         """
         Calculate the mask of given layer.
@@ -273,7 +331,7 @@ class LotteryTicketPruner(Pruner):
         """
         super().__init__(model, config_list, optimizer)
         self.curr_prune_iteration = None
-        self.prune_iterations = self._validate_config(config_list)
+        self.prune_iterations = config_list[0]['prune_iterations']

         # save init weights and optimizer
         self.reset_weights = reset_weights
@@ -286,16 +344,26 @@ class LotteryTicketPruner(Pruner):
         if lr_scheduler is not None:
             self._scheduler_state = copy.deepcopy(lr_scheduler.state_dict())

-    def _validate_config(self, config_list):
-        prune_iterations = None
-        for config in config_list:
-            assert 'prune_iterations' in config, 'prune_iterations must exist in your config'
-            assert 'sparsity' in config, 'sparsity must exist in your config'
-            if prune_iterations is not None:
-                assert prune_iterations == config[
-                    'prune_iterations'], 'The values of prune_iterations must be equal in your config'
-            prune_iterations = config['prune_iterations']
-        return prune_iterations
+    def validate_config(self, model, config_list):
+        """
+        Parameters
+        ----------
+        model : torch.nn.module
+            Model to be pruned
+        config_list : list
+            Supported keys:
+            - prune_iterations : The number of rounds for the iterative pruning.
+            - sparsity : The final sparsity when the compression is done.
+        """
+        schema = CompressorSchema([{
+            'sparsity': And(float, lambda n: 0 < n < 1),
+            'prune_iterations': And(int, lambda n: n > 0),
+            Optional('op_types'): [str],
+            Optional('op_names'): [str]
+        }], model, logger)
+        schema.validate(config_list)
+
+        assert len(set([x['prune_iterations'] for x in config_list])) == 1, 'The values of prune_iterations must be equal in your config'

     def _calc_sparsity(self, sparsity):
         keep_ratio_once = (1 - sparsity) ** (1 / self.prune_iterations)
...
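One practical consequence of the new schemas: And(float, ...) rejects Python ints, so 'initial_sparsity': 0 no longer validates, which is exactly why the MNIST example near the end of this diff changes it to 0.. A config_list that passes the AGP_Pruner schema:

config_list = [{
    'initial_sparsity': 0.,   # must be a float in [0, 1]; the int 0 fails And(float, ...)
    'final_sparsity': 0.8,    # float in [0, 1]
    'start_epoch': 0,         # int >= 0
    'end_epoch': 10,          # int >= 0
    'frequency': 1,           # int > 0
    'op_types': ['default']   # op_types or op_names must be given (see utils.py below)
}]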
@@ -3,6 +3,8 @@
 import logging
 import torch
+from schema import Schema, And, Or, Optional
+from .utils import CompressorSchema
 from .compressor import Quantizer, QuantGrad, QuantType

 __all__ = ['NaiveQuantizer', 'QAT_Quantizer', 'DoReFaQuantizer', 'BNNQuantizer']
@@ -17,6 +19,16 @@ class NaiveQuantizer(Quantizer):
         super().__init__(model, config_list, optimizer)
         self.layer_scale = {}

+    def validate_config(self, model, config_list):
+        schema = CompressorSchema([{
+            Optional('quant_types'): ['weight'],
+            Optional('quant_bits'): Or(8, {'weight': 8}),
+            Optional('op_types'): [str],
+            Optional('op_names'): [str]
+        }], model, logger)
+        schema.validate(config_list)
+
     def quantize_weight(self, weight, wrapper, **kwargs):
         new_scale = weight.abs().max() / 127
         scale = max(self.layer_scale.get(wrapper.name, 0), new_scale)
@@ -137,6 +149,28 @@ class QAT_Quantizer(Quantizer):
         layer.module.register_buffer('tracked_max_biased', torch.zeros(1))
         layer.module.register_buffer('tracked_max', torch.zeros(1))

+    def validate_config(self, model, config_list):
+        """
+        Parameters
+        ----------
+        model : torch.nn.module
+            Model to be quantized
+        config_list : list of dict
+            List of configurations
+        """
+        schema = CompressorSchema([{
+            Optional('quant_types'): Schema([lambda x: x in ['weight', 'output']]),
+            Optional('quant_bits'): Or(And(int, lambda n: 0 < n < 32), Schema({
+                Optional('weight'): And(int, lambda n: 0 < n < 32),
+                Optional('output'): And(int, lambda n: 0 < n < 32),
+            })),
+            Optional('quant_start_step'): And(int, lambda n: n >= 0),
+            Optional('op_types'): [str],
+            Optional('op_names'): [str]
+        }], model, logger)
+        schema.validate(config_list)
+
     def _quantize(self, bits, op, real_val):
         """
         quantize real value.
def __init__(self, model, config_list, optimizer=None): def __init__(self, model, config_list, optimizer=None):
super().__init__(model, config_list, optimizer) super().__init__(model, config_list, optimizer)
def validate_config(self, model, config_list):
"""
Parameters
----------
model : torch.nn.module
Model to be pruned
config_list : list of dict
List of configurations
"""
schema = CompressorSchema([{
Optional('quant_types'): Schema([lambda x: x in ['weight']]),
Optional('quant_bits'): Or(And(int, lambda n: 0 < n < 32), Schema({
Optional('weight'): And(int, lambda n: 0 < n < 32)
})),
Optional('op_types'): [str],
Optional('op_names'): [str]
}], model, logger)
schema.validate(config_list)
def quantize_weight(self, weight, wrapper, **kwargs): def quantize_weight(self, weight, wrapper, **kwargs):
weight_bits = get_bits_length(wrapper.config, 'weight') weight_bits = get_bits_length(wrapper.config, 'weight')
out = weight.tanh() out = weight.tanh()
@@ -264,6 +318,27 @@ class BNNQuantizer(Quantizer):
         super().__init__(model, config_list, optimizer)
         self.quant_grad = ClipGrad

+    def validate_config(self, model, config_list):
+        """
+        Parameters
+        ----------
+        model : torch.nn.module
+            Model to be quantized
+        config_list : list of dict
+            List of configurations
+        """
+        schema = CompressorSchema([{
+            Optional('quant_types'): Schema([lambda x: x in ['weight', 'output']]),
+            Optional('quant_bits'): Or(And(int, lambda n: 0 < n < 32), Schema({
+                Optional('weight'): And(int, lambda n: 0 < n < 32),
+                Optional('output'): And(int, lambda n: 0 < n < 32),
+            })),
+            Optional('op_types'): [str],
+            Optional('op_names'): [str]
+        }], model, logger)
+        schema.validate(config_list)
+
     def quantize_weight(self, weight, wrapper, **kwargs):
         out = torch.sign(weight)
         # remove zeros
...
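A config that satisfies the QAT_Quantizer schema above; quant_bits may be a bare int or a per-target dict, in both cases constrained to the open interval (0, 32):

config_list = [{
    'quant_types': ['weight', 'output'],
    'quant_bits': {'weight': 8, 'output': 8},  # or simply 8
    'quant_start_step': 10,
    'op_types': ['Conv2d', 'Linear']
}]
# QAT_Quantizer(model, config_list, optimizer) would now raise
# schema.SchemaError for e.g. quant_bits=34 or quant_types='abc'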
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

from schema import Schema, And, SchemaError

def validate_op_names(model, op_names, logger):
    found_names = set(map(lambda x: x[0], model.named_modules()))
    not_found_op_names = list(set(op_names) - found_names)
    if not_found_op_names:
        logger.warning('op_names %s not found in model', not_found_op_names)
    return True

def validate_op_types(model, op_types, logger):
    found_types = set(['default']) | set(map(lambda x: type(x[1]).__name__, model.named_modules()))
    not_found_op_types = list(set(op_types) - found_types)
    if not_found_op_types:
        logger.warning('op_types %s not found in model', not_found_op_types)
    return True

def validate_op_types_op_names(data):
    if not ('op_types' in data or 'op_names' in data):
        raise SchemaError('Either op_types or op_names must be specified.')
    return True

class CompressorSchema:
    def __init__(self, data_schema, model, logger):
        assert isinstance(data_schema, list) and len(data_schema) <= 1
        self.data_schema = data_schema
        self.compressor_schema = Schema(self._modify_schema(data_schema, model, logger))

    def _modify_schema(self, data_schema, model, logger):
        if not data_schema:
            return data_schema
        for k in data_schema[0]:
            old_schema = data_schema[0][k]
            if k == 'op_types' or (isinstance(k, Schema) and k._schema == 'op_types'):
                new_schema = And(old_schema, lambda n: validate_op_types(model, n, logger))
                data_schema[0][k] = new_schema
            if k == 'op_names' or (isinstance(k, Schema) and k._schema == 'op_names'):
                new_schema = And(old_schema, lambda n: validate_op_names(model, n, logger))
                data_schema[0][k] = new_schema
        data_schema[0] = And(data_schema[0], lambda d: validate_op_types_op_names(d))
        return data_schema

    def validate(self, data):
        self.compressor_schema.validate(data)
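CompressorSchema wraps a plain schema.Schema; on top of the usual type checks it warns when the given op_types/op_names do not occur in the model and rejects items that specify neither. A standalone sketch, reusing the tiny Net from the first example; the import path is assumed from this diff:

import logging
from schema import And, Optional, SchemaError
from nni.compression.torch.utils import CompressorSchema  # path assumed from this diff

logger = logging.getLogger('schema_demo')
model = Net()

schema = CompressorSchema([{
    'sparsity': And(float, lambda n: 0 < n < 1),
    Optional('op_types'): [str],
    Optional('op_names'): [str]
}], model, logger)

schema.validate([{'sparsity': 0.5, 'op_types': ['Conv2d']}])  # passes
try:
    schema.validate([{'sparsity': 0.5}])  # neither op_types nor op_names given
except SchemaError as err:
    print(err)  # 'Either op_types or op_names must be specified.'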
@@ -3,6 +3,8 @@
 import logging
 import torch
+from schema import And, Optional
+from .utils import CompressorSchema
 from .compressor import Pruner

 __all__ = ['L1FilterPruner', 'L2FilterPruner', 'FPGMPruner']
@@ -31,6 +33,24 @@ class WeightRankFilterPruner(Pruner):
         super().__init__(model, config_list, optimizer)
         self.set_wrappers_attribute("if_calculated", False)

+    def validate_config(self, model, config_list):
+        """
+        Parameters
+        ----------
+        model : torch.nn.module
+            Model to be pruned
+        config_list : list
+            Supported keys for each list item:
+            - sparsity : percentage of convolutional filters to be pruned.
+        """
+        schema = CompressorSchema([{
+            'sparsity': And(float, lambda n: 0 < n < 1),
+            Optional('op_types'): ['Conv2d'],
+            Optional('op_names'): [str]
+        }], model, logger)
+        schema.validate(config_list)
+
     def get_mask(self, base_mask, weight, num_prune):
         raise NotImplementedError('{} get_mask is not implemented'.format(self.__class__.__name__))
@@ -40,10 +60,8 @@ class WeightRankFilterPruner(Pruner):
         Filters with the smallest importance criterion of the kernel weights are masked.

         Parameters
         ----------
-        layer : LayerInfo
-            the layer to instrument the compression operation
-        config : dict
-            layer's pruning config
+        wrapper : Module
+            the module to instrument the compression operation

         Returns
         -------
         dict
...
@@ -63,9 +63,8 @@ def get_next_parameter():

 def send_metric(string):
     if _nni_platform != 'local':
-        data = (string).encode('utf8')
-        assert len(data) < 1000000, 'Metric too long'
-        print('NNISDK_ME%s' % (data), flush=True)
+        assert len(string) < 1000000, 'Metric too long'
+        print("NNISDK_MEb'%s'" % (string), flush=True)
     else:
         data = (string + '\n').encode('utf8')
         assert len(data) < 1000000, 'Metric too long'
...
@@ -125,8 +125,9 @@ def report_final_result(metric):
     Parameters
     ----------
-    metric:
-        serializable object.
+    metric: serializable object
+        Usually (for built-in tuners to work), it should be a number, or
+        a dict with key "default" (a number), and any other extra keys.
     """
     assert _params or trial_env_vars.NNI_PLATFORM is None, \
         'nni.get_next_parameter() needs to be called before report_final_result'
...
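The sharpened docstring describes the same contract that the new WebUI warning (in the App.tsx hunk near the end of this diff) checks at runtime; concretely, the two legal shapes are:

import nni

nni.report_final_result(0.93)                               # a plain number, or
nni.report_final_result({'default': 0.93, 'latency': 1.2})  # a dict whose "default" value is a number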
@@ -6,6 +6,7 @@ import numpy as np
 import tensorflow as tf
 import torch
 import torch.nn.functional as F
+import schema
 import nni.compression.torch as torch_compressor
 import math
@@ -227,6 +228,52 @@ class CompressorTestCase(TestCase):
         assert all(mask1['bias_mask'].numpy() == np.array([0., 0., 0., 1., 1.]))
         assert all(mask2['bias_mask'].numpy() == np.array([0., 0., 0., 1., 1.]))
+    def test_torch_taylorFOweight_pruner(self):
+        """
+        Filters with the minimum importance approximation based on the first-order
+        Taylor expansion on the weights (w*grad)**2 are pruned in this paper:
+        Importance Estimation for Neural Network Pruning,
+        http://jankautz.com/publications/Importance4NNPruning_CVPR19.pdf
+
+        So if the sparsity of conv1 is 0.2, the expected masks should mask out filter 0; this can be verified through:
+        `all(torch.sum(mask1['weight_mask'], (1, 2, 3)).numpy() == np.array([0., 25., 25., 25., 25.]))`
+
+        If the sparsity of conv2 is 0.6, the expected masks should mask out filters 4,5,6,7,8,9; this can be verified through:
+        `all(torch.sum(mask2['weight_mask'], (1, 2, 3)).numpy() == np.array([125., 125., 125., 125., 0., 0., 0., 0., 0., 0., ]))`
+        """
+        w1 = np.array([np.zeros((1, 5, 5)), np.ones((1, 5, 5)), np.ones((1, 5, 5)) * 2,
+                       np.ones((1, 5, 5)) * 3, np.ones((1, 5, 5)) * 4])
+        w2 = np.array([[[[i + 1] * 5] * 5] * 5 for i in range(10)[::-1]])
+
+        grad1 = np.array([np.ones((1, 5, 5)) * -1, np.ones((1, 5, 5)) * 1, np.ones((1, 5, 5)) * -1,
+                          np.ones((1, 5, 5)) * 1, np.ones((1, 5, 5)) * -1])
+
+        grad2 = np.array([[[[(-1)**i] * 5] * 5] * 5 for i in range(10)])
+
+        config_list = [{'sparsity': 0.2, 'op_types': ['Conv2d'], 'op_names': ['conv1']},
+                       {'sparsity': 0.6, 'op_types': ['Conv2d'], 'op_names': ['conv2']}]
+
+        model = TorchModel()
+        optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
+        pruner = torch_compressor.TaylorFOWeightFilterPruner(model, config_list, optimizer, statistics_batch_num=1)
+
+        x = torch.rand((1, 1, 28, 28), requires_grad=True)
+        model.conv1.module.weight.data = torch.tensor(w1).float()
+        model.conv2.module.weight.data = torch.tensor(w2).float()
+
+        y = model(x)
+        y.backward(torch.ones_like(y))
+
+        model.conv1.module.weight.grad.data = torch.tensor(grad1).float()
+        model.conv2.module.weight.grad.data = torch.tensor(grad2).float()
+        optimizer.step()
+
+        mask1 = pruner.calc_mask(model.conv1)
+        mask2 = pruner.calc_mask(model.conv2)
+        assert all(torch.sum(mask1['weight_mask'], (1, 2, 3)).numpy() == np.array([0., 25., 25., 25., 25.]))
+        assert all(torch.sum(mask2['weight_mask'], (1, 2, 3)).numpy() == np.array([125., 125., 125., 125., 0., 0., 0., 0., 0., 0., ]))
     def test_torch_QAT_quantizer(self):
         model = TorchModel()
         config_list = [{
@@ -267,6 +314,79 @@ class CompressorTestCase(TestCase):
         assert math.isclose(model.relu.module.tracked_min_biased, 0.002, abs_tol=eps)
         assert math.isclose(model.relu.module.tracked_max_biased, 0.00998, abs_tol=eps)
+    def test_torch_pruner_validation(self):
+        # test bad configuration
+        pruner_classes = [torch_compressor.__dict__[x] for x in \
+            ['LevelPruner', 'SlimPruner', 'FPGMPruner', 'L1FilterPruner', 'L2FilterPruner', 'AGP_Pruner', \
+             'ActivationMeanRankFilterPruner', 'ActivationAPoZRankFilterPruner']]
+
+        bad_configs = [
+            [
+                {'sparsity': '0.2'},
+                {'sparsity': 0.6}
+            ],
+            [
+                {'sparsity': 0.2},
+                {'sparsity': 1.6}
+            ],
+            [
+                {'sparsity': 0.2, 'op_types': 'default'},
+                {'sparsity': 0.6}
+            ],
+            [
+                {'sparsity': 0.2},
+                {'sparsity': 0.6, 'op_names': 'abc'}
+            ]
+        ]
+        model = TorchModel()
+        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
+        for pruner_class in pruner_classes:
+            for config_list in bad_configs:
+                try:
+                    pruner_class(model, config_list, optimizer)
+                    print(config_list)
+                    assert False, 'Validation error should be raised for bad configuration'
+                except schema.SchemaError:
+                    pass
+                except:
+                    print('FAILED:', pruner_class, config_list)
+                    raise
+
+    def test_torch_quantizer_validation(self):
+        # test bad configuration
+        quantizer_classes = [torch_compressor.__dict__[x] for x in \
+            ['NaiveQuantizer', 'QAT_Quantizer', 'DoReFaQuantizer', 'BNNQuantizer']]
+
+        bad_configs = [
+            [
+                {'bad_key': 'abc'}
+            ],
+            [
+                {'quant_types': 'abc'}
+            ],
+            [
+                {'quant_bits': 34}
+            ],
+            [
+                {'op_types': 'default'}
+            ],
+            [
+                {'quant_bits': {'abc': 123}}
+            ]
+        ]
+        model = TorchModel()
+        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
+        for quantizer_class in quantizer_classes:
+            for config_list in bad_configs:
+                try:
+                    quantizer_class(model, config_list, optimizer)
+                    print(config_list)
+                    assert False, 'Validation error should be raised for bad configuration'
+                except schema.SchemaError:
+                    pass
+                except:
+                    print('FAILED:', quantizer_class, config_list)
+                    raise
+
 if __name__ == '__main__':
     main()
@@ -34,7 +34,7 @@ prune_config = {
     'agp': {
         'pruner_class': AGP_Pruner,
         'config_list': [{
-            'initial_sparsity': 0,
+            'initial_sparsity': 0.,
             'final_sparsity': 0.8,
             'start_epoch': 0,
             'end_epoch': 10,
...
@@ -43,3 +43,8 @@
         color: #333;
     }
 }
+
+.warning {
+    padding-bottom: 15px;
+    background-color: #f2f2f2;
+}
@@ -3,6 +3,7 @@ import { Stack } from 'office-ui-fabric-react';
 import { COLUMN } from './static/const';
 import { EXPERIMENT, TRIALS } from './static/datamodel';
 import NavCon from './components/NavCon';
+import MessageInfo from './components/Modals/MessageInfo';
 import './App.scss';

 interface AppState {
@@ -11,10 +12,13 @@ interface AppState {
     experimentUpdateBroadcast: number;
     trialsUpdateBroadcast: number;
     metricGraphMode: 'max' | 'min'; // tuner's optimize_mode filed
+    isillegalFinal: boolean;
+    expWarningMessage: string;
 }

 class App extends React.Component<{}, AppState> {
     private timerId!: number | null;
+    private dataFormatimer!: number;

     constructor(props: {}) {
         super(props);
@@ -23,7 +27,9 @@ class App extends React.Component<{}, AppState> {
             columnList: COLUMN,
             experimentUpdateBroadcast: 0,
             trialsUpdateBroadcast: 0,
-            metricGraphMode: 'max'
+            metricGraphMode: 'max',
+            isillegalFinal: false,
+            expWarningMessage: ''
         };
     }
@@ -33,8 +39,34 @@ class App extends React.Component<{}, AppState> {
         this.setState(state => ({ trialsUpdateBroadcast: state.trialsUpdateBroadcast + 1 }));
         this.timerId = window.setTimeout(this.refresh, this.state.interval * 1000);
         this.setState({ metricGraphMode: (EXPERIMENT.optimizeMode === 'minimize' ? 'min' : 'max') });
+        // check whether the final result is legal:
+        // get a succeeded trial and inspect its final result data format
+        // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+        this.dataFormatimer = window.setInterval(this.getFinalDataFormat, this.state.interval * 1000);
     }

+    getFinalDataFormat = (): void => {
+        for (let i = 0; this.state.isillegalFinal === false; i++) {
+            if (TRIALS.succeededTrials()[0] !== undefined && TRIALS.succeededTrials()[0].final !== undefined) {
+                // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+                const oneSucceedTrial = JSON.parse(JSON.parse(TRIALS.succeededTrials()[0].final!.data));
+                if (typeof oneSucceedTrial === 'number' || oneSucceedTrial.hasOwnProperty('default')) {
+                    window.clearInterval(this.dataFormatimer);
+                    break;
+                } else {
+                    // illegal final data
+                    this.setState(() => ({
+                        isillegalFinal: true,
+                        expWarningMessage: 'WebUI supports final results that are numbers or dicts with a "default" key; the final result of this experiment is illegal, please check your data.'
+                    }));
+                    window.clearInterval(this.dataFormatimer);
+                }
+            } else {
+                break;
+            }
+        }
+    }
+
     changeInterval = (interval: number): void => {
         this.setState({ interval });
         if (this.timerId === null && interval !== 0) {
@@ -54,7 +86,9 @@ class App extends React.Component<{}, AppState> {
     }

     render(): React.ReactNode {
-        const { interval, columnList, experimentUpdateBroadcast, trialsUpdateBroadcast, metricGraphMode } = this.state;
+        const { interval, columnList, experimentUpdateBroadcast, trialsUpdateBroadcast,
+            metricGraphMode, isillegalFinal, expWarningMessage
+        } = this.state;
         if (experimentUpdateBroadcast === 0 || trialsUpdateBroadcast === 0) {
             return null; // TODO: render a loading page
         }
@@ -73,11 +107,14 @@ class App extends React.Component<{}, AppState> {
             <Stack className="nni" style={{ minHeight: window.innerHeight }}>
                 <div className="header">
                     <div className="headerCon">
-                        <NavCon changeInterval={this.changeInterval} refreshFunction={this.lastRefresh}/>
+                        <NavCon changeInterval={this.changeInterval} refreshFunction={this.lastRefresh} />
                     </div>
                 </div>
                 <Stack className="contentBox">
                     <Stack className="content">
+                        {isillegalFinal && <div className="warning">
+                            <MessageInfo info={expWarningMessage} typeInfo="warning" />
+                        </div>}
                         {reactPropsChildren}
                     </Stack>
                 </Stack>
...