Unverified Commit 4773c918 authored by SparkSnail's avatar SparkSnail Committed by GitHub
Browse files

Merge pull request #236 from microsoft/master

merge master
parents 75028bd7 3c0ef842
...@@ -641,6 +641,7 @@ class BOHB(MsgDispatcherBase): ...@@ -641,6 +641,7 @@ class BOHB(MsgDispatcherBase):
if not _value: if not _value:
logger.info("Useless trial data, value is %s, skip this trial data.", _value) logger.info("Useless trial data, value is %s, skip this trial data.", _value)
continue continue
_value = extract_scalar_reward(_value)
budget_exist_flag = False budget_exist_flag = False
barely_params = dict() barely_params = dict()
for keys in _params: for keys in _params:
......
...@@ -124,9 +124,9 @@ class L1FilterPruner(WeightRankFilterPruner): ...@@ -124,9 +124,9 @@ class L1FilterPruner(WeightRankFilterPruner):
w_abs_structured = w_abs.view(filters, -1).sum(dim=1) w_abs_structured = w_abs.view(filters, -1).sum(dim=1)
threshold = torch.topk(w_abs_structured.view(-1), num_prune, largest=False)[0].max() threshold = torch.topk(w_abs_structured.view(-1), num_prune, largest=False)[0].max()
mask_weight = torch.gt(w_abs_structured, threshold)[:, None, None, None].expand_as(weight).type_as(weight) mask_weight = torch.gt(w_abs_structured, threshold)[:, None, None, None].expand_as(weight).type_as(weight)
mask_bias = torch.gt(w_abs_structured, threshold).type_as(weight) mask_bias = torch.gt(w_abs_structured, threshold).type_as(weight).detach() if base_mask['bias_mask'] is not None else None
return {'weight_mask': mask_weight.detach(), 'bias_mask': mask_bias.detach()} return {'weight_mask': mask_weight.detach(), 'bias_mask': mask_bias}
class L2FilterPruner(WeightRankFilterPruner): class L2FilterPruner(WeightRankFilterPruner):
...@@ -172,9 +172,9 @@ class L2FilterPruner(WeightRankFilterPruner): ...@@ -172,9 +172,9 @@ class L2FilterPruner(WeightRankFilterPruner):
w_l2_norm = torch.sqrt((w ** 2).sum(dim=1)) w_l2_norm = torch.sqrt((w ** 2).sum(dim=1))
threshold = torch.topk(w_l2_norm.view(-1), num_prune, largest=False)[0].max() threshold = torch.topk(w_l2_norm.view(-1), num_prune, largest=False)[0].max()
mask_weight = torch.gt(w_l2_norm, threshold)[:, None, None, None].expand_as(weight).type_as(weight) mask_weight = torch.gt(w_l2_norm, threshold)[:, None, None, None].expand_as(weight).type_as(weight)
mask_bias = torch.gt(w_l2_norm, threshold).type_as(weight) mask_bias = torch.gt(w_l2_norm, threshold).type_as(weight).detach() if base_mask['bias_mask'] is not None else None
return {'weight_mask': mask_weight.detach(), 'bias_mask': mask_bias.detach()} return {'weight_mask': mask_weight.detach(), 'bias_mask': mask_bias}
class FPGMPruner(WeightRankFilterPruner): class FPGMPruner(WeightRankFilterPruner):
......
...@@ -309,6 +309,7 @@ class SMACTuner(Tuner): ...@@ -309,6 +309,7 @@ class SMACTuner(Tuner):
if not _value: if not _value:
self.logger.info("Useless trial data, value is %s, skip this trial data.", _value) self.logger.info("Useless trial data, value is %s, skip this trial data.", _value)
continue continue
_value = extract_scalar_reward(_value)
# convert the keys in loguniform and categorical types # convert the keys in loguniform and categorical types
valid_entry = True valid_entry = True
for key, value in _params.items(): for key, value in _params.items():
......
...@@ -34,7 +34,7 @@ class BuiltinTunersTestCase(TestCase): ...@@ -34,7 +34,7 @@ class BuiltinTunersTestCase(TestCase):
- [ ] save_checkpoint - [ ] save_checkpoint
- [X] update_search_space - [X] update_search_space
- [X] generate_multiple_parameters - [X] generate_multiple_parameters
- [ ] import_data - [X] import_data
- [ ] trial_end - [ ] trial_end
- [x] receive_trial_result - [x] receive_trial_result
""" """
...@@ -141,50 +141,128 @@ class BuiltinTunersTestCase(TestCase): ...@@ -141,50 +141,128 @@ class BuiltinTunersTestCase(TestCase):
logger.info("Full supported search space: %s", full_supported_search_space) logger.info("Full supported search space: %s", full_supported_search_space)
self.search_space_test_one(tuner_factory, full_supported_search_space) self.search_space_test_one(tuner_factory, full_supported_search_space)
def import_data_test(self, tuner_factory, stype="choice_str"):
    """Exercise a tuner's ``import_data`` before and during a run.

    Imports data at the beginning (one scalar reward, one dict reward),
    generates and reports a few trials, then imports again in the middle
    with duplicate records plus one new record, and generates once more.

    Parameters
    ----------
    tuner_factory : lambda
        a lambda for instantiate a tuner
    stype : str
        the value type of hp choice, support "choice_str" and "choice_num"
    """
    # Per-stype fixtures: the choice values of the search space, and the
    # three values used (in order) by the imported data records.
    fixtures = {
        "choice_str": (["cat", "dog", "elephant", "cow", "sheep", "panda"],
                       ("cat", "dog", "cow")),
        "choice_num": ([10, 20, 30, 40, 50, 60],
                       (20, 60, 50)),
    }
    if stype not in fixtures:
        raise RuntimeError("Unexpected stype")
    choice_values, picks = fixtures[stype]
    search_space = {stype: {"_type": "choice", "_value": choice_values}}

    def _records(count):
        # Build the first `count` import records; rebuilt fresh each call so
        # the tuner never sees the same dict objects twice.
        rewards = [1.1, {"default": 1.2, "tmp": 2}, 1.3]
        return [{"parameter": {stype: value}, "value": reward}
                for value, reward in zip(picks[:count], rewards[:count])]

    tuner = tuner_factory()
    self.assertIsInstance(tuner, Tuner)
    tuner.update_search_space(search_space)

    # import data at the beginning
    tuner.import_data(_records(2))
    logger.info("Imported data successfully at the beginning")
    # generate parameters and report results for them
    parameters = tuner.generate_multiple_parameters(list(range(3)))
    for i in range(3):
        tuner.receive_trial_result(i, parameters[i], random.uniform(-100, 100))
    # import data in the middle: repeats the first two records (duplicates)
    # plus one new record
    tuner.import_data(_records(3))
    logger.info("Imported data successfully in the middle")
    # generate parameters again after the mid-run import
    parameters = tuner.generate_multiple_parameters([3])
    tuner.receive_trial_result(3, parameters[0], random.uniform(-100, 100))
def test_grid_search(self): def test_grid_search(self):
self.exhaustive = True self.exhaustive = True
self.search_space_test_all(lambda: GridSearchTuner(), tuner_fn = lambda: GridSearchTuner()
self.search_space_test_all(tuner_fn,
supported_types=["choice", "randint", "quniform"]) supported_types=["choice", "randint", "quniform"])
self.import_data_test(tuner_fn)
def test_tpe(self): def test_tpe(self):
self.search_space_test_all(lambda: HyperoptTuner("tpe"), tuner_fn = lambda: HyperoptTuner("tpe")
self.search_space_test_all(tuner_fn,
ignore_types=["uniform_equal", "qloguniform_equal", "loguniform_equal", "quniform_clip_2"]) ignore_types=["uniform_equal", "qloguniform_equal", "loguniform_equal", "quniform_clip_2"])
# NOTE: types are ignored because `tpe.py line 465, in adaptive_parzen_normal assert prior_sigma > 0` # NOTE: types are ignored because `tpe.py line 465, in adaptive_parzen_normal assert prior_sigma > 0`
self.import_data_test(tuner_fn)
def test_random_search(self): def test_random_search(self):
self.search_space_test_all(lambda: HyperoptTuner("random_search")) tuner_fn = lambda: HyperoptTuner("random_search")
self.search_space_test_all(tuner_fn)
self.import_data_test(tuner_fn)
def test_anneal(self): def test_anneal(self):
self.search_space_test_all(lambda: HyperoptTuner("anneal")) tuner_fn = lambda: HyperoptTuner("anneal")
self.search_space_test_all(tuner_fn)
self.import_data_test(tuner_fn)
def test_smac(self): def test_smac(self):
if sys.platform == "win32": if sys.platform == "win32":
return # smac doesn't work on windows return # smac doesn't work on windows
self.search_space_test_all(lambda: SMACTuner(), tuner_fn = lambda: SMACTuner()
self.search_space_test_all(tuner_fn,
supported_types=["choice", "randint", "uniform", "quniform", "loguniform"]) supported_types=["choice", "randint", "uniform", "quniform", "loguniform"])
self.import_data_test(tuner_fn)
def test_batch(self): def test_batch(self):
self.exhaustive = True self.exhaustive = True
self.search_space_test_all(lambda: BatchTuner(), tuner_fn = lambda: BatchTuner()
self.search_space_test_all(tuner_fn,
supported_types=["choice"]) supported_types=["choice"])
self.import_data_test(tuner_fn)
def test_evolution(self): def test_evolution(self):
# Needs enough population size, otherwise it will throw a runtime error # Needs enough population size, otherwise it will throw a runtime error
self.search_space_test_all(lambda: EvolutionTuner(population_size=100)) tuner_fn = lambda: EvolutionTuner(population_size=100)
self.search_space_test_all(tuner_fn)
self.import_data_test(tuner_fn)
def test_gp(self): def test_gp(self):
self.test_round = 1 # NOTE: GP tuner got hanged for multiple testing round self.test_round = 1 # NOTE: GP tuner got hanged for multiple testing round
self.search_space_test_all(lambda: GPTuner(), tuner_fn = lambda: GPTuner()
self.search_space_test_all(tuner_fn,
supported_types=["choice", "randint", "uniform", "quniform", "loguniform", supported_types=["choice", "randint", "uniform", "quniform", "loguniform",
"qloguniform"], "qloguniform"],
ignore_types=["normal", "lognormal", "qnormal", "qlognormal"], ignore_types=["normal", "lognormal", "qnormal", "qlognormal"],
fail_types=["choice_str", "choice_mixed"]) fail_types=["choice_str", "choice_mixed"])
self.import_data_test(tuner_fn, "choice_num")
def test_metis(self): def test_metis(self):
self.test_round = 1 # NOTE: Metis tuner got hanged for multiple testing round self.test_round = 1 # NOTE: Metis tuner got hanged for multiple testing round
self.search_space_test_all(lambda: MetisTuner(), tuner_fn = lambda: MetisTuner()
self.search_space_test_all(tuner_fn,
supported_types=["choice", "randint", "uniform", "quniform"], supported_types=["choice", "randint", "uniform", "quniform"],
fail_types=["choice_str", "choice_mixed"]) fail_types=["choice_str", "choice_mixed"])
self.import_data_test(tuner_fn, "choice_num")
def test_networkmorphism(self): def test_networkmorphism(self):
pass pass
......
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from unittest import TestCase, main
from nni.compression.torch import LevelPruner, SlimPruner, FPGMPruner, L1FilterPruner, \
L2FilterPruner, AGP_Pruner, ActivationMeanRankFilterPruner, ActivationAPoZRankFilterPruner
def validate_sparsity(wrapper, sparsity, bias=False):
    """Assert that the masks on *wrapper* are close to the target *sparsity*.

    Always checks ``wrapper.weight_mask``; also checks ``wrapper.bias_mask``
    when *bias* is truthy and the bias mask exists. A mask's sparsity is the
    fraction of zero entries, compared with an absolute tolerance of 0.1.
    """
    candidates = (wrapper.weight_mask,
                  wrapper.bias_mask if bias else None)
    for mask in candidates:
        if mask is None:
            continue
        zero_ratio = (mask == 0).sum().item() / mask.numel()
        msg = 'actual sparsity: {:.2f}, target sparsity: {:.2f}'.format(zero_ratio, sparsity)
        assert math.isclose(zero_ratio, sparsity, abs_tol=0.1), msg
# Per-pruner test configuration. Each entry maps a short pruner name to:
#   'pruner_class'  -- the NNI pruner class under test
#   'config_list'   -- the config passed to the pruner's constructor
#   'validators'    -- callables run on the pruned model; each asserts the
#                      resulting mask sparsity via validate_sparsity. The
#                      `model.bias` argument lets the validator skip the bias
#                      mask check when the model was built without bias terms.
prune_config = {
    'level': {
        'pruner_class': LevelPruner,
        'config_list': [{
            'sparsity': 0.5,
            'op_types': ['default'],
        }],
        # conv1 and fc checked at 0.5; bias masks deliberately not checked.
        'validators': [
            lambda model: validate_sparsity(model.conv1, 0.5, False),
            lambda model: validate_sparsity(model.fc, 0.5, False)
        ]
    },
    'agp': {
        'pruner_class': AGP_Pruner,
        # AGP ramps sparsity from 0 to 0.8 over 10 epochs; the test only
        # runs one step, so no sparsity validators are applied here.
        'config_list': [{
            'initial_sparsity': 0,
            'final_sparsity': 0.8,
            'start_epoch': 0,
            'end_epoch': 10,
            'frequency': 1,
            'op_types': ['default']
        }],
        'validators': []
    },
    'slim': {
        'pruner_class': SlimPruner,
        # Slim pruning targets BatchNorm layers, hence bn1 is validated.
        'config_list': [{
            'sparsity': 0.7,
            'op_types': ['BatchNorm2d']
        }],
        'validators': [
            lambda model: validate_sparsity(model.bn1, 0.7, model.bias)
        ]
    },
    'fpgm': {
        'pruner_class': FPGMPruner,
        'config_list':[{
            'sparsity': 0.5,
            'op_types': ['Conv2d']
        }],
        'validators': [
            lambda model: validate_sparsity(model.conv1, 0.5, model.bias)
        ]
    },
    'l1': {
        'pruner_class': L1FilterPruner,
        'config_list': [{
            'sparsity': 0.5,
            'op_types': ['Conv2d'],
        }],
        'validators': [
            lambda model: validate_sparsity(model.conv1, 0.5, model.bias)
        ]
    },
    'l2': {
        'pruner_class': L2FilterPruner,
        'config_list': [{
            'sparsity': 0.5,
            'op_types': ['Conv2d'],
        }],
        'validators': [
            lambda model: validate_sparsity(model.conv1, 0.5, model.bias)
        ]
    },
    'mean_activation': {
        'pruner_class': ActivationMeanRankFilterPruner,
        'config_list': [{
            'sparsity': 0.5,
            'op_types': ['Conv2d'],
        }],
        'validators': [
            lambda model: validate_sparsity(model.conv1, 0.5, model.bias)
        ]
    },
    'apoz': {
        'pruner_class': ActivationAPoZRankFilterPruner,
        'config_list': [{
            'sparsity': 0.5,
            'op_types': ['Conv2d'],
        }],
        'validators': [
            lambda model: validate_sparsity(model.conv1, 0.5, model.bias)
        ]
    }
}
class Model(nn.Module):
    """Tiny conv -> bn -> pool -> fc network used as the pruning target.

    The ``bias`` flag toggles bias terms on the prunable conv/fc layers and
    is stored on the instance so validators can tell whether a bias mask
    should be checked.
    """

    def __init__(self, bias=True):
        super(Model, self).__init__()
        self.conv1 = nn.Conv2d(1, 8, kernel_size=3, padding=1, bias=bias)
        self.bn1 = nn.BatchNorm2d(8)
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(8, 2, bias=bias)
        self.bias = bias

    def forward(self, x):
        # conv -> batchnorm -> global average pool -> flatten -> linear
        features = self.bn1(self.conv1(x))
        pooled = self.pool(features).view(x.size(0), -1)
        return self.fc(pooled)
def pruners_test(pruner_names=('level', 'agp', 'slim', 'fpgm', 'l1', 'l2', 'mean_activation', 'apoz'), bias=True):
    """Run compress + export + validation for each named pruner on a fresh Model.

    Parameters
    ----------
    pruner_names : iterable of str
        keys into ``prune_config`` selecting which pruners to exercise
        (tuple default instead of the original mutable list default)
    bias : bool
        whether the model's prunable layers carry bias terms
    """
    import tempfile

    def _train_step(model, optimizer):
        # One SGD step on random data so activation/gradient-based pruners
        # have statistics to rank filters with.
        x = torch.randn(2, 1, 28, 28)
        y = torch.tensor([0, 1]).long()
        loss = F.cross_entropy(model(x), y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    for pruner_name in pruner_names:
        print('testing {}...'.format(pruner_name))
        model = Model(bias=bias)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        config_list = prune_config[pruner_name]['config_list']
        _train_step(model, optimizer)
        pruner = prune_config[pruner_name]['pruner_class'](model, config_list, optimizer)
        pruner.compress()
        _train_step(model, optimizer)
        # Export into a temporary directory so the files are always cleaned
        # up -- the original wrote to cwd and leaked the files whenever a
        # validator assertion failed before os.remove ran.
        with tempfile.TemporaryDirectory() as tmp_dir:
            model_path = os.path.join(tmp_dir, 'model_tmp.pth')
            mask_path = os.path.join(tmp_dir, 'mask_tmp.pth')
            onnx_path = os.path.join(tmp_dir, 'onnx_tmp.pth')
            pruner.export_model(model_path, mask_path, onnx_path, input_shape=(2, 1, 28, 28))
            for v in prune_config[pruner_name]['validators']:
                v(model)
class PrunerTestCase(TestCase):
    """Runs the full pruner suite twice: with and without bias terms."""

    def test_pruners(self):
        # All pruners against a model whose conv/fc layers have bias terms.
        pruners_test(bias=True)

    def test_pruners_no_bias(self):
        # Same suite with bias=False, so bias masks are absent/skipped.
        pruners_test(bias=False)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    main()
...@@ -282,7 +282,7 @@ class Intermediate extends React.Component<IntermediateProps, IntermediateState> ...@@ -282,7 +282,7 @@ class Intermediate extends React.Component<IntermediateProps, IntermediateState>
notMerge={true} // update now notMerge={true} // update now
onEvents={IntermediateEvents} onEvents={IntermediateEvents}
/> />
<div className="yAxis"># Intermediate result</div> <div className="xAxis"># Intermediate result</div>
</div> </div>
</div> </div>
); );
......
...@@ -245,8 +245,8 @@ class TableList extends React.Component<TableListProps, TableListState> { ...@@ -245,8 +245,8 @@ class TableList extends React.Component<TableListProps, TableListState> {
if (res.data.length !== 0) { if (res.data.length !== 0) {
// just add type=number keys // just add type=number keys
const intermediateMetrics = parseMetrics(res.data[0].data); const intermediateMetrics = parseMetrics(res.data[0].data);
for(const key in intermediateMetrics){ for (const key in intermediateMetrics) {
if(typeof intermediateMetrics[key] === 'number') { if (typeof intermediateMetrics[key] === 'number') {
otherkeys.push(key); otherkeys.push(key);
} }
} }
...@@ -622,15 +622,18 @@ class TableList extends React.Component<TableListProps, TableListState> { ...@@ -622,15 +622,18 @@ class TableList extends React.Component<TableListProps, TableListState> {
: :
null null
} }
<ReactEcharts <div className="intermediate-graph">
option={intermediateOption} <ReactEcharts
style={{ option={intermediateOption}
width: 0.5 * modalIntermediateWidth, style={{
height: 0.7 * modalIntermediateHeight, width: 0.5 * modalIntermediateWidth,
padding: 20 height: 0.7 * modalIntermediateHeight,
}} padding: 20
theme="my_theme" }}
/> theme="my_theme"
/>
<div className="xAxis">#Intermediate result</div>
</div>
</Modal> </Modal>
{/* Add Column Modal */} {/* Add Column Modal */}
{ {
......
...@@ -110,7 +110,7 @@ const intermediateGraphOption = (intermediateArr: number[], id: string): any => ...@@ -110,7 +110,7 @@ const intermediateGraphOption = (intermediateArr: number[], id: string): any =>
trigger: 'item' trigger: 'item'
}, },
xAxis: { xAxis: {
name: 'Trial', // name: '#Intermediate result',
data: sequence data: sequence
}, },
yAxis: { yAxis: {
......
...@@ -108,7 +108,7 @@ $bg: #b3b3b3; ...@@ -108,7 +108,7 @@ $bg: #b3b3b3;
/* for yAxis # intermediate position in intermediate graph*/ /* for yAxis # intermediate position in intermediate graph*/
.intermediate-graph{ .intermediate-graph{
position: relative; position: relative;
.yAxis{ .xAxis{
color: #333; color: #333;
position: absolute; position: absolute;
left: 50%; left: 50%;
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
jobs: jobs:
- job: 'integration_test_frameworkController' - job: 'integration_test_frameworkController'
timeoutInMinutes: 0 timeoutInMinutes: 120
steps: steps:
- script: python3 -m pip install --upgrade pip setuptools --user - script: python3 -m pip install --upgrade pip setuptools --user
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
jobs: jobs:
- job: 'integration_test_kubeflow' - job: 'integration_test_kubeflow'
timeoutInMinutes: 0 timeoutInMinutes: 120
steps: steps:
- script: python3 -m pip install --upgrade pip setuptools --user - script: python3 -m pip install --upgrade pip setuptools --user
......
jobs: jobs:
- job: 'integration_test_local_windows' - job: 'integration_test_local_windows'
timeoutInMinutes: 0 timeoutInMinutes: 120
steps: steps:
- script: | - script: |
......
jobs: jobs:
- job: 'integration_test_local_ubuntu' - job: 'integration_test_local_ubuntu'
timeoutInMinutes: 0 timeoutInMinutes: 120
steps: steps:
- script: python3 -m pip install --upgrade pip setuptools --user - script: python3 -m pip install --upgrade pip setuptools --user
......
jobs: jobs:
- job: 'build_docker_image' - job: 'build_docker_image'
timeoutInMinutes: 0 timeoutInMinutes: 120
pool: pool:
vmImage: 'Ubuntu 16.04' vmImage: 'Ubuntu 16.04'
steps: steps:
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
jobs: jobs:
- job: 'integration_test_pai' - job: 'integration_test_pai'
timeoutInMinutes: 0 timeoutInMinutes: 120
steps: steps:
- script: python3 -m pip install --upgrade pip setuptools --user - script: python3 -m pip install --upgrade pip setuptools --user
......
jobs: jobs:
- job: 'integration_test_remote_windows' - job: 'integration_test_remote_windows'
timeoutInMinutes: 0 timeoutInMinutes: 120
steps: steps:
- script: python -m pip install --upgrade pip setuptools - script: python -m pip install --upgrade pip setuptools
...@@ -11,6 +11,7 @@ jobs: ...@@ -11,6 +11,7 @@ jobs:
targetFolder: /tmp/nnitest/$(Build.BuildId)/nni-remote targetFolder: /tmp/nnitest/$(Build.BuildId)/nni-remote
overwrite: true overwrite: true
displayName: 'Copy all files to remote machine' displayName: 'Copy all files to remote machine'
timeoutInMinutes: 10
- script: | - script: |
powershell.exe -file install.ps1 powershell.exe -file install.ps1
displayName: 'Install nni toolkit via source code' displayName: 'Install nni toolkit via source code'
......
jobs: jobs:
- job: 'integration_test_remote' - job: 'integration_test_remote'
timeoutInMinutes: 0 timeoutInMinutes: 120
steps: steps:
- script: python3 -m pip install --upgrade pip setuptools --user - script: python3 -m pip install --upgrade pip setuptools --user
...@@ -26,6 +26,7 @@ jobs: ...@@ -26,6 +26,7 @@ jobs:
targetFolder: /tmp/nnitest/$(Build.BuildId)/dist targetFolder: /tmp/nnitest/$(Build.BuildId)/dist
overwrite: true overwrite: true
displayName: 'Copy dist files to remote machine' displayName: 'Copy dist files to remote machine'
timeoutInMinutes: 10
- task: CopyFilesOverSSH@0 - task: CopyFilesOverSSH@0
inputs: inputs:
sshEndpoint: $(end_point) sshEndpoint: $(end_point)
...@@ -33,6 +34,7 @@ jobs: ...@@ -33,6 +34,7 @@ jobs:
targetFolder: /tmp/nnitest/$(Build.BuildId)/test targetFolder: /tmp/nnitest/$(Build.BuildId)/test
overwrite: true overwrite: true
displayName: 'Copy test files to remote machine' displayName: 'Copy test files to remote machine'
timeoutInMinutes: 10
- task: SSH@0 - task: SSH@0
inputs: inputs:
sshEndpoint: $(end_point) sshEndpoint: $(end_point)
......
...@@ -36,7 +36,7 @@ class StdOutputType(Enum): ...@@ -36,7 +36,7 @@ class StdOutputType(Enum):
def nni_log(log_type, log_message): def nni_log(log_type, log_message):
'''Log message into stdout''' '''Log message into stdout'''
dt = datetime.now() dt = datetime.now()
print('[{0}] {1} {2}'.format(dt, log_type.value, log_message)) print('[{0}] {1} {2}'.format(dt, log_type.value, log_message), flush=True)
class NNIRestLogHanlder(StreamHandler): class NNIRestLogHanlder(StreamHandler):
def __init__(self, host, port, tag, std_output_type=StdOutputType.Stdout): def __init__(self, host, port, tag, std_output_type=StdOutputType.Stdout):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment