"host/host_tensor/src/device.cpp" did not exist on "88b77181aab1198b41b612f6d03b6dfb2d32bd40"
Commit ba8dccd6 authored by suiguoxin's avatar suiguoxin
Browse files

Merge branch 'master' of https://github.com/microsoft/nni

parents 56a1575b 150ee83a
......@@ -76,7 +76,7 @@ def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False):
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
iou = []
for i in range(C):
if i != ignore: # The ignored label is sometimes among predicted classes (ENet - CityScapes)
intersection = ((label == i) & (pred == i)).sum()
......@@ -127,7 +127,7 @@ def lovasz_hinge_flat(logits, labels):
grad = lovasz_grad(gt_sorted)
loss = torch.dot(F.elu(errors_sorted)+1, Variable(grad))
#loss = torch.dot(F.relu(errors_sorted), Variable(grad))
return loss
......
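The IoU hunk above accumulates per-class intersections and unions from boolean masks. A minimal, self-contained NumPy sketch of the same computation (the `EMPTY` default, the `ignore` handling, and the toy masks are illustrative assumptions, not the repo's exact code):

```python
import numpy as np

def iou_per_class(pred, label, num_classes, empty=1.0, ignore=None):
    """Per-class IoU from integer masks; `empty` is returned for a class
    absent from both prediction and ground truth."""
    ious = []
    for c in range(num_classes):
        if c == ignore:  # skip the ignored label, as in the hunk above
            continue
        intersection = np.logical_and(label == c, pred == c).sum()
        union = np.logical_or(label == c, pred == c).sum()
        ious.append(empty if union == 0 else intersection / union)
    return ious

# toy 3x3 masks with two classes
pred = np.array([[0, 0, 1], [0, 1, 1], [1, 1, 1]])
label = np.array([[0, 0, 0], [0, 1, 1], [1, 1, 1]])
print(iou_per_class(pred, label, num_classes=2))  # [0.75, 0.8333...]
```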
......@@ -201,12 +201,12 @@ class UNetResNetV4(nn.Module):
F.upsample(d3, scale_factor=4, mode='bilinear', align_corners=False),
F.upsample(d4, scale_factor=8, mode='bilinear', align_corners=False),
F.upsample(d5, scale_factor=16, mode='bilinear', align_corners=False),
], 1)
f = F.dropout2d(f, p=self.dropout_2d)
return self.logit(f), None
def freeze_bn(self):
'''Freeze BatchNorm layers.'''
for layer in self.modules():
......@@ -221,7 +221,7 @@ class UNetResNetV4(nn.Module):
for x in group1:
for p in x.parameters():
params1.append(p)
param_group1 = {'params': params1, 'lr': base_lr / 5}
params2 = []
......@@ -321,7 +321,7 @@ class UNetResNetV5(nn.Module):
F.interpolate(d3, scale_factor=4, mode='bilinear', align_corners=False),
F.interpolate(d4, scale_factor=8, mode='bilinear', align_corners=False),
F.interpolate(d5, scale_factor=16, mode='bilinear', align_corners=False),
], 1)
f = F.dropout2d(f, p=self.dropout_2d)
......@@ -331,7 +331,7 @@ class UNetResNetV6(nn.Module):
'''
1. Remove first pool from UNetResNetV5, such that resolution is doubled
2. Remove scSE from center block
3. Increase default dropout
'''
def __init__(self, encoder_depth, num_filters=32, dropout_2d=0.5):
super(UNetResNetV6, self).__init__()
......@@ -400,7 +400,7 @@ class UNetResNetV6(nn.Module):
F.interpolate(d4, scale_factor=4, mode='bilinear', align_corners=False),
F.interpolate(d5, scale_factor=8, mode='bilinear', align_corners=False),
F.interpolate(center, scale_factor=16, mode='bilinear', align_corners=False),
], 1)
f = F.dropout2d(f, p=self.dropout_2d, training=self.training)
......
......@@ -71,7 +71,7 @@ def do_tta_predict(args, model, ckp_path, tta_num=4):
outputs = np.flip(outputs, 1)
#print(outputs.shape)
preds.append(outputs)
parent_dir = ckp_path+'_out'
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
......@@ -142,9 +142,9 @@ def generate_preds(outputs, target_size, pad_mode, threshold=0.5):
def ensemble_predict(args):
model = eval(args.model_name)(args.layers, num_filters=args.nf)
checkpoints = [
r'D:\data\salt\models\pseudo\UNetResNetV4_34\edge\best_5.pth',
r'D:\data\salt\models\pseudo\UNetResNetV4_34\edge\best_6.pth',
r'D:\data\salt\models\pseudo\UNetResNetV4_34\edge\best_8.pth',
r'D:\data\salt\models\pseudo\UNetResNetV4_34\edge\best_9.pth'
......@@ -160,7 +160,7 @@ def ensemble_np_results(args):
np_files6 = glob.glob(r'D:\data\salt\models\ensemble\*.npy')
np_files = np_files1 + np_files2 + np_files3 + np_files6
print(np_files)
ensemble_np(args, np_files)
def predict_model(args):
model = eval(args.model_name)(args.layers, num_filters=args.nf)
......
......@@ -40,7 +40,7 @@ def prepare_metadata():
)
meta.to_csv(settings.META_FILE, index=None)
def cov_to_class(val):
for i in range(0, 11):
if val * 10 <= i:
return i
......@@ -57,7 +57,7 @@ def generate_stratified_metadata():
train_df["file_path_image"] = train_df.index.map(lambda x: os.path.join(settings.TRAIN_IMG_DIR, '{}.png'.format(x)))
train_df["file_path_mask"] = train_df.index.map(lambda x: os.path.join(settings.TRAIN_MASK_DIR, '{}.png'.format(x)))
train_df.to_csv(os.path.join(settings.DATA_DIR, 'train_meta2.csv'),
columns=['file_path_image','file_path_mask','is_train','z','salt_exists', 'coverage_class', 'coverage'])
train_splits = {}
......@@ -65,7 +65,7 @@ def generate_stratified_metadata():
for i, (train_index, valid_index) in enumerate(kf.split(train_df.index.values.reshape(-1), train_df.coverage_class.values.reshape(-1))):
train_splits[str(i)] = {
'train_index': train_index.tolist(),
'val_index': valid_index.tolist()
}
with open(os.path.join(settings.DATA_DIR, 'train_split.json'), 'w') as f:
json.dump(train_splits, f, indent=4)
......
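The hunks above write stratified fold indices to `train_split.json`, and `get_nfold_split2` (further down in this diff) reads a fold back by its id. A small round-trip sketch of that scheme, with a toy DataFrame standing in for the real metadata:

```python
import json
import pandas as pd
from sklearn.model_selection import StratifiedKFold

# toy metadata: 20 samples, two coverage classes
train_df = pd.DataFrame({'coverage_class': [0, 1] * 10})

kf = StratifiedKFold(n_splits=4, shuffle=True, random_state=1234)
train_splits = {}
for i, (train_index, valid_index) in enumerate(
        kf.split(train_df.index.values.reshape(-1),
                 train_df.coverage_class.values.reshape(-1))):
    train_splits[str(i)] = {'train_index': train_index.tolist(),
                            'val_index': valid_index.tolist()}

with open('train_split.json', 'w') as f:
    json.dump(train_splits, f, indent=4)

# later, as in get_nfold_split2: recover one fold by its id
with open('train_split.json') as f:
    splits = json.load(f)
fold0_train = train_df.iloc[splits['0']['train_index']]
fold0_valid = train_df.iloc[splits['0']['val_index']]
```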
......@@ -57,7 +57,7 @@ def weighted_loss(args, output, target, epoch=0):
def train(args):
print('start training...')
"""@nni.variable(nni.choice('UNetResNetV4', 'UNetResNetV5', 'UNetResNetV6'), name=model_name)"""
model_name = args.model_name
......@@ -123,10 +123,10 @@ def train(args):
img, target, salt_target = img.cuda(), target.cuda(), salt_target.cuda()
optimizer.zero_grad()
output, salt_out = model(img)
loss, *_ = weighted_loss(args, (output, salt_out), (target, salt_target), epoch=epoch)
loss.backward()
if args.optim == 'Adam' and args.adamw:
wd = 0.0001
for group in optimizer.param_groups:
......@@ -141,7 +141,7 @@ def train(args):
iout, iou, focal_loss, lovaz_loss, salt_loss, mix_score = validate(args, model, val_loader, epoch=epoch)
"""@nni.report_intermediate_result(iout)"""
_save_ckp = ''
if iout > best_iout:
best_iout = iout
......@@ -155,7 +155,7 @@ def train(args):
focal_loss, lovaz_loss, iou, iout, best_iout, (time.time() - bg) / 60, _save_ckp, salt_loss))
model.train()
if args.lrs == 'plateau':
lr_scheduler.step(best_iout)
else:
......@@ -163,7 +163,7 @@ def train(args):
del model, train_loader, val_loader, optimizer, lr_scheduler
"""@nni.report_final_result(best_iout)"""
def get_lrs(optimizer):
lrs = []
for pgs in optimizer.state_dict()['param_groups']:
......@@ -188,7 +188,7 @@ def validate(args, model, val_loader, epoch=0, threshold=0.5):
salt_loss += _salt_loss
w_loss += _w_loss
output = torch.sigmoid(output)
for o in output.cpu():
outputs.append(o.squeeze().numpy())
......@@ -217,7 +217,7 @@ def generate_preds(args, outputs, target_size, threshold=0.5):
return preds
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='TGS Salt segmentation')
parser.add_argument('--layers', default=34, type=int, help='model layers')
parser.add_argument('--nf', default=32, type=int, help='num_filters param for model')
......@@ -244,7 +244,7 @@ if __name__ == '__main__':
parser.add_argument('--depths', action='store_true')
parser.add_argument('--dev_mode', action='store_true')
parser.add_argument('--adamw', action='store_true')
args = parser.parse_args()
'''@nni.get_next_parameter()'''
......
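The `@nni.*` docstring annotations in the train.py hunks above are NNI's annotation syntax, which the toolkit rewrites into SDK calls at run time. A rough sketch of the equivalent plain-API trial (the model-name default and the metric loop are placeholders, not the repo's code):

```python
import nni

def main():
    # counterpart of '''@nni.get_next_parameter()'''
    params = nni.get_next_parameter()
    # counterpart of @nni.variable(nni.choice(...), name=model_name)
    model_name = params.get('model_name', 'UNetResNetV4')

    best_iout = 0.0
    for epoch in range(10):
        iout = 0.05 * epoch          # placeholder for validate(...)
        best_iout = max(best_iout, iout)
        # counterpart of @nni.report_intermediate_result(iout)
        nni.report_intermediate_result(iout)
    # counterpart of @nni.report_final_result(best_iout)
    nni.report_final_result(best_iout)

if __name__ == '__main__':
    main()
```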
......@@ -166,7 +166,7 @@ def get_nfold_split2(ifold, nfold=10):
train_index = train_splits[str(ifold)]['train_index']
valid_index = train_splits[str(ifold)]['val_index']
return meta_train.iloc[train_index], meta_train.iloc[valid_index]
def get_test_meta():
......
......@@ -93,7 +93,7 @@ def train(args, params):
x_train, y_train, x_test, y_test = load_mnist_data(args)
model = create_mnist_model(params)
# nni
model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
validation_data=(x_test, y_test), callbacks=[SendMetrics(), TensorBoard(log_dir=TENSORBOARD_DIR)])
......
......@@ -5,8 +5,8 @@
"_value" : [{"optimizer": "Adam", "learning_rate": 0.00001},
{"optimizer": "Adam", "learning_rate": 0.0001},
{"optimizer": "Adam", "learning_rate": 0.001},
{"optimizer": "SGD", "learning_rate": 0.01},
{"optimizer": "SGD", "learning_rate": 0.005},
{"optimizer": "SGD", "learning_rate": 0.01},
{"optimizer": "SGD", "learning_rate": 0.005},
{"optimizer": "SGD", "learning_rate": 0.0002}]
}
}
\ No newline at end of file
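Each `_value` entry above bundles an optimizer with a learning rate, so one sampled choice configures both. A hedged sketch of how a trial might unpack such an entry (the Keras import path and the compile step are assumptions, not the repo's code):

```python
import nni
from tensorflow.keras import optimizers  # import path is an assumption

params = nni.get_next_parameter()
# e.g. params == {"optimizer": "SGD", "learning_rate": 0.01}

if params['optimizer'] == 'Adam':
    opt = optimizers.Adam(learning_rate=params['learning_rate'])
else:
    opt = optimizers.SGD(learning_rate=params['learning_rate'])
# the optimizer is then passed to model.compile(optimizer=opt, ...)
```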
......@@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NNI (https://github.com/Microsoft/nni) modified this code to show how to
# integrate distributed pytorch training with NNI SDK
#
import os
import torch
......
......@@ -121,7 +121,7 @@ def main(params):
for i in range(params['batch_num']):
batch = mnist.train.next_batch(params['batch_size'])
mnist_network.train_step.run(feed_dict={mnist_network.x: batch[0], mnist_network.y: batch[1]})
if i % 100 == 0:
train_accuracy = mnist_network.accuracy.eval(feed_dict={
mnist_network.x: batch[0], mnist_network.y: batch[1]})
......
**Run Neural Architecture Search in NNI**
===
Refer to [NNI-NAS-Example](https://github.com/Crysple/NNI-NAS-Example) to use the NAS interface provided by a contributor.
Thanks to our lovely contributors!
And we welcome more and more people to join us!
\ No newline at end of file
......@@ -10,7 +10,7 @@ trainingServicePlatform: local
useAnnotation: false
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, NetworkMorphism
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: NetworkMorphism
classArgs:
#choice: maximize, minimize
......
......@@ -9,7 +9,7 @@ trainingServicePlatform: pai
useAnnotation: false
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, NetworkMorphism
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: NetworkMorphism
classArgs:
#choice: maximize, minimize
......
......@@ -8,7 +8,7 @@ The Network Morphism is a built-in Tuner using network morphism techniques to se
Network morphism is currently framework-based, and we have not yet implemented framework-free methods. The training frameworks supported so far are PyTorch and Keras. If you are familiar with the intermediate JSON format, you can build your own model in your own training framework. In the future, we will change the intermediate format from JSON to ONNX in order to get a [standard intermediate representation spec](https://github.com/onnx/onnx/blob/master/docs/IR.md).
### 2. Install the requirements
```bash
# install the requirements packages
......@@ -32,7 +32,7 @@ trainingServicePlatform: local
useAnnotation: false
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, NetworkMorphism
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: NetworkMorphism
classArgs:
#choice: maximize, minimize
......@@ -83,7 +83,7 @@ net = build_graph_from_json(RCV_CONFIG)
nni.report_final_result(best_acc)
```
### 5. Submit this job
```bash
# You can use the NNI command-line tool "nnictl" to create a job and submit it to NNI
......
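Putting the README's pieces together, a NetworkMorphism trial roughly takes this shape (a sketch: the training step is a stub, and `build_graph_from_json` lives in the example's own code, so it is only referenced in a comment):

```python
import nni

def train_and_eval(net):
    """Stub for the example's real training/evaluation epoch."""
    return 0.9

RCV_CONFIG = nni.get_next_parameter()  # intermediate JSON describing a network
# in the real trial: net = build_graph_from_json(RCV_CONFIG)
net = RCV_CONFIG                       # stand-in so the sketch runs end to end

best_acc = 0.0
for epoch in range(5):
    acc = train_and_eval(net)
    best_acc = max(best_acc, acc)
    nni.report_intermediate_result(acc)
nni.report_final_result(best_acc)
```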
......@@ -10,7 +10,7 @@ trainingServicePlatform: local
useAnnotation: false
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, NetworkMorphism
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: NetworkMorphism
classArgs:
#choice: maximize, minimize
......
......@@ -9,7 +9,7 @@ trainingServicePlatform: pai
useAnnotation: false
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, NetworkMorphism
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: NetworkMorphism
classArgs:
#choice: maximize, minimize
......
......@@ -56,7 +56,7 @@ def get_model(PARAMS):
model.degree = PARAMS.get('degree')
model.gamma = PARAMS.get('gamma')
model.coef0 = PARAMS.get('coef0')
return model
def run(X_train, X_test, y_train, y_test, PARAMS):
......
......@@ -63,9 +63,9 @@ def get_model(PARAMS):
if not model_dict.get(PARAMS['model_name']):
LOG.exception('Not supported model!')
exit(1)
model = model_dict[PARAMS['model_name']]
try:
if PARAMS['model_name'] == 'SVR':
model.kernel = PARAMS['svr_kernel']
......
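The two hunks above pick an estimator from a dict keyed by `model_name` and then assign tuned hyperparameters as plain attributes. A minimal sketch of that pattern (the dict contents and parameter values are assumptions):

```python
import sys
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR

def get_model(PARAMS):
    model_dict = {'SVR': SVR(), 'LinearRegression': LinearRegression()}
    model = model_dict.get(PARAMS['model_name'])
    if model is None:
        sys.exit('Not supported model!')
    if PARAMS['model_name'] == 'SVR':
        # scikit-learn hyperparameters are plain attributes before fit()
        model.kernel = PARAMS['svr_kernel']
        model.degree = PARAMS.get('degree')
        model.gamma = PARAMS.get('gamma')
        model.coef0 = PARAMS.get('coef0')
    return model

model = get_model({'model_name': 'SVR', 'svr_kernel': 'rbf',
                   'degree': 3, 'gamma': 'scale', 'coef0': 0.0})
```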
......@@ -10,7 +10,7 @@ useAnnotation: false
multiThread: true
tuner:
codeDir: ../../../tuners/weight_sharing/ga_customer_tuner
classFileName: customer_tuner.py
className: CustomerTuner
classArgs:
optimize_mode: maximize
......@@ -23,9 +23,9 @@ trial:
machineList:
- ip: remote-ip-0
port: 8022
username: root
passwd: screencast
- ip: remote-ip-1
port: 8022
username: root
passwd: screencast
......@@ -290,7 +290,7 @@ def graph_to_network(input1,
if topo_i == '|':
continue
# Note: here we use the `hash_id` of layer as scope name,
# so that we can automatically load sharable weights from previous trained models
with tf.variable_scope(p_graph.layers[topo_i].hash_id, reuse=tf.AUTO_REUSE):
if p_graph.layers[topo_i].graph_type == LayerType.input.value:
......
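The comment above explains the weight-sharing trick: variable scopes are named by each layer's `hash_id`, so rebuilding an identical layer reuses its variables instead of creating new ones. A minimal TensorFlow 1.x sketch of that mechanism (the layer shape and `hash_id` value are illustrative):

```python
import tensorflow as tf  # TensorFlow 1.x API, as in the example

def dense_layer(x, hash_id):
    # scope name == layer hash, so identical layers share weights
    with tf.variable_scope(hash_id, reuse=tf.AUTO_REUSE):
        return tf.layers.dense(x, 64, activation=tf.nn.relu)

x = tf.placeholder(tf.float32, [None, 32])
h1 = dense_layer(x, 'layer_abc123')  # creates variables under 'layer_abc123'
h2 = dense_layer(x, 'layer_abc123')  # reuses the very same variables
```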