Commit ae7a72bc authored by Hongarc, committed by Chi Song

Remove all whitespace at end of line (#1162)

parent 14c1b31c
......@@ -239,7 +239,7 @@ class CustomerTuner(Tuner):
indiv.mutation()
graph = indiv.config
temp = json.loads(graph_dumps(graph))
# ......
```
......
## 33rd place solution code for Kaggle [TGS Salt Identification Challenge](https://www.kaggle.com/c/tgs-salt-identification-challenge)
This example shows how to enable AutoML for competition code by running it on NNI without any code change.
To run this code on NNI, first run it standalone, then configure config.yml and run:
```
nnictl create --config config.yml
......@@ -18,7 +18,7 @@ Stage 1:
Train fold 0-3 for 100 epochs, for each fold, train 3 models:
```
python3 train.py --ifolds 0 --epochs 100 --model_name UNetResNetV4
python3 train.py --ifolds 0 --epochs 100 --model_name UNetResNetV5 --layers 50
python3 train.py --ifolds 0 --epochs 100 --model_name UNetResNetV6
```
......@@ -28,7 +28,7 @@ Stage 2:
Fine tune stage 1 models for 300 epochs with cosine annealing lr scheduler:
```
python3 train.py --ifolds 0 --epochs 300 --lrs cosine --lr 0.001 --min_lr 0.0001 --model_name UNetResNetV4
```
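For reference, the cosine schedule these flags describe can be reproduced with PyTorch's built-in scheduler; the sketch below assumes `--lr` maps to the starting rate and `--min_lr` to `eta_min` (the toy model is a placeholder):
```python
import torch
from torch.optim.lr_scheduler import CosineAnnealingLR

# Placeholder model/optimizer; only the schedule matters here.
model = torch.nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)            # --lr
scheduler = CosineAnnealingLR(optimizer, T_max=300, eta_min=0.0001)  # --min_lr

for epoch in range(300):
    # ... run one training epoch ...
    scheduler.step()  # anneal the LR along the cosine curve
```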
Stage 3:
......
......@@ -165,7 +165,7 @@ def test_transform():
RandomHFlipWithMask(),
RandomVFlipWithMask(),
RandomRotateWithMask([0, 90, 180, 270]),
#RandomRotateWithMask(15),
RandomResizedCropWithMask(768, scale=(0.81, 1))
])
......
......@@ -33,7 +33,7 @@ class FocalLoss2d(nn.Module):
def forward(self, logit, target, class_weight=None, type='sigmoid'):
target = target.view(-1, 1).long()
if type == 'sigmoid':
if class_weight is None:
class_weight = [1]*2 #[0.5, 0.5]
......
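For context, the focal loss this class computes down-weights well-classified pixels by a factor of (1 - p_t)^gamma; a minimal self-contained binary sketch (gamma = 2 and the absence of class weights are assumptions):
```python
import torch

def binary_focal_loss(logit, target, gamma=2.0):
    prob = torch.sigmoid(logit)
    # pt is the predicted probability of the true class.
    pt = torch.where(target == 1, prob, 1 - prob)
    # (1 - pt)**gamma shrinks the loss contribution of easy examples.
    return (-(1 - pt) ** gamma * torch.log(pt.clamp(min=1e-8))).mean()
```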
......@@ -40,11 +40,11 @@ class ImageDataset(data.Dataset):
self.train_mode = train_mode
self.meta = meta
self.img_ids = meta[ID_COLUMN].values
self.salt_exists = meta['salt_exists'].values
self.is_train = meta['is_train'].values
if self.train_mode:
self.mask_filenames = meta[Y_COLUMN].values
......@@ -207,7 +207,7 @@ def get_train_loaders(ifold, batch_size=8, dev_mode=False, pad_mode='edge', meta
val_set = ImageDataset(True, val_meta,
augment_with_target=img_mask_aug_val,
image_augment=None,
image_transform=get_image_transform(pad_mode),
mask_transform=get_mask_transform(pad_mode))
val_loader = data.DataLoader(val_set, batch_size=batch_size, shuffle=False, num_workers=4, collate_fn=val_set.collate_fn)
......@@ -221,7 +221,7 @@ def get_test_loader(batch_size=16, index=0, dev_mode=False, pad_mode='edge'):
if dev_mode:
test_meta = test_meta.iloc[:10]
test_set = ImageDataset(False, test_meta,
image_augment=None if pad_mode == 'resize' else transforms.Pad((13,13,14,14), padding_mode=pad_mode),
image_transform=get_tta_transforms(index, pad_mode))
test_loader = data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=4, collate_fn=test_set.collate_fn, drop_last=False)
test_loader.num = len(test_set)
......@@ -236,13 +236,13 @@ def get_depth_tensor(pad_mode):
if depth_channel_tensor is not None:
return depth_channel_tensor
depth_tensor = None
if pad_mode == 'resize':
depth_tensor = np.zeros((H, W))
for row, const in enumerate(np.linspace(0, 1, H)):
depth_tensor[row, :] = const
else:
depth_tensor = np.zeros((ORIG_H, ORIG_W))
for row, const in enumerate(np.linspace(0, 1, ORIG_H)):
......
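The loop fills each row of the depth channel with a constant that rises linearly from 0 at the top of the image to 1 at the bottom, encoding vertical position as an extra input channel. A vectorized equivalent (H and W are placeholder sizes):
```python
import numpy as np

H, W = 128, 128  # placeholders for the resized/original image shape
# One linspace value per row, broadcast across the full width.
depth_tensor = np.tile(np.linspace(0, 1, H)[:, None], (1, W))
```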
......@@ -76,7 +76,7 @@ def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False):
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
iou = []
for i in range(C):
if i != ignore: # The ignored label is sometimes among predicted classes (ENet - CityScapes)
intersection = ((label == i) & (pred == i)).sum()
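The same intersection/union counting, reduced to a self-contained binary sketch; the empty fallback mirrors the EMPTY argument above, which covers classes absent from both masks:
```python
import numpy as np

def binary_iou(pred, label, empty=1.0):
    intersection = np.logical_and(pred, label).sum()
    union = np.logical_or(pred, label).sum()
    return empty if union == 0 else intersection / union

pred = np.array([[1, 0], [1, 1]])
label = np.array([[1, 0], [0, 1]])
print(binary_iou(pred, label))  # 2 shared pixels / 3 covered pixels = 0.666...
```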
......@@ -127,7 +127,7 @@ def lovasz_hinge_flat(logits, labels):
grad = lovasz_grad(gt_sorted)
loss = torch.dot(F.elu(errors_sorted)+1, Variable(grad))
#loss = torch.dot(F.relu(errors_sorted), Variable(grad))
return loss
......
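The commented-out line is the standard ReLU weighting of the sorted errors; the active elu(x) + 1 variant is smooth and strictly positive, so well-classified examples still receive a small gradient. A quick comparison of the two weightings:
```python
import torch
import torch.nn.functional as F

errors = torch.tensor([-2.0, -0.5, 0.5, 2.0])
print(F.relu(errors))     # tensor([0.0000, 0.0000, 0.5000, 2.0000])
print(F.elu(errors) + 1)  # approx. tensor([0.1353, 0.6065, 1.5000, 3.0000])
```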
......@@ -201,12 +201,12 @@ class UNetResNetV4(nn.Module):
F.upsample(d3, scale_factor=4, mode='bilinear', align_corners=False),
F.upsample(d4, scale_factor=8, mode='bilinear', align_corners=False),
F.upsample(d5, scale_factor=16, mode='bilinear', align_corners=False),
], 1)
f = F.dropout2d(f, p=self.dropout_2d)
return self.logit(f), None
def freeze_bn(self):
'''Freeze BatchNorm layers.'''
for layer in self.modules():
......@@ -221,7 +221,7 @@ class UNetResNetV4(nn.Module):
for x in group1:
for p in x.parameters():
params1.append(p)
param_group1 = {'params': params1, 'lr': base_lr / 5}
params2 = []
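The parameter groups give the pretrained encoder a fifth of the base learning rate while the freshly initialized layers train at the full rate. A minimal sketch of the same idea (the two linear layers stand in for encoder and decoder):
```python
import torch

encoder = torch.nn.Linear(8, 8)  # stand-in for the pretrained backbone
decoder = torch.nn.Linear(8, 1)  # stand-in for the new decoder head
base_lr = 0.001
optimizer = torch.optim.SGD([
    {'params': encoder.parameters(), 'lr': base_lr / 5},  # gentle updates
    {'params': decoder.parameters(), 'lr': base_lr},      # full-rate updates
])
```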
......@@ -321,7 +321,7 @@ class UNetResNetV5(nn.Module):
F.interpolate(d3, scale_factor=4, mode='bilinear', align_corners=False),
F.interpolate(d4, scale_factor=8, mode='bilinear', align_corners=False),
F.interpolate(d5, scale_factor=16, mode='bilinear', align_corners=False),
], 1)
f = F.dropout2d(f, p=self.dropout_2d)
......@@ -331,7 +331,7 @@ class UNetResNetV6(nn.Module):
'''
1. Remove first pool from UNetResNetV5, such that resolution is doubled
2. Remove scSE from center block
3. Increase default dropout
'''
def __init__(self, encoder_depth, num_filters=32, dropout_2d=0.5):
super(UNetResNetV6, self).__init__()
......@@ -400,7 +400,7 @@ class UNetResNetV6(nn.Module):
F.interpolate(d4, scale_factor=4, mode='bilinear', align_corners=False),
F.interpolate(d5, scale_factor=8, mode='bilinear', align_corners=False),
F.interpolate(center, scale_factor=16, mode='bilinear', align_corners=False),
], 1)
f = F.dropout2d(f, p=self.dropout_2d, training=self.training)
......
......@@ -71,7 +71,7 @@ def do_tta_predict(args, model, ckp_path, tta_num=4):
outputs = np.flip(outputs, 1)
#print(outputs.shape)
preds.append(outputs)
parent_dir = ckp_path+'_out'
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
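The np.flip undoes the horizontal-flip augmentation on the model output so it can be averaged with the unflipped prediction. The general flip-TTA pattern, as a sketch (the predict callable and the flip axis are assumptions):
```python
import numpy as np

def tta_flip_mean(predict, batch):
    p0 = predict(batch)
    # Flip the input, predict, then flip the prediction back.
    p1 = np.flip(predict(np.flip(batch, 1)), 1)
    return (p0 + p1) / 2
```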
......@@ -142,9 +142,9 @@ def generate_preds(outputs, target_size, pad_mode, threshold=0.5):
def ensemble_predict(args):
model = eval(args.model_name)(args.layers, num_filters=args.nf)
checkpoints = [
r'D:\data\salt\models\pseudo\UNetResNetV4_34\edge\best_5.pth',
r'D:\data\salt\models\pseudo\UNetResNetV4_34\edge\best_6.pth',
r'D:\data\salt\models\pseudo\UNetResNetV4_34\edge\best_8.pth',
r'D:\data\salt\models\pseudo\UNetResNetV4_34\edge\best_9.pth'
......@@ -160,7 +160,7 @@ def ensemble_np_results(args):
np_files6 = glob.glob(r'D:\data\salt\models\ensemble\*.npy')
np_files = np_files1 + np_files2 + np_files3 + np_files6
print(np_files)
ensemble_np(args, np_files)
def predict_model(args):
model = eval(args.model_name)(args.layers, num_filters=args.nf)
......
......@@ -40,7 +40,7 @@ def prepare_metadata():
)
meta.to_csv(settings.META_FILE, index=None)
def cov_to_class(val):
for i in range(0, 11):
if val * 10 <= i:
return i
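cov_to_class buckets a tile's salt coverage, a fraction in [0, 1], into one of eleven classes used for stratified splitting; for example:
```python
def cov_to_class(val):
    for i in range(0, 11):
        if val * 10 <= i:
            return i

for val in (0.0, 0.05, 0.55, 1.0):
    print(val, cov_to_class(val))  # -> 0, 1, 6, 10
```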
......@@ -57,7 +57,7 @@ def generate_stratified_metadata():
train_df["file_path_image"] = train_df.index.map(lambda x: os.path.join(settings.TRAIN_IMG_DIR, '{}.png'.format(x)))
train_df["file_path_mask"] = train_df.index.map(lambda x: os.path.join(settings.TRAIN_MASK_DIR, '{}.png'.format(x)))
train_df.to_csv(os.path.join(settings.DATA_DIR, 'train_meta2.csv'),
columns=['file_path_image','file_path_mask','is_train','z','salt_exists', 'coverage_class', 'coverage'])
train_splits = {}
......@@ -65,7 +65,7 @@ def generate_stratified_metadata():
for i, (train_index, valid_index) in enumerate(kf.split(train_df.index.values.reshape(-1), train_df.coverage_class.values.reshape(-1))):
train_splits[str(i)] = {
'train_index': train_index.tolist(),
'val_index': valid_index.tolist()
}
with open(os.path.join(settings.DATA_DIR, 'train_split.json'), 'w') as f:
json.dump(train_splits, f, indent=4)
......
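The indices come from scikit-learn's StratifiedKFold over the coverage classes, so every fold sees a similar distribution of salt coverage. A self-contained sketch of how the dictionary is built (the fold count, seed, and random labels are assumptions):
```python
import numpy as np
from sklearn.model_selection import StratifiedKFold

y = np.random.randint(0, 11, size=100)  # stand-in coverage classes
X = np.zeros(len(y))                    # features are unused by the split
kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=1)
train_splits = {str(i): {'train_index': tr.tolist(), 'val_index': va.tolist()}
                for i, (tr, va) in enumerate(kf.split(X, y))}
```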
......@@ -57,7 +57,7 @@ def weighted_loss(args, output, target, epoch=0):
def train(args):
print('start training...')
"""@nni.variable(nni.choice('UNetResNetV4', 'UNetResNetV5', 'UNetResNetV6'), name=model_name)"""
model_name = args.model_name
......@@ -123,10 +123,10 @@ def train(args):
img, target, salt_target = img.cuda(), target.cuda(), salt_target.cuda()
optimizer.zero_grad()
output, salt_out = model(img)
loss, *_ = weighted_loss(args, (output, salt_out), (target, salt_target), epoch=epoch)
loss.backward()
if args.optim == 'Adam' and args.adamw:
wd = 0.0001
for group in optimizer.param_groups:
......@@ -141,7 +141,7 @@ def train(args):
iout, iou, focal_loss, lovaz_loss, salt_loss, mix_score = validate(args, model, val_loader, epoch=epoch)
"""@nni.report_intermediate_result(iout)"""
_save_ckp = ''
if iout > best_iout:
best_iout = iout
......@@ -155,7 +155,7 @@ def train(args):
focal_loss, lovaz_loss, iou, iout, best_iout, (time.time() - bg) / 60, _save_ckp, salt_loss))
model.train()
if args.lrs == 'plateau':
lr_scheduler.step(best_iout)
else:
......@@ -163,7 +163,7 @@ def train(args):
del model, train_loader, val_loader, optimizer, lr_scheduler
"""@nni.report_final_result(best_iout)"""
def get_lrs(optimizer):
lrs = []
for pgs in optimizer.state_dict()['param_groups']:
......@@ -188,7 +188,7 @@ def validate(args, model, val_loader, epoch=0, threshold=0.5):
salt_loss += _salt_loss
w_loss += _w_loss
output = torch.sigmoid(output)
for o in output.cpu():
outputs.append(o.squeeze().numpy())
......@@ -217,7 +217,7 @@ def generate_preds(args, outputs, target_size, threshold=0.5):
return preds
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='TGS Salt segmentation')
parser.add_argument('--layers', default=34, type=int, help='model layers')
parser.add_argument('--nf', default=32, type=int, help='num_filters param for model')
......@@ -244,7 +244,7 @@ if __name__ == '__main__':
parser.add_argument('--depths', action='store_true')
parser.add_argument('--dev_mode', action='store_true')
parser.add_argument('--adamw', action='store_true')
args = parser.parse_args()
'''@nni.get_next_parameter()'''
......
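Taken together, the annotations in train.py follow NNI's standard trial shape: fetch parameters once, substitute annotated variables, report intermediate metrics per epoch, and report one final metric. A condensed sketch (the loop and placeholder metric are illustrative):
```python
'''@nni.get_next_parameter()'''  # fetch this trial's values from the tuner

"""@nni.variable(nni.choice('UNetResNetV4', 'UNetResNetV5', 'UNetResNetV6'), name=model_name)"""
model_name = 'UNetResNetV4'  # rewritten by NNI when annotations are enabled

best_iout = 0.0
for epoch in range(10):
    iout = 0.05 * epoch  # placeholder validation metric
    """@nni.report_intermediate_result(iout)"""
    best_iout = max(best_iout, iout)

"""@nni.report_final_result(best_iout)"""
```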
......@@ -166,7 +166,7 @@ def get_nfold_split2(ifold, nfold=10):
train_index = train_splits[str(ifold)]['train_index']
valid_index = train_splits[str(ifold)]['val_index']
return meta_train.iloc[train_index], meta_train.iloc[valid_index]
def get_test_meta():
......
......@@ -93,7 +93,7 @@ def train(args, params):
x_train, y_train, x_test, y_test = load_mnist_data(args)
model = create_mnist_model(params)
# nni
model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
validation_data=(x_test, y_test), callbacks=[SendMetrics(), TensorBoard(log_dir=TENSORBOARD_DIR)])
......
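SendMetrics is the example's Keras callback that forwards each epoch's validation metric to NNI; a minimal version might look like this (the exact metric key is an assumption):
```python
import nni
from keras.callbacks import Callback

class SendMetrics(Callback):
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # Forward the epoch's validation accuracy to the NNI tuner.
        nni.report_intermediate_result(logs.get('val_acc', 0))
```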
......@@ -5,8 +5,8 @@
"_value" : [{"optimizer": "Adam", "learning_rate": 0.00001},
{"optimizer": "Adam", "learning_rate": 0.0001},
{"optimizer": "Adam", "learning_rate": 0.001},
{"optimizer": "SGD", "learning_rate": 0.01},
{"optimizer": "SGD", "learning_rate": 0.005},
{"optimizer": "SGD", "learning_rate": 0.01},
{"optimizer": "SGD", "learning_rate": 0.005},
{"optimizer": "SGD", "learning_rate": 0.0002}]
}
}
\ No newline at end of file
......@@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NNI (https://github.com/Microsoft/nni) modified this code to show how to
# integrate distributed pytorch training with NNI SDK
#
import os
import torch
......
......@@ -121,7 +121,7 @@ def main(params):
for i in range(params['batch_num']):
batch = mnist.train.next_batch(params['batch_size'])
mnist_network.train_step.run(feed_dict={mnist_network.x: batch[0], mnist_network.y: batch[1]})
if i % 100 == 0:
train_accuracy = mnist_network.accuracy.eval(feed_dict={
mnist_network.x: batch[0], mnist_network.y: batch[1]})
......
......@@ -10,7 +10,7 @@ trainingServicePlatform: local
useAnnotation: false
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, NetworkMorphism
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: NetworkMorphism
classArgs:
#choice: maximize, minimize
......
......@@ -9,7 +9,7 @@ trainingServicePlatform: pai
useAnnotation: false
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, NetworkMorphism
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: NetworkMorphism
classArgs:
#choice: maximize, minimize
......
......@@ -8,7 +8,7 @@ The Network Morphism is a built-in Tuner using network morphism techniques to se
Network morphism is currently framework-based; framework-free methods have not been implemented yet. The training frameworks supported so far are PyTorch and Keras. If you are familiar with the intermediate JSON format, you can build your own model in your own training framework. In the future, we will change the intermediate format from JSON to ONNX in order to get a [standard intermediate representation spec](https://github.com/onnx/onnx/blob/master/docs/IR.md).
### 2. Install the requirements
```bash
# install the requirements packages
......@@ -32,7 +32,7 @@ trainingServicePlatform: local
useAnnotation: false
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, NetworkMorphism
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: NetworkMorphism
classArgs:
#choice: maximize, minimize
......@@ -83,7 +83,7 @@ net = build_graph_from_json(RCV_CONFIG)
nni.report_final_result(best_acc)
```
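Putting the snippet in context, a Network Morphism trial follows a receive-build-train-report cycle (a sketch: build_graph_from_json is the example's own helper, and the training loop is elided):
```python
import nni

RCV_CONFIG = nni.get_next_parameter()    # serialized graph chosen by the tuner
net = build_graph_from_json(RCV_CONFIG)  # example-provided helper

best_acc = 0.0
# ... train `net`, updating best_acc and optionally calling
# nni.report_intermediate_result(acc) after each epoch ...
nni.report_final_result(best_acc)
```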
### 5. Submit this job
```bash
# You can use the NNI command-line tool "nnictl" to create a job and submit it to NNI
......
......@@ -10,7 +10,7 @@ trainingServicePlatform: local
useAnnotation: false
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, NetworkMorphism
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: NetworkMorphism
classArgs:
#choice: maximize, minimize
......
......@@ -9,7 +9,7 @@ trainingServicePlatform: pai
useAnnotation: false
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, NetworkMorphism
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: NetworkMorphism
classArgs:
#choice: maximize, minimize
......