yaoyuping / nnDetection · Commits · aeb83030

Commit aeb83030
Authored Apr 22, 2021 by mibaumgartner
add configs
Parent: 4116e6ad
Changes: 26 · Showing 6 changed files with 470 additions and 0 deletions (+470 −0)
nndet/conf/train/deprecated/h0c04SGD.yaml        (+60 −0)
nndet/conf/train/deprecated/h0c05Deeplesion.yaml (+61 −0)
nndet/conf/train/deprecated/h0c06.yaml           (+60 −0)
nndet/conf/train/deprecated/h0c06SWA.yaml        (+63 −0)
nndet/conf/train/smoke.yaml                      (+115 −0)
nndet/conf/train/v001.yaml                       (+111 −0)
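
All six files are Hydra/OmegaConf training configs: the "# @package _global_" directive lifts each file's keys to the root of the composed config, and the "defaults" list pulls in a named augmentation preset. As a minimal sketch, one of them could be composed and inspected like this (the config directory path and primary config name are assumptions for illustration, not nnDetection's actual entry point):

    from hydra import compose, initialize_config_dir

    # Hypothetical compose call; nnDetection drives composition through its
    # own scripts, so the path and config_name below are placeholders.
    with initialize_config_dir(config_dir="/abs/path/to/nndet/conf"):
        cfg = compose(config_name="config", overrides=["train=v001"])
    print(cfg.trainer_cfg.initial_lr)  # 0.01
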
nndet/conf/train/deprecated/h0c04SGD.yaml (new file, mode 100644)

# @package _global_
defaults:
  - augmentation: base_more

model: "RetinaUNetC009"
trainer: "DetectionTrainerPolyLR_SGD090"
predictor: "BoxPredictorSelective"
plan: D3C002_3d

planners:
  2d: [D2C002]  # D2C002 D2C002LR20
  3d: [D2C002, D3C002]  # [D3C002LR15, D3C002LR20] [D3C002NR, D3C002RibFrac] [D2C002, D3C002]

augment_cfg:
  oversample_foreground_percent: 0.5  # ratio of fg and bg in batches
  augmentation: ${augmentation}
  dataloader: "DataLoader{}DFast"
  dataloader_kwargs: {}

trainer_cfg:
  # Per default training is deterministic; non-deterministic training allows
  # cudnn.benchmark which can give up to 20% more performance. Set this to
  # False to perform non-deterministic training.
  deterministic: True
  fp16: True  # enable fp16 training. Makes sense for supported hardware only!

  eval_score_key: "mAP_IoU_0.10_0.50_0.05_MaxDet_100"  # metric to optimize
  num_batches_per_epoch: 2500  # number of train batches per epoch
  num_val_batches_per_epoch: 100  # number of val batches per epoch
  max_num_epochs: 50  # max number of epochs
  overwrites: {}

  initial_lr: 0.01  # initial learning rate to start with
  weight_decay: 3.e-5  # weight decay for optimizer
  warmup: 4000  # number of iterations with warmup
  warmup_lr: 1.e-6  # learning rate to start warmup from

model_cfg:
  matching:
    # IoU matcher parameters
    fg_iou_thresh: 0.4  # IoU threshold for anchors to be matched positive
    bg_iou_thresh: 0.3  # IoU threshold for anchors to be matched negative
    # If a ground truth box has no matched anchors, use the best anchor which was found
    allow_low_quality_matches: True
    # ATSS matching
    num_candidates: 4
    center_in_gt: False

  hnm:
    # parameters for hard negative mining
    batch_size_per_image: 32  # number of anchors sampled per image
    positive_fraction: 0.33  # defines ratio between positive and negative anchors
    # hard negatives are sampled from a pool of size:
    # batch_size_per_image * (1 - positive_fraction) * pool_size
    pool_size: 20
    min_neg: 1  # minimum number of negative anchors sampled per image

  plan_arch_overwrites: {}  # overwrite arguments of architecture
  plan_anchors_overwrites: {}  # overwrite arguments of anchors
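
The hnm block's comment gives the hard-negative pool formula; plugging in the values above makes the sampling budget concrete (plain arithmetic with simple truncation — nnDetection's sampler may round differently):

    # Worked example for the hnm values in h0c04SGD.yaml.
    batch_size_per_image = 32
    positive_fraction = 0.33
    pool_size = 20

    num_pos = int(batch_size_per_image * positive_fraction)  # 10 positive slots
    num_neg = batch_size_per_image - num_pos                 # 22 negative slots
    pool = int(batch_size_per_image * (1 - positive_fraction) * pool_size)
    print(num_pos, num_neg, pool)  # 10 22 428: hardest 22 negatives drawn from a top-428 pool
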
nndet/conf/train/deprecated/h0c05Deeplesion.yaml (new file, mode 100644)

# @package _global_
defaults:
  - augmentation: mirror_only

model: "RetinaUNetC009"
trainer: "DetectionTrainerPolyLR_SGD090"
predictor: "BoxPredictorSelective"
plan: D2C002_2d

planners:
  2d: [D2C002]
  3d: [D2C002, D3C002]  # [D3C002LR15, D3C002LR20] [D3C002NR, D3C002RibFrac] [D2C002, D3C002]

augment_cfg:
  oversample_foreground_percent: 0.5  # ratio of fg and bg in batches
  augmentation: ${augmentation}
  dataloader: "DataLoader2DDeeplesion"
  dataloader_kwargs: {}

trainer_cfg:
  # Per default training is deterministic; non-deterministic training allows
  # cudnn.benchmark which can give up to 20% more performance. Set this to
  # False to perform non-deterministic training.
  deterministic: True
  fp16: True  # enable fp16 training. Makes sense for supported hardware only!

  eval_score_key: "mAP_IoU_0.10_0.50_0.05_MaxDet_100"  # metric to optimize
  num_batches_per_epoch: 2500  # number of train batches per epoch
  num_val_batches_per_epoch: 100  # number of val batches per epoch
  max_num_epochs: 200  # max number of epochs
  overwrites: {}

  initial_lr: 0.01  # initial learning rate to start with
  weight_decay: 3.e-5  # weight decay for optimizer
  warmup: 4000  # number of iterations with warmup
  warmup_lr: 1.e-6  # learning rate to start warmup from

model_cfg:
  matching:
    # IoU matcher parameters
    fg_iou_thresh: 0.4  # IoU threshold for anchors to be matched positive
    bg_iou_thresh: 0.3  # IoU threshold for anchors to be matched negative
    # If a ground truth box has no matched anchors, use the best anchor which was found
    allow_low_quality_matches: True
    # ATSS matching
    num_candidates: 4
    center_in_gt: False

  hnm:
    # parameters for hard negative mining
    batch_size_per_image: 32  # number of anchors sampled per image
    positive_fraction: 0.33  # defines ratio between positive and negative anchors
    # hard negatives are sampled from a pool of size:
    # batch_size_per_image * (1 - positive_fraction) * pool_size
    pool_size: 20
    min_neg: 1  # minimum number of negative anchors sampled per image

  plan_arch_overwrites:
    # overwrite arguments of architecture
    in_channels: 4
  plan_anchors_overwrites: {}  # overwrite arguments of anchors
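
Unlike the other configs, this one uses plan_arch_overwrites to force in_channels to 4. A minimal sketch of how such an overwrite dict could be merged onto a planned architecture, using OmegaConf.merge (the plan keys shown are hypothetical; nnDetection applies overwrites through its own plan-handling code):

    from omegaconf import OmegaConf

    plan_arch = OmegaConf.create({"in_channels": 1, "dim": 2})  # hypothetical planner output
    overwrites = OmegaConf.create({"in_channels": 4})           # from this config
    plan_arch = OmegaConf.merge(plan_arch, overwrites)
    print(plan_arch.in_channels)  # 4, e.g. if the DeepLesion loader stacks context slices
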
nndet/conf/train/deprecated/h0c06.yaml (new file, mode 100644)

# @package _global_
defaults:
  - augmentation: base_more

model: "RetinaUNetC009LH1"
trainer: "BoxTrainer"
predictor: "BoxPredictorSelective"
plan: D3C002_3d

planners:
  2d: [D2C002]
  3d: [D3C003FDR]  # [D3C002LR15, D3C002LR20] [D3C002NR, D3C002RibFrac] [D2C002, D3C002]

augment_cfg:
  oversample_foreground_percent: 0.5  # ratio of fg and bg in batches
  augmentation: ${augmentation}
  dataloader: "DataLoader{}DOffset"
  dataloader_kwargs: {}

trainer_cfg:
  # Per default training is deterministic; non-deterministic training allows
  # cudnn.benchmark which can give up to 20% more performance. Set this to
  # False to perform non-deterministic training.
  deterministic: True
  fp16: True  # enable fp16 training. Makes sense for supported hardware only!

  eval_score_key: "mAP_IoU_0.10_0.50_0.05_MaxDet_100"  # metric to optimize
  num_batches_per_epoch: 2500  # number of train batches per epoch
  num_val_batches_per_epoch: 100  # number of val batches per epoch
  max_num_epochs: 50  # max number of epochs
  overwrites: {}

  initial_lr: 0.01  # initial learning rate to start with
  weight_decay: 3.e-5  # weight decay for optimizer
  warmup: 4000  # number of iterations with warmup
  warmup_lr: 1.e-6  # learning rate to start warmup from

model_cfg:
  matching:
    # IoU matcher parameters
    fg_iou_thresh: 0.4  # IoU threshold for anchors to be matched positive
    bg_iou_thresh: 0.3  # IoU threshold for anchors to be matched negative
    # If a ground truth box has no matched anchors, use the best anchor which was found
    allow_low_quality_matches: True
    # ATSS matching
    num_candidates: 4
    center_in_gt: False

  hnm:
    # parameters for hard negative mining
    batch_size_per_image: 32  # number of anchors sampled per image
    positive_fraction: 0.33  # defines ratio between positive and negative anchors
    # hard negatives are sampled from a pool of size:
    # batch_size_per_image * (1 - positive_fraction) * pool_size
    pool_size: 20
    min_neg: 1  # minimum number of negative anchors sampled per image

  plan_arch_overwrites: {}  # overwrite arguments of architecture
  plan_anchors_overwrites: {}  # overwrite arguments of anchors
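
The matching block configures an IoU interval matcher: anchors above fg_iou_thresh become positives, anchors below bg_iou_thresh become negatives, and the band in between is ignored; allow_low_quality_matches additionally keeps each ground-truth box's single best anchor. A compact sketch of that logic (hedged; nnDetection's matcher is more involved and also supports the ATSS keys listed above):

    import torch

    def match_anchors(iou: torch.Tensor, fg_thresh: float = 0.4, bg_thresh: float = 0.3):
        """iou: [num_gt, num_anchors] pairwise IoU. Returns per-anchor labels:
        matched gt index, -1 for negative, -2 for ignored."""
        max_iou, matched_gt = iou.max(dim=0)
        labels = matched_gt.clone()
        labels[max_iou < fg_thresh] = -2  # between thresholds: ignore
        labels[max_iou < bg_thresh] = -1  # below bg threshold: negative
        # allow_low_quality_matches: each gt keeps its best-overlapping anchor
        labels[iou.argmax(dim=1)] = torch.arange(iou.shape[0])
        return labels
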
nndet/conf/train/deprecated/h0c06SWA.yaml (new file, mode 100644)

# @package _global_
defaults:
  - augmentation: base_more

model: "RetinaUNetC009LH1"
trainer: "BoxTrainerSWA"
predictor: "BoxPredictorSelective"
plan: D3C002_3d

planners:
  2d: [D2C002]
  3d: [D3C003FD]  # [D3C002LR15, D3C002LR20] [D3C002NR, D3C002RibFrac] [D2C002, D3C002]

augment_cfg:
  oversample_foreground_percent: 0.5  # ratio of fg and bg in batches
  augmentation: ${augmentation}
  dataloader: "DataLoader{}DOffset"
  dataloader_kwargs: {}

trainer_cfg:
  # Per default training is deterministic; non-deterministic training allows
  # cudnn.benchmark which can give up to 20% more performance. Set this to
  # False to perform non-deterministic training.
  deterministic: True
  fp16: True  # enable fp16 training. Makes sense for supported hardware only!

  eval_score_key: "mAP_IoU_0.10_0.50_0.05_MaxDet_100"  # metric to optimize
  num_batches_per_epoch: 2500  # number of train batches per epoch
  num_val_batches_per_epoch: 100  # number of val batches per epoch
  max_num_epochs: 60  # max number of epochs
  overwrites: {}

  initial_lr: 0.01  # initial learning rate to start with
  weight_decay: 3.e-5  # weight decay for optimizer
  warmup: 4000  # number of iterations with warmup
  warmup_lr: 1.e-6  # learning rate to start warmup from
  swa_epochs: 10  # number of epochs to run swa with cyclic learning rate
  swa_snapshots: 10  # number of swa snapshots

model_cfg:
  matching:
    # IoU matcher parameters
    fg_iou_thresh: 0.4  # IoU threshold for anchors to be matched positive
    bg_iou_thresh: 0.3  # IoU threshold for anchors to be matched negative
    # If a ground truth box has no matched anchors, use the best anchor which was found
    allow_low_quality_matches: True
    # ATSS matching
    num_candidates: 4
    center_in_gt: False

  hnm:
    # parameters for hard negative mining
    batch_size_per_image: 32  # number of anchors sampled per image
    positive_fraction: 0.33  # defines ratio between positive and negative anchors
    # hard negatives are sampled from a pool of size:
    # batch_size_per_image * (1 - positive_fraction) * pool_size
    pool_size: 20
    min_neg: 1  # minimum number of negative anchors sampled per image

  plan_arch_overwrites: {}  # overwrite arguments of architecture
  plan_anchors_overwrites: {}  # overwrite arguments of anchors
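
This variant swaps in BoxTrainerSWA and adds swa_epochs / swa_snapshots: after regular training, ten extra epochs run with a cyclic learning rate and ten weight snapshots are collected and averaged. A sketch of the averaging step only (an assumption about the mechanics; the cyclic schedule and snapshot timing live in the trainer, not in this file):

    import copy
    import torch

    def average_snapshots(snapshots: list) -> dict:
        """Equally weighted average of model state dicts (SWA-style)."""
        avg = copy.deepcopy(snapshots[0])
        for key in avg:
            stacked = torch.stack([s[key].float() for s in snapshots])
            avg[key] = stacked.mean(dim=0)
        return avg
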
nndet/conf/train/smoke.yaml (new file, mode 100644)

# @package _global_
defaults:
  - augmentation: base_more

module: "RetinaUNetC010"
predictor: "BoxPredictorSelective"
plan: D3V001_3d

planners:
  2d: [D2C002]
  3d: [D3V001]  # [D3C002LR15, D3C002LR20] [D3C002NR, D3C002RibFrac] [D2C002, D3C002]

augment_cfg:
  augmentation: ${augmentation}
  num_train_batches_per_epoch: ${trainer_cfg.num_train_batches_per_epoch}
  num_val_batches_per_epoch: ${trainer_cfg.num_val_batches_per_epoch}
  dataloader: "DataLoader{}DOffset"
  oversample_foreground_percent: 0.5  # ratio of fg and bg in batches
  dataloader_kwargs: {}
  num_threads: ${oc.env:det_num_threads, "12"}
  num_cached_per_thread: 2
  multiprocessing: True  # only deactivate this if debugging

trainer_cfg:
  gpus: 1  # number of gpus
  accelerator: ddp  # distributed backend
  precision: 16  # mixed precision
  amp_backend: native  # mixed precision backend
  amp_level: O1  # when the mixed precision backend is APEX, use O1

  # Per default training is deterministic; non-deterministic training allows
  # cudnn.benchmark which can give up to 20% more performance. Set this to
  # False to perform non-deterministic training.
  deterministic: False
  benchmark: False
  # fp16: True  # enable fp16 training. Makes sense for supported hardware only!

  monitor_key: "mAP_IoU_0.10_0.50_0.05_MaxDet_100"  # used to determine the best model
  monitor_mode: "max"  # metric operation mode, "min" or "max"
  max_num_epochs: 2  # max number of epochs
  num_train_batches_per_epoch: 20  # number of train batches per epoch
  num_val_batches_per_epoch: 10  # number of val batches per epoch

  initial_lr: 0.01  # initial learning rate to start with
  sgd_momentum: 0.9  # momentum term
  sgd_nesterov: True  # nesterov momentum
  weight_decay: 3.e-5  # weight decay for optimizer
  momentum: 0.9  # momentum term
  warm_iterations: 4000  # number of iterations with warmup
  warm_lr: 1.e-6  # learning rate to start warmup from
  poly_gamma: 0.9
  swa_epochs: 2  # number of epochs to run swa with cyclic learning rate

model_cfg:
  encoder_kwargs: {}  # keyword arguments passed to encoder
  decoder_kwargs:
    # keyword arguments passed to decoder
    min_out_channels: 8
    upsampling_mode: "transpose"
    num_lateral: 1
    norm_lateral: False
    activation_lateral: False
    num_out: 1
    norm_out: False
    activation_out: False

  head_kwargs: {}  # keyword arguments passed to head
  head_classifier_kwargs:
    # keyword arguments passed to classifier in head
    num_convs: 2
    norm_channels_per_group: 16
    norm_affine: True
    reduction: "mean"
    loss_weight: 1.
    # gamma: 1.
    # alpha: 0.75
    # reduction: "sum"
    # loss_weight: 0.3
    prior_prob: 0.01
  head_regressor_kwargs:
    # keyword arguments passed to regressor in head
    num_convs: 2
    norm_channels_per_group: 16
    norm_affine: True
    reduction: "sum"
    loss_weight: 1.
    learn_scale: True
  head_sampler_kwargs:
    # keyword arguments passed to sampler
    batch_size_per_image: 32  # number of anchors sampled per image
    positive_fraction: 0.33  # defines ratio between positive and negative anchors
    # hard negatives are sampled from a pool of size:
    # batch_size_per_image * (1 - positive_fraction) * pool_size
    pool_size: 20
    min_neg: 1  # minimum number of negative anchors sampled per image

  segmenter_kwargs:
    dice_kwargs:
      batch_dice: True
  matcher_kwargs:
    # keyword arguments passed to matcher
    num_candidates: 4
    center_in_gt: False

  plan_arch_overwrites: {}  # overwrite arguments of architecture
  plan_anchors_overwrites: {}  # overwrite arguments of anchors

debug:
  num_cases_val: 2  # only predict two cases for validation results
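
Two entries here are worth noting: num_threads resolves the det_num_threads environment variable with "12" as the fallback (OmegaConf's oc.env resolver), and the "{}" in the dataloader name is presumably filled with the data dimensionality before lookup. A small check of the resolver behaviour (oc.env is re-evaluated on each access in current OmegaConf versions):

    import os
    from omegaconf import OmegaConf

    cfg = OmegaConf.create({"num_threads": '${oc.env:det_num_threads, "12"}'})
    os.environ.pop("det_num_threads", None)
    print(cfg.num_threads)  # "12" -> quoted default used when the variable is unset
    os.environ["det_num_threads"] = "6"
    print(cfg.num_threads)  # "6"  -> environment value wins
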
nndet/conf/train/v001.yaml (new file, mode 100644)

# @package _global_
defaults:
  - augmentation: base_more

module: RetinaUNetV001
predictor: BoxPredictorSelective
plan: D3V001_3d

planners:
  2d: [D2C002]
  3d: [D3V001]  # [D3C002LR15, D3C002LR20] [D3C002NR, D3C002RibFrac] [D2C002, D3C002]

augment_cfg:
  augmentation: ${augmentation}
  num_train_batches_per_epoch: ${trainer_cfg.num_train_batches_per_epoch}
  num_val_batches_per_epoch: ${trainer_cfg.num_val_batches_per_epoch}
  dataloader: "DataLoader{}DOffset"
  oversample_foreground_percent: 0.5  # ratio of fg and bg in batches
  dataloader_kwargs: {}
  num_threads: ${oc.env:det_num_threads, "12"}
  num_cached_per_thread: 2
  multiprocessing: True  # only deactivate this if debugging

trainer_cfg:
  gpus: 1  # number of gpus
  accelerator: ddp  # distributed backend
  precision: 16  # mixed precision
  amp_backend: native  # mixed precision backend
  amp_level: O1  # when the mixed precision backend is APEX, use O1

  # Per default training is deterministic; non-deterministic training allows
  # cudnn.benchmark which can give up to 20% more performance. Set this to
  # False to perform non-deterministic training.
  deterministic: False
  benchmark: False
  # fp16: True  # enable fp16 training. Makes sense for supported hardware only!

  monitor_key: "mAP_IoU_0.10_0.50_0.05_MaxDet_100"  # used to determine the best model
  monitor_mode: "max"  # metric operation mode, "min" or "max"
  max_num_epochs: 50  # max number of epochs
  num_train_batches_per_epoch: 2500  # number of train batches per epoch
  num_val_batches_per_epoch: 100  # number of val batches per epoch

  initial_lr: 0.01  # initial learning rate to start with
  sgd_momentum: 0.9  # momentum term
  sgd_nesterov: True  # nesterov momentum
  weight_decay: 3.e-5  # weight decay for optimizer
  momentum: 0.9  # momentum term
  warm_iterations: 4000  # number of iterations with warmup
  warm_lr: 1.e-6  # learning rate to start warmup from
  poly_gamma: 0.9
  swa_epochs: 10  # number of epochs to run swa with cyclic learning rate

model_cfg:
  encoder_kwargs: {}  # keyword arguments passed to encoder
  decoder_kwargs:
    # keyword arguments passed to decoder
    min_out_channels: 8
    upsampling_mode: "transpose"
    num_lateral: 1
    norm_lateral: False
    activation_lateral: False
    num_out: 1
    norm_out: False
    activation_out: False

  head_kwargs: {}  # keyword arguments passed to head
  head_classifier_kwargs:
    # keyword arguments passed to classifier in head
    num_convs: 1
    norm: "Group"
    norm_kwargs:
      channels_per_group: 16
      affine: True
    reduction: "mean"
    loss_weight: 1.
    prior_prob: 0.01
  head_regressor_kwargs:
    # keyword arguments passed to regressor in head
    num_convs: 1
    norm: "Group"
    norm_kwargs:
      channels_per_group: 16
      affine: True
    reduction: "sum"
    loss_weight: 1.
    learn_scale: True
  head_sampler_kwargs:
    # keyword arguments passed to sampler
    batch_size_per_image: 32  # number of anchors sampled per image
    positive_fraction: 0.33  # defines ratio between positive and negative anchors
    # hard negatives are sampled from a pool of size:
    # batch_size_per_image * (1 - positive_fraction) * pool_size
    pool_size: 20
    min_neg: 1  # minimum number of negative anchors sampled per image

  segmenter_kwargs:
    dice_kwargs:
      batch_dice: True
  matcher_kwargs:
    # keyword arguments passed to matcher
    num_candidates: 4
    center_in_gt: False

  plan_arch_overwrites: {}  # overwrite arguments of architecture
  plan_anchors_overwrites: {}  # overwrite arguments of anchors
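
Across smoke.yaml and v001.yaml, the warm_*/poly_* keys describe the learning-rate schedule: linear warmup from warm_lr to initial_lr over warm_iterations, followed by polynomial decay with exponent poly_gamma. A hedged reconstruction with the v001 values (the exact formula lives in the trainer, which is not part of this diff):

    def lr_at(step: int,
              initial_lr: float = 0.01,
              warm_lr: float = 1.0e-6,
              warm_iterations: int = 4000,
              poly_gamma: float = 0.9,
              max_iterations: int = 50 * 2500) -> float:
        """Linear warmup followed by polynomial ("poly") decay."""
        if step < warm_iterations:
            return warm_lr + (step / warm_iterations) * (initial_lr - warm_lr)
        progress = (step - warm_iterations) / (max_iterations - warm_iterations)
        return initial_lr * (1.0 - progress) ** poly_gamma

    print(lr_at(0), lr_at(4000), lr_at(125000))  # 1e-06 0.01 0.0
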
(Commit diff is paginated: pages 1–2.)