# Commit e2696ece authored by mashun1
# Browse files
#
# controlnet
#
# parents
# Pipeline #643 canceled with stages
---
# general settings
name: BasicVSR_REDS
model_type: VideoRecurrentModel
scale: 4
num_gpu: auto  # official: 8 GPUs
manual_seed: 0

# dataset and data loader settings
datasets:
  train:
    name: REDS
    type: REDSRecurrentDataset
    dataroot_gt: datasets/REDS/train_sharp
    dataroot_lq: datasets/REDS/train_sharp_bicubic/X4
    meta_info_file: basicsr/data/meta_info/meta_info_REDS_GT.txt
    val_partition: REDS4  # set to 'official' when use the official validation partition
    test_mode: false
    io_backend:
      type: disk

    num_frame: 15
    gt_size: 256
    interval_list: [1]
    random_reverse: false
    use_hflip: true
    use_rot: true

    # data loader
    num_worker_per_gpu: 6
    batch_size_per_gpu: 4
    dataset_enlarge_ratio: 200
    prefetch_mode: ~

  val:
    name: REDS4
    type: VideoRecurrentTestDataset
    dataroot_gt: datasets/REDS4/GT
    dataroot_lq: datasets/REDS4/sharp_bicubic

    cache_data: true
    io_backend:
      type: disk

    num_frame: -1  # not needed

# network structures
network_g:
  type: BasicVSR
  num_feat: 64
  num_block: 30
  spynet_path: experiments/pretrained_models/flownet/spynet_sintel_final-3d2a1287.pth

# path
path:
  pretrain_network_g: ~
  strict_load_g: true
  resume_state: ~

# training settings
train:
  ema_decay: 0.999
  optim_g:
    type: Adam
    lr: !!float 2e-4
    weight_decay: 0
    betas: [0.9, 0.99]

  scheduler:
    type: CosineAnnealingRestartLR
    periods: [300000]
    restart_weights: [1]
    eta_min: !!float 1e-7

  total_iter: 300000
  warmup_iter: -1  # no warm up
  fix_flow: 5000
  flow_lr_mul: 0.125

  # losses
  pixel_opt:
    type: CharbonnierLoss
    loss_weight: 1.0
    reduction: mean

# validation settings
val:
  val_freq: !!float 5e3
  save_img: false

  metrics:
    psnr:  # metric name, can be arbitrary
      type: calculate_psnr
      crop_border: 0
      test_y_channel: false

# logging settings
logger:
  print_freq: 100
  save_checkpoint_freq: !!float 5e3
  use_tb_logger: true
  wandb:
    project: ~
    resume_id: ~

# dist training settings
dist_params:
  backend: nccl
  port: 29500

find_unused_parameters: true
---
# general settings
name: BasicVSR_Vimeo90K_BDx4
model_type: VideoRecurrentModel
scale: 4
num_gpu: 2  # set num_gpu: 0 for cpu mode
manual_seed: 0

# dataset and data loader settings
datasets:
  train:
    name: Vimeo90K
    type: Vimeo90KRecurrentDataset
    dataroot_gt: datasets/vimeo90k/vimeo_septuplet/sequences
    dataroot_lq: datasets/vimeo90k/BDx4
    meta_info_file: basicsr/data/meta_info/meta_info_Vimeo90K_train_GT.txt
    io_backend:
      type: disk

    num_frame: -1
    gt_size: 256
    interval_list: [1]
    random_reverse: false
    use_hflip: true
    use_rot: true
    flip_sequence: true

    # data loader
    num_worker_per_gpu: 6
    batch_size_per_gpu: 4
    dataset_enlarge_ratio: 200
    prefetch_mode: ~

  val:
    name: Vid4
    type: VideoRecurrentTestDataset
    dataroot_gt: datasets/Vid4/GT
    dataroot_lq: datasets/Vid4/BDx4

    cache_data: true
    io_backend:
      type: disk

    num_frame: -1  # not needed

# network structures
network_g:
  type: BasicVSR
  num_feat: 64
  num_block: 30
  spynet_path: experiments/pretrained_models/flownet/spynet_sintel_final-3d2a1287.pth

# path
path:
  pretrain_network_g: ~
  strict_load_g: true
  resume_state: ~

# training settings
train:
  ema_decay: 0.999
  optim_g:
    type: Adam
    lr: !!float 2e-4
    weight_decay: 0
    betas: [0.9, 0.99]

  scheduler:
    type: CosineAnnealingRestartLR
    periods: [300000]
    restart_weights: [1]
    eta_min: !!float 1e-7

  total_iter: 300000
  warmup_iter: -1  # no warm up
  fix_flow: 5000
  flow_lr_mul: 0.125

  # losses
  pixel_opt:
    type: CharbonnierLoss
    loss_weight: 1.0
    reduction: mean

# validation settings
val:
  val_freq: !!float 5e3
  save_img: false

  metrics:
    psnr:  # metric name, can be arbitrary
      type: calculate_psnr
      crop_border: 0
      test_y_channel: true

# logging settings
logger:
  print_freq: 100
  save_checkpoint_freq: !!float 5e3
  use_tb_logger: true
  wandb:
    project: ~
    resume_id: ~

# dist training settings
dist_params:
  backend: nccl
  port: 29500

find_unused_parameters: true
---
# general settings
name: BasicVSR_Vimeo90K_BIx4
model_type: VideoRecurrentModel
scale: 4
num_gpu: 2  # set num_gpu: 0 for cpu mode
manual_seed: 0

# dataset and data loader settings
datasets:
  train:
    name: Vimeo90K
    type: Vimeo90KRecurrentDataset
    dataroot_gt: datasets/vimeo90k/vimeo_septuplet/sequences
    dataroot_lq: datasets/vimeo90k/BIx4
    meta_info_file: basicsr/data/meta_info/meta_info_Vimeo90K_train_GT.txt
    io_backend:
      type: disk

    num_frame: -1
    gt_size: 256
    interval_list: [1]
    random_reverse: false
    use_hflip: true
    use_rot: true
    flip_sequence: true

    # data loader
    num_worker_per_gpu: 6
    batch_size_per_gpu: 4
    dataset_enlarge_ratio: 200
    prefetch_mode: ~

  val:
    name: Vid4
    type: VideoRecurrentTestDataset
    dataroot_gt: datasets/Vid4/GT
    dataroot_lq: datasets/Vid4/BIx4

    cache_data: true
    io_backend:
      type: disk

    num_frame: -1  # not needed

# network structures
network_g:
  type: BasicVSR
  num_feat: 64
  num_block: 30
  spynet_path: experiments/pretrained_models/flownet/spynet_sintel_final-3d2a1287.pth

# path
path:
  pretrain_network_g: ~
  strict_load_g: true
  resume_state: ~

# training settings
train:
  ema_decay: 0.999
  optim_g:
    type: Adam
    lr: !!float 2e-4
    weight_decay: 0
    betas: [0.9, 0.99]

  scheduler:
    type: CosineAnnealingRestartLR
    periods: [300000]
    restart_weights: [1]
    eta_min: !!float 1e-7

  total_iter: 300000
  warmup_iter: -1  # no warm up
  fix_flow: 5000
  flow_lr_mul: 0.125

  # losses
  pixel_opt:
    type: CharbonnierLoss
    loss_weight: 1.0
    reduction: mean

# validation settings
val:
  val_freq: !!float 5e3
  save_img: false

  metrics:
    psnr:  # metric name, can be arbitrary
      type: calculate_psnr
      crop_border: 0
      test_y_channel: true

# logging settings
logger:
  print_freq: 100
  save_checkpoint_freq: !!float 5e3
  use_tb_logger: true
  wandb:
    project: ~
    resume_id: ~

# dist training settings
dist_params:
  backend: nccl
  port: 29500

find_unused_parameters: true
---
# general settings
name: IconVSR_REDS
model_type: VideoRecurrentModel
scale: 4
num_gpu: 2  # set num_gpu: 0 for cpu mode
manual_seed: 0

# dataset and data loader settings
datasets:
  train:
    name: REDS
    type: REDSRecurrentDataset
    dataroot_gt: datasets/REDS/train_sharp
    dataroot_lq: datasets/REDS/train_sharp_bicubic/X4
    meta_info_file: basicsr/data/meta_info/meta_info_REDS_GT.txt
    val_partition: REDS4  # set to 'official' when use the official validation partition
    test_mode: false
    io_backend:
      type: disk

    num_frame: 15
    gt_size: 256
    interval_list: [1]
    random_reverse: false
    use_hflip: true
    use_rot: true

    # data loader
    num_worker_per_gpu: 6
    batch_size_per_gpu: 4
    dataset_enlarge_ratio: 200
    prefetch_mode: ~

  val:
    name: REDS4
    type: VideoRecurrentTestDataset
    dataroot_gt: datasets/REDS4/GT
    dataroot_lq: datasets/REDS4/sharp_bicubic

    cache_data: true
    io_backend:
      type: disk

    num_frame: -1  # not needed

# network structures
network_g:
  type: IconVSR
  num_feat: 64
  num_block: 30
  keyframe_stride: 5
  temporal_padding: 2
  spynet_path: experiments/pretrained_models/flownet/spynet_sintel_final-3d2a1287.pth
  edvr_path: experiments/pretrained_models/edvr_reds_pretrained_new.pth

# path
path:
  pretrain_network_g: ~
  strict_load_g: true
  resume_state: ~

# training settings
train:
  ema_decay: 0.999
  optim_g:
    type: Adam
    lr: !!float 2e-4
    weight_decay: 0
    betas: [0.9, 0.99]

  scheduler:
    type: CosineAnnealingRestartLR
    periods: [300000]
    restart_weights: [1]
    eta_min: !!float 1e-7

  total_iter: 300000
  warmup_iter: -1  # no warm up
  fix_flow: 5000
  flow_lr_mul: 0.125

  # losses
  pixel_opt:
    type: CharbonnierLoss
    loss_weight: 1.0
    reduction: mean

# validation settings
val:
  val_freq: !!float 5e3
  save_img: false

  metrics:
    psnr:  # metric name, can be arbitrary
      type: calculate_psnr
      crop_border: 0
      test_y_channel: false

# logging settings
logger:
  print_freq: 100
  save_checkpoint_freq: !!float 5e3
  use_tb_logger: true
  wandb:
    project: ~
    resume_id: ~

# dist training settings
dist_params:
  backend: nccl
  port: 29500

find_unused_parameters: true
---
# general settings
name: IconVSR_Vimeo90K_BDx4
model_type: VideoRecurrentModel
scale: 4
num_gpu: 2  # set num_gpu: 0 for cpu mode
manual_seed: 0

# dataset and data loader settings
datasets:
  train:
    name: Vimeo90K
    type: Vimeo90KRecurrentDataset
    dataroot_gt: datasets/vimeo90k/vimeo_septuplet/sequences
    dataroot_lq: datasets/vimeo90k/BDx4
    meta_info_file: basicsr/data/meta_info/meta_info_Vimeo90K_train_GT.txt
    io_backend:
      type: disk

    num_frame: -1
    gt_size: 256
    interval_list: [1]
    random_reverse: false
    use_hflip: true
    use_rot: true
    flip_sequence: true

    # data loader
    num_worker_per_gpu: 6
    batch_size_per_gpu: 4
    dataset_enlarge_ratio: 200
    prefetch_mode: ~

  val:
    name: Vid4
    type: VideoRecurrentTestDataset
    dataroot_gt: datasets/Vid4/GT
    dataroot_lq: datasets/Vid4/BDx4

    cache_data: true
    io_backend:
      type: disk

    num_frame: -1  # not needed

# network structures
network_g:
  type: IconVSR
  num_feat: 64
  num_block: 30
  keyframe_stride: 5
  temporal_padding: 3
  spynet_path: experiments/pretrained_models/flownet/spynet_sintel_final-3d2a1287.pth
  edvr_path: experiments/pretrained_models/edvr_vimeo90k_pretrained.pth

# path
path:
  pretrain_network_g: ~
  strict_load_g: true
  resume_state: ~

# training settings
train:
  ema_decay: 0.999
  optim_g:
    type: Adam
    lr: !!float 2e-4
    weight_decay: 0
    betas: [0.9, 0.99]

  scheduler:
    type: CosineAnnealingRestartLR
    periods: [300000]
    restart_weights: [1]
    eta_min: !!float 1e-7

  total_iter: 300000
  warmup_iter: -1  # no warm up
  fix_flow: 5000
  flow_lr_mul: 0.125

  # losses
  pixel_opt:
    type: CharbonnierLoss
    loss_weight: 1.0
    reduction: mean

# validation settings
val:
  val_freq: !!float 5e3
  save_img: false

  metrics:
    psnr:  # metric name, can be arbitrary
      type: calculate_psnr
      crop_border: 0
      test_y_channel: true

# logging settings
logger:
  print_freq: 100
  save_checkpoint_freq: !!float 5e3
  use_tb_logger: true
  wandb:
    project: ~
    resume_id: ~

# dist training settings
dist_params:
  backend: nccl
  port: 29500

find_unused_parameters: true
---
# general settings
name: IconVSR_Vimeo90K_BIx4
model_type: VideoRecurrentModel
scale: 4
num_gpu: 2  # set num_gpu: 0 for cpu mode
manual_seed: 0

# dataset and data loader settings
datasets:
  train:
    name: Vimeo90K
    type: Vimeo90KRecurrentDataset
    dataroot_gt: datasets/vimeo90k/vimeo_septuplet/sequences
    dataroot_lq: datasets/vimeo90k/BIx4
    meta_info_file: basicsr/data/meta_info/meta_info_Vimeo90K_train_GT.txt
    io_backend:
      type: disk

    num_frame: -1
    gt_size: 256
    interval_list: [1]
    random_reverse: false
    use_hflip: true
    use_rot: true
    flip_sequence: true

    # data loader
    num_worker_per_gpu: 6
    batch_size_per_gpu: 4
    dataset_enlarge_ratio: 200
    prefetch_mode: ~

  val:
    name: Vid4
    type: VideoRecurrentTestDataset
    dataroot_gt: datasets/Vid4/GT
    dataroot_lq: datasets/Vid4/BIx4

    cache_data: true
    io_backend:
      type: disk

    num_frame: -1  # not needed

# network structures
network_g:
  type: IconVSR
  num_feat: 64
  num_block: 30
  keyframe_stride: 5
  temporal_padding: 3
  spynet_path: experiments/pretrained_models/flownet/spynet_sintel_final-3d2a1287.pth
  edvr_path: experiments/pretrained_models/edvr_vimeo90k_pretrained.pth

# path
path:
  pretrain_network_g: ~
  strict_load_g: true
  resume_state: ~

# training settings
train:
  ema_decay: 0.999
  optim_g:
    type: Adam
    lr: !!float 2e-4
    weight_decay: 0
    betas: [0.9, 0.99]

  scheduler:
    type: CosineAnnealingRestartLR
    periods: [300000]
    restart_weights: [1]
    eta_min: !!float 1e-7

  total_iter: 300000
  warmup_iter: -1  # no warm up
  fix_flow: 5000
  flow_lr_mul: 0.125

  # losses
  pixel_opt:
    type: CharbonnierLoss
    loss_weight: 1.0
    reduction: mean

# validation settings
val:
  val_freq: !!float 5e3
  save_img: false

  metrics:
    psnr:  # metric name, can be arbitrary
      type: calculate_psnr
      crop_border: 0
      test_y_channel: true

# logging settings
logger:
  print_freq: 100
  save_checkpoint_freq: !!float 5e3
  use_tb_logger: true
  wandb:
    project: ~
    resume_id: ~

# dist training settings
dist_params:
  backend: nccl
  port: 29500

find_unused_parameters: true
---
# general settings
name: train_BasicVSRPP_REDS
model_type: VideoRecurrentModel
scale: 4
num_gpu: 8  # official: 8 GPUs
manual_seed: 0

# dataset and data loader settings
datasets:
  train:
    name: REDS
    type: REDSRecurrentDataset
    dataroot_gt: datasets/REDS/train_sharp
    dataroot_lq: datasets/REDS/train_sharp_bicubic
    meta_info_file: datasets/REDS/meta_info/meta_info_REDS_GT.txt
    val_partition: REDS4  # set to 'official' when use the official validation partition
    test_mode: false
    io_backend:
      type: disk

    num_frame: 30
    gt_size: 256
    interval_list: [1]
    random_reverse: false
    use_hflip: true
    use_rot: true

    # data loader
    num_worker_per_gpu: 6
    batch_size_per_gpu: 1
    dataset_enlarge_ratio: 1
    prefetch_mode: ~

  val:
    name: REDS4
    type: VideoRecurrentTestDataset
    dataroot_gt: datasets/REDS4/GT
    dataroot_lq: datasets/REDS4/sharp_bicubic

    cache_data: true
    io_backend:
      type: disk

    num_frame: -1  # not needed

# network structures
network_g:
  type: BasicVSRPlusPlus
  mid_channels: 64
  num_blocks: 7
  is_low_res_input: true
  spynet_path: experiments/pretrained_models/flownet/spynet_sintel_final-3d2a1287.pth

# path
path:
  pretrain_network_g: ~
  strict_load_g: true
  resume_state: ~

# training settings
train:
  ema_decay: 0.999
  optim_g:
    type: Adam
    lr: !!float 1e-4
    weight_decay: 0
    betas: [0.9, 0.99]

  scheduler:
    type: CosineAnnealingRestartLR
    periods: [600000]
    restart_weights: [1]
    eta_min: !!float 1e-7

  total_iter: 600000
  warmup_iter: -1  # no warm up
  fix_flow: 5000
  flow_lr_mul: 0.25

  # losses
  pixel_opt:
    type: CharbonnierLoss
    loss_weight: 1.0
    reduction: mean

# validation settings
val:
  val_freq: !!float 5e3
  save_img: false

  metrics:
    psnr:  # metric name, can be arbitrary
      type: calculate_psnr
      crop_border: 0
      test_y_channel: false

# logging settings
logger:
  print_freq: 100
  save_checkpoint_freq: !!float 5e3
  use_tb_logger: true
  wandb:
    project: ~
    resume_id: ~

# dist training settings
dist_params:
  backend: nccl
  port: 29500

find_unused_parameters: true
---
# general settings
name: 101_train_ECBSR_x2_m4c16_prelu
model_type: SRModel
scale: 2
num_gpu: 1  # set num_gpu: 0 for cpu mode
manual_seed: 0

# dataset and data loader settings
datasets:
  train:
    name: DIV2K
    type: PairedImageDataset
    # It is strongly recommended to use lmdb for faster IO speed, especially for small networks
    dataroot_gt: datasets/DF2K/DIV2K_train_HR_sub
    dataroot_lq: datasets/DF2K/DIV2K_train_LR_bicubic_X2_sub
    meta_info_file: basicsr/data/meta_info/meta_info_DIV2K800sub_GT.txt
    filename_tmpl: '{}'
    io_backend:
      type: disk

    gt_size: 128
    use_hflip: true
    use_rot: true
    color: y

    # data loader
    num_worker_per_gpu: 12
    batch_size_per_gpu: 32
    dataset_enlarge_ratio: 10
    prefetch_mode: ~

  # we use multiple validation datasets. The SR benchmark datasets can be download from: https://cv.snu.ac.kr/research/EDSR/benchmark.tar
  val:
    name: Set5
    type: PairedImageDataset
    dataroot_gt: datasets/benchmark/Set5/HR
    dataroot_lq: datasets/benchmark/Set5/LR_bicubic/X2
    filename_tmpl: '{}x2'
    color: y
    io_backend:
      type: disk

  val_2:
    name: Set14
    type: PairedImageDataset
    dataroot_gt: datasets/benchmark/Set14/HR
    dataroot_lq: datasets/benchmark/Set14/LR_bicubic/X2
    filename_tmpl: '{}x2'
    color: y
    io_backend:
      type: disk

  val_3:
    name: B100
    type: PairedImageDataset
    dataroot_gt: datasets/benchmark/B100/HR
    dataroot_lq: datasets/benchmark/B100/LR_bicubic/X2
    filename_tmpl: '{}x2'
    color: y
    io_backend:
      type: disk

  val_4:
    name: Urban100
    type: PairedImageDataset
    dataroot_gt: datasets/benchmark/Urban100/HR
    dataroot_lq: datasets/benchmark/Urban100/LR_bicubic/X2
    filename_tmpl: '{}x2'
    color: y
    io_backend:
      type: disk

# network structures
network_g:
  type: ECBSR
  num_in_ch: 1
  num_out_ch: 1
  num_block: 4
  num_channel: 16
  with_idt: false
  act_type: prelu
  scale: 2

# path
path:
  pretrain_network_g: ~
  strict_load_g: true
  resume_state: ~

# training settings
train:
  ema_decay: 0
  optim_g:
    type: Adam
    lr: !!float 5e-4
    weight_decay: 0
    betas: [0.9, 0.99]

  scheduler:
    type: MultiStepLR
    milestones: [1600000]
    gamma: 1

  total_iter: 1600000
  warmup_iter: -1  # no warm up

  # losses
  pixel_opt:
    type: L1Loss
    loss_weight: 1.0
    reduction: mean

# validation settings
val:
  val_freq: !!float 1600  # the same as the original setting. # TODO: Can be larger
  save_img: false
  pbar: false

  metrics:
    psnr:
      type: calculate_psnr
      crop_border: 2
      test_y_channel: true
      better: higher  # the higher, the better. Default: higher
    ssim:
      type: calculate_ssim
      crop_border: 2
      test_y_channel: true
      better: higher  # the higher, the better. Default: higher

# logging settings
logger:
  print_freq: 100
  save_checkpoint_freq: !!float 1600
  use_tb_logger: true
  wandb:
    project: ~
    resume_id: ~

# dist training settings
dist_params:
  backend: nccl
  port: 29500
---
# general settings
name: 100_train_ECBSR_x4_m4c16_prelu
model_type: SRModel
scale: 4
num_gpu: 1  # set num_gpu: 0 for cpu mode
manual_seed: 0

# dataset and data loader settings
datasets:
  train:
    name: DIV2K
    type: PairedImageDataset
    # It is strongly recommended to use lmdb for faster IO speed, especially for small networks
    dataroot_gt: datasets/DF2K/DIV2K_train_HR_sub.lmdb
    dataroot_lq: datasets/DF2K/DIV2K_train_LR_bicubic_X4_sub.lmdb
    meta_info_file: basicsr/data/meta_info/meta_info_DIV2K800sub_GT.txt
    filename_tmpl: '{}'
    io_backend:
      type: lmdb

    gt_size: 256
    use_hflip: true
    use_rot: true
    color: y

    # data loader
    num_worker_per_gpu: 12
    batch_size_per_gpu: 32
    dataset_enlarge_ratio: 10
    prefetch_mode: ~

  # we use multiple validation datasets. The SR benchmark datasets can be download from: https://cv.snu.ac.kr/research/EDSR/benchmark.tar
  val:
    name: Set5
    type: PairedImageDataset
    dataroot_gt: datasets/benchmark/Set5/HR
    dataroot_lq: datasets/benchmark/Set5/LR_bicubic/X4
    filename_tmpl: '{}x4'
    color: y
    io_backend:
      type: disk

  val_2:
    name: Set14
    type: PairedImageDataset
    dataroot_gt: datasets/benchmark/Set14/HR
    dataroot_lq: datasets/benchmark/Set14/LR_bicubic/X4
    filename_tmpl: '{}x4'
    color: y
    io_backend:
      type: disk

  val_3:
    name: B100
    type: PairedImageDataset
    dataroot_gt: datasets/benchmark/B100/HR
    dataroot_lq: datasets/benchmark/B100/LR_bicubic/X4
    filename_tmpl: '{}x4'
    color: y
    io_backend:
      type: disk

  val_4:
    name: Urban100
    type: PairedImageDataset
    dataroot_gt: datasets/benchmark/Urban100/HR
    dataroot_lq: datasets/benchmark/Urban100/LR_bicubic/X4
    filename_tmpl: '{}x4'
    color: y
    io_backend:
      type: disk

# network structures
network_g:
  type: ECBSR
  num_in_ch: 1
  num_out_ch: 1
  num_block: 4
  num_channel: 16
  with_idt: false
  act_type: prelu
  scale: 4

# path
path:
  pretrain_network_g: ~
  strict_load_g: true
  resume_state: ~

# training settings
train:
  ema_decay: 0
  optim_g:
    type: Adam
    lr: !!float 5e-4
    weight_decay: 0
    betas: [0.9, 0.99]

  scheduler:
    type: MultiStepLR
    milestones: [1600000]
    gamma: 1

  total_iter: 1600000
  warmup_iter: -1  # no warm up

  # losses
  pixel_opt:
    type: L1Loss
    loss_weight: 1.0
    reduction: mean

# validation settings
val:
  val_freq: !!float 1600  # the same as the original setting. # TODO: Can be larger
  save_img: false
  pbar: false

  metrics:
    psnr:
      type: calculate_psnr
      crop_border: 4
      test_y_channel: true
      better: higher  # the higher, the better. Default: higher
    ssim:
      type: calculate_ssim
      crop_border: 4
      test_y_channel: true
      better: higher  # the higher, the better. Default: higher

# logging settings
logger:
  print_freq: 100
  save_checkpoint_freq: !!float 1600
  use_tb_logger: true
  wandb:
    project: ~
    resume_id: ~

# dist training settings
dist_params:
  backend: nccl
  port: 29500
---
# general settings
name: 100_train_ECBSR_x4_m4c16_prelu_RGB
model_type: SRModel
scale: 4
num_gpu: 1  # set num_gpu: 0 for cpu mode
manual_seed: 0

# dataset and data loader settings
datasets:
  train:
    name: DIV2K
    type: PairedImageDataset
    # It is strongly recommended to use lmdb for faster IO speed, especially for small networks
    dataroot_gt: datasets/DF2K/DIV2K_train_HR_sub.lmdb
    dataroot_lq: datasets/DF2K/DIV2K_train_LR_bicubic_X4_sub.lmdb
    meta_info_file: basicsr/data/meta_info/meta_info_DIV2K800sub_GT.txt
    filename_tmpl: '{}'
    io_backend:
      type: lmdb

    gt_size: 256
    use_hflip: true
    use_rot: true

    # data loader
    num_worker_per_gpu: 12
    batch_size_per_gpu: 32
    dataset_enlarge_ratio: 10
    prefetch_mode: ~

  # we use multiple validation datasets. The SR benchmark datasets can be download from: https://cv.snu.ac.kr/research/EDSR/benchmark.tar
  val:
    name: Set5
    type: PairedImageDataset
    dataroot_gt: datasets/benchmark/Set5/HR
    dataroot_lq: datasets/benchmark/Set5/LR_bicubic/X4
    filename_tmpl: '{}x4'
    io_backend:
      type: disk

  val_2:
    name: Set14
    type: PairedImageDataset
    dataroot_gt: datasets/benchmark/Set14/HR
    dataroot_lq: datasets/benchmark/Set14/LR_bicubic/X4
    filename_tmpl: '{}x4'
    io_backend:
      type: disk

  val_3:
    name: B100
    type: PairedImageDataset
    dataroot_gt: datasets/benchmark/B100/HR
    dataroot_lq: datasets/benchmark/B100/LR_bicubic/X4
    filename_tmpl: '{}x4'
    io_backend:
      type: disk

  val_4:
    name: Urban100
    type: PairedImageDataset
    dataroot_gt: datasets/benchmark/Urban100/HR
    dataroot_lq: datasets/benchmark/Urban100/LR_bicubic/X4
    filename_tmpl: '{}x4'
    io_backend:
      type: disk

# network structures
network_g:
  type: ECBSR
  num_in_ch: 3
  num_out_ch: 3
  num_block: 4
  num_channel: 16
  with_idt: false
  act_type: prelu
  scale: 4

# path
path:
  pretrain_network_g: ~
  strict_load_g: true
  resume_state: ~

# training settings
train:
  ema_decay: 0
  optim_g:
    type: Adam
    lr: !!float 5e-4
    weight_decay: 0
    betas: [0.9, 0.99]

  scheduler:
    type: MultiStepLR
    milestones: [1600000]
    gamma: 1

  total_iter: 1600000
  warmup_iter: -1  # no warm up

  # losses
  pixel_opt:
    type: L1Loss
    loss_weight: 1.0
    reduction: mean

# validation settings
val:
  val_freq: !!float 1600  # the same as the original setting. # TODO: Can be larger
  save_img: false
  pbar: false

  metrics:
    psnr:
      type: calculate_psnr
      crop_border: 4
      test_y_channel: true
      better: higher  # the higher, the better. Default: higher
    ssim:
      type: calculate_ssim
      crop_border: 4
      test_y_channel: true
      better: higher  # the higher, the better. Default: higher

# logging settings
logger:
  print_freq: 100
  save_checkpoint_freq: !!float 1600
  use_tb_logger: true
  wandb:
    project: ~
    resume_id: ~

# dist training settings
dist_params:
  backend: nccl
  port: 29500
---
# general settings
name: 204_EDSR_Lx2_f256b32_DIV2K_300k_B16G1_wandb
model_type: SRModel
scale: 2
num_gpu: 1  # set num_gpu: 0 for cpu mode
manual_seed: 10

# dataset and data loader settings
datasets:
  train:
    name: DIV2K
    type: PairedImageDataset
    dataroot_gt: datasets/DIV2K/DIV2K_train_HR_sub
    dataroot_lq: datasets/DIV2K/DIV2K_train_LR_bicubic/X2_sub
    # (for lmdb)
    # dataroot_gt: datasets/DIV2K/DIV2K_train_HR_sub.lmdb
    # dataroot_lq: datasets/DIV2K/DIV2K_train_LR_bicubic_X2_sub.lmdb
    filename_tmpl: '{}'
    io_backend:
      type: disk
      # (for lmdb)
      # type: lmdb

    gt_size: 96
    use_hflip: true
    use_rot: true

    # data loader
    num_worker_per_gpu: 6
    batch_size_per_gpu: 16
    dataset_enlarge_ratio: 100
    prefetch_mode: ~

  val:
    name: Set5
    type: PairedImageDataset
    dataroot_gt: datasets/Set5/GTmod12
    dataroot_lq: datasets/Set5/LRbicx2
    io_backend:
      type: disk

# network structures
network_g:
  type: EDSR
  num_in_ch: 3
  num_out_ch: 3
  num_feat: 256
  num_block: 32
  upscale: 2
  res_scale: 0.1
  img_range: 255.
  rgb_mean: [0.4488, 0.4371, 0.4040]

# path
path:
  pretrain_network_g: ~
  strict_load_g: true
  resume_state: ~

# training settings
train:
  ema_decay: 0.999
  optim_g:
    type: Adam
    lr: !!float 1e-4
    weight_decay: 0
    betas: [0.9, 0.99]

  scheduler:
    type: MultiStepLR
    milestones: [200000]
    gamma: 0.5

  total_iter: 300000
  warmup_iter: -1  # no warm up

  # losses
  pixel_opt:
    type: L1Loss
    loss_weight: 1.0
    reduction: mean

# validation settings
val:
  val_freq: !!float 5e3
  save_img: false

  metrics:
    psnr:  # metric name, can be arbitrary
      type: calculate_psnr
      crop_border: 2
      test_y_channel: false

# logging settings
logger:
  print_freq: 100
  save_checkpoint_freq: !!float 5e3
  use_tb_logger: true
  wandb:
    project: ~
    resume_id: ~

# dist training settings
dist_params:
  backend: nccl
  port: 29500
---
# general settings
name: 205_EDSR_Lx3_f256b32_DIV2K_300k_B16G1_204pretrain_wandb
model_type: SRModel
scale: 3
num_gpu: 1  # set num_gpu: 0 for cpu mode
manual_seed: 10

# dataset and data loader settings
datasets:
  train:
    name: DIV2K
    type: PairedImageDataset
    dataroot_gt: datasets/DIV2K/DIV2K_train_HR_sub
    dataroot_lq: datasets/DIV2K/DIV2K_train_LR_bicubic/X3_sub
    # (for lmdb)
    # dataroot_gt: datasets/DIV2K/DIV2K_train_HR_sub.lmdb
    # dataroot_lq: datasets/DIV2K/DIV2K_train_LR_bicubic_X3_sub.lmdb
    filename_tmpl: '{}'
    io_backend:
      type: disk
      # (for lmdb)
      # type: lmdb

    gt_size: 144
    use_hflip: true
    use_rot: true

    # data loader
    num_worker_per_gpu: 6
    batch_size_per_gpu: 16
    dataset_enlarge_ratio: 100
    prefetch_mode: ~

  val:
    name: Set5
    type: PairedImageDataset
    dataroot_gt: datasets/Set5/GTmod12
    dataroot_lq: datasets/Set5/LRbicx3
    io_backend:
      type: disk

# network structures
network_g:
  type: EDSR
  num_in_ch: 3
  num_out_ch: 3
  num_feat: 256
  num_block: 32
  upscale: 3
  res_scale: 0.1
  img_range: 255.
  rgb_mean: [0.4488, 0.4371, 0.4040]

# path
path:
  pretrain_network_g: experiments/204_EDSR_Lx2_f256b32_DIV2K_300k_B16G1_wandb/models/net_g_300000.pth
  strict_load_g: false
  resume_state: ~

# training settings
train:
  ema_decay: 0.999
  optim_g:
    type: Adam
    lr: !!float 1e-4
    weight_decay: 0
    betas: [0.9, 0.99]

  scheduler:
    type: MultiStepLR
    milestones: [200000]
    gamma: 0.5

  total_iter: 300000
  warmup_iter: -1  # no warm up

  # losses
  pixel_opt:
    type: L1Loss
    loss_weight: 1.0
    reduction: mean

# validation settings
val:
  val_freq: !!float 5e3
  save_img: false

  metrics:
    psnr:  # metric name, can be arbitrary
      type: calculate_psnr
      crop_border: 3
      test_y_channel: false

# logging settings
logger:
  print_freq: 100
  save_checkpoint_freq: !!float 5e3
  use_tb_logger: true
  wandb:
    project: ~
    resume_id: ~

# dist training settings
dist_params:
  backend: nccl
  port: 29500
---
# general settings
name: 206_EDSR_Lx4_f256b32_DIV2K_300k_B16G1_204pretrain_wandb
model_type: SRModel
scale: 4
num_gpu: 1  # set num_gpu: 0 for cpu mode
manual_seed: 10

# dataset and data loader settings
datasets:
  train:
    name: DIV2K
    type: PairedImageDataset
    dataroot_gt: datasets/DIV2K/DIV2K_train_HR_sub
    dataroot_lq: datasets/DIV2K/DIV2K_train_LR_bicubic/X4_sub
    # (for lmdb)
    # dataroot_gt: datasets/DIV2K/DIV2K_train_HR_sub.lmdb
    # dataroot_lq: datasets/DIV2K/DIV2K_train_LR_bicubic_X4_sub.lmdb
    filename_tmpl: '{}'
    io_backend:
      type: disk
      # (for lmdb)
      # type: lmdb

    gt_size: 192
    use_hflip: true
    use_rot: true

    # data loader
    num_worker_per_gpu: 6
    batch_size_per_gpu: 16
    dataset_enlarge_ratio: 100
    prefetch_mode: ~

  val:
    name: Set5
    type: PairedImageDataset
    dataroot_gt: datasets/Set5/GTmod12
    dataroot_lq: datasets/Set5/LRbicx4
    io_backend:
      type: disk

# network structures
network_g:
  type: EDSR
  num_in_ch: 3
  num_out_ch: 3
  num_feat: 256
  num_block: 32
  upscale: 4
  res_scale: 0.1
  img_range: 255.
  rgb_mean: [0.4488, 0.4371, 0.4040]

# path
path:
  pretrain_network_g: experiments/204_EDSR_Lx2_f256b32_DIV2K_300k_B16G1_wandb/models/net_g_300000.pth
  strict_load_g: false
  resume_state: ~

# training settings
train:
  ema_decay: 0.999
  optim_g:
    type: Adam
    lr: !!float 1e-4
    weight_decay: 0
    betas: [0.9, 0.99]

  scheduler:
    type: MultiStepLR
    milestones: [200000]
    gamma: 0.5

  total_iter: 300000
  warmup_iter: -1  # no warm up

  # losses
  pixel_opt:
    type: L1Loss
    loss_weight: 1.0
    reduction: mean

# validation settings
val:
  val_freq: !!float 5e3
  save_img: false

  metrics:
    psnr:  # metric name, can be arbitrary
      type: calculate_psnr
      crop_border: 4
      test_y_channel: false

# logging settings
logger:
  print_freq: 100
  save_checkpoint_freq: !!float 5e3
  use_tb_logger: true
  wandb:
    project: ~
    resume_id: ~

# dist training settings
dist_params:
  backend: nccl
  port: 29500
---
# general settings
name: 201_EDSR_Mx2_f64b16_DIV2K_300k_B16G1_wandb
model_type: SRModel
scale: 2
num_gpu: 1  # set num_gpu: 0 for cpu mode
manual_seed: 10

# dataset and data loader settings
datasets:
  train:
    name: DIV2K
    type: PairedImageDataset
    dataroot_gt: datasets/DIV2K/DIV2K_train_HR_sub
    dataroot_lq: datasets/DIV2K/DIV2K_train_LR_bicubic/X2_sub
    # (for lmdb)
    # dataroot_gt: datasets/DIV2K/DIV2K_train_HR_sub.lmdb
    # dataroot_lq: datasets/DIV2K/DIV2K_train_LR_bicubic_X2_sub.lmdb
    filename_tmpl: '{}'
    io_backend:
      type: disk
      # (for lmdb)
      # type: lmdb

    gt_size: 96
    use_hflip: true
    use_rot: true

    # data loader
    num_worker_per_gpu: 6
    batch_size_per_gpu: 16
    dataset_enlarge_ratio: 100
    prefetch_mode: ~

  val:
    name: Set5
    type: PairedImageDataset
    dataroot_gt: datasets/Set5/GTmod12
    dataroot_lq: datasets/Set5/LRbicx2
    io_backend:
      type: disk

# network structures
network_g:
  type: EDSR
  num_in_ch: 3
  num_out_ch: 3
  num_feat: 64
  num_block: 16
  upscale: 2
  res_scale: 1
  img_range: 255.
  rgb_mean: [0.4488, 0.4371, 0.4040]

# path
path:
  pretrain_network_g: ~
  strict_load_g: true
  resume_state: ~

# training settings
train:
  ema_decay: 0.999
  optim_g:
    type: Adam
    lr: !!float 1e-4
    weight_decay: 0
    betas: [0.9, 0.99]

  scheduler:
    type: MultiStepLR
    milestones: [200000]
    gamma: 0.5

  total_iter: 300000
  warmup_iter: -1  # no warm up

  # losses
  pixel_opt:
    type: L1Loss
    loss_weight: 1.0
    reduction: mean

# validation settings
val:
  val_freq: !!float 5e3
  save_img: false

  metrics:
    psnr:  # metric name, can be arbitrary
      type: calculate_psnr
      crop_border: 2
      test_y_channel: false

# logging settings
logger:
  print_freq: 100
  save_checkpoint_freq: !!float 5e3
  use_tb_logger: true
  wandb:
    project: ~
    resume_id: ~

# dist training settings
dist_params:
  backend: nccl
  port: 29500
---
# general settings
name: 202_EDSR_Mx3_f64b16_DIV2K_300k_B16G1_201pretrain_wandb
model_type: SRModel
scale: 3
num_gpu: 1  # set num_gpu: 0 for cpu mode
manual_seed: 10

# dataset and data loader settings
datasets:
  train:
    name: DIV2K
    type: PairedImageDataset
    dataroot_gt: datasets/DIV2K/DIV2K_train_HR_sub
    dataroot_lq: datasets/DIV2K/DIV2K_train_LR_bicubic/X3_sub
    # (for lmdb)
    # dataroot_gt: datasets/DIV2K/DIV2K_train_HR_sub.lmdb
    # dataroot_lq: datasets/DIV2K/DIV2K_train_LR_bicubic_X3_sub.lmdb
    filename_tmpl: '{}'
    io_backend:
      type: disk
      # (for lmdb)
      # type: lmdb

    gt_size: 144
    use_hflip: true
    use_rot: true

    # data loader
    num_worker_per_gpu: 6
    batch_size_per_gpu: 16
    dataset_enlarge_ratio: 100
    prefetch_mode: ~

  val:
    name: Set5
    type: PairedImageDataset
    dataroot_gt: datasets/Set5/GTmod12
    dataroot_lq: datasets/Set5/LRbicx3
    io_backend:
      type: disk

# network structures
network_g:
  type: EDSR
  num_in_ch: 3
  num_out_ch: 3
  num_feat: 64
  num_block: 16
  upscale: 3
  res_scale: 1
  img_range: 255.
  rgb_mean: [0.4488, 0.4371, 0.4040]

# path
path:
  pretrain_network_g: experiments/201_EDSR_Mx2_f64b16_DIV2K_300k_B16G1_wandb/models/net_g_300000.pth
  strict_load_g: false
  resume_state: ~

# training settings
train:
  ema_decay: 0.999
  optim_g:
    type: Adam
    lr: !!float 1e-4
    weight_decay: 0
    betas: [0.9, 0.99]

  scheduler:
    type: MultiStepLR
    milestones: [200000]
    gamma: 0.5

  total_iter: 300000
  warmup_iter: -1  # no warm up

  # losses
  pixel_opt:
    type: L1Loss
    loss_weight: 1.0
    reduction: mean

# validation settings
val:
  val_freq: !!float 5e3
  save_img: false

  metrics:
    psnr:  # metric name, can be arbitrary
      type: calculate_psnr
      crop_border: 3
      test_y_channel: false

# logging settings
logger:
  print_freq: 100
  save_checkpoint_freq: !!float 5e3
  use_tb_logger: true
  wandb:
    project: ~
    resume_id: ~

# dist training settings
dist_params:
  backend: nccl
  port: 29500
---
# general settings
name: 203_EDSR_Mx4_f64b16_DIV2K_300k_B16G1_201pretrain_wandb
model_type: SRModel
scale: 4
num_gpu: 1  # set num_gpu: 0 for cpu mode
manual_seed: 10

# dataset and data loader settings
datasets:
  train:
    name: DIV2K
    type: PairedImageDataset
    dataroot_gt: datasets/DIV2K/DIV2K_train_HR_sub
    dataroot_lq: datasets/DIV2K/DIV2K_train_LR_bicubic/X4_sub
    # (for lmdb)
    # dataroot_gt: datasets/DIV2K/DIV2K_train_HR_sub.lmdb
    # dataroot_lq: datasets/DIV2K/DIV2K_train_LR_bicubic_X4_sub.lmdb
    filename_tmpl: '{}'
    io_backend:
      type: disk
      # (for lmdb)
      # type: lmdb

    gt_size: 192
    use_hflip: true
    use_rot: true

    # data loader
    num_worker_per_gpu: 6
    batch_size_per_gpu: 16
    dataset_enlarge_ratio: 100
    prefetch_mode: ~

  val:
    name: Set5
    type: PairedImageDataset
    dataroot_gt: datasets/Set5/GTmod12
    dataroot_lq: datasets/Set5/LRbicx4
    io_backend:
      type: disk

# network structures
network_g:
  type: EDSR
  num_in_ch: 3
  num_out_ch: 3
  num_feat: 64
  num_block: 16
  upscale: 4
  res_scale: 1
  img_range: 255.
  rgb_mean: [0.4488, 0.4371, 0.4040]

# path
path:
  pretrain_network_g: experiments/201_EDSR_Mx2_f64b16_DIV2K_300k_B16G1_wandb/models/net_g_300000.pth
  strict_load_g: false
  resume_state: ~

# training settings
train:
  ema_decay: 0.999
  optim_g:
    type: Adam
    lr: !!float 1e-4
    weight_decay: 0
    betas: [0.9, 0.99]

  scheduler:
    type: MultiStepLR
    milestones: [200000]
    gamma: 0.5

  total_iter: 300000
  warmup_iter: -1  # no warm up

  # losses
  pixel_opt:
    type: L1Loss
    loss_weight: 1.0
    reduction: mean

# validation settings
val:
  val_freq: !!float 5e3
  save_img: false

  metrics:
    psnr:  # metric name, can be arbitrary
      type: calculate_psnr
      crop_border: 4
      test_y_channel: false

# logging settings
logger:
  print_freq: 100
  save_checkpoint_freq: !!float 5e3
  use_tb_logger: true
  wandb:
    project: ~
    resume_id: ~

# dist training settings
dist_params:
  backend: nccl
  port: 29500
---
# TODO
# general settings
name: 107_EDVRwoTSA_CBInit_lr1e-4_400k_REDS_SyncBN
model_type: EDVRModel
scale: 4
num_gpu: 8 # set num_gpu: 0 for cpu mode
manual_seed: 10
# dataset and data loader settings
datasets:
train:
name: REDS
type: REDSDataset
dataroot_gt: datasets/REDS/train_sharp
dataroot_lq: datasets/REDS/train_sharp_bicubic
dataroot_flow: ~
meta_info_file: basicsr/data/meta_info/meta_info_REDS_GT.txt
val_partition: REDS4 # set to 'official' when use the official validation partition
io_backend:
type: disk
num_frame: 5
gt_size: 256
interval_list: [1]
random_reverse: false
use_hflip: true
use_rot: true
# data loader
num_worker_per_gpu: 3
batch_size_per_gpu: 4
dataset_enlarge_ratio: 200
prefetch_mode: ~
val:
name: REDS4
type: VideoTestDataset
dataroot_gt: datasets/REDS/train_sharp
dataroot_lq: datasets/REDS/train_sharp_bicubic
meta_info_file: basicsr/data/meta_info/meta_info_REDS4_test_GT.txt
# change to 'meta_info_REDSofficial4_test_GT' when use the official validation partition
io_backend:
type: disk
cache_data: false
num_frame: 5
padding: reflection_circle
# network structures
network_g:
type: EDVR
num_in_ch: 3
num_out_ch: 3
num_feat: 128
num_frame: 5
deformable_groups: 8
num_extract_block: 5
num_reconstruct_block: 40
center_frame_idx: ~
hr_in: false
with_predeblur: false
with_tsa: false
network_d:
type: VGGStyleDiscriminator
num_in_ch: 3
num_feat: 64
input_size: 256
# path
path:
pretrain_network_g: experiments/101_EDVR_M_x4_SR_REDS_woTSA_600k_B4G8_valREDS4_wandb/models/net_g_600000.pth
strict_load_g: true
resume_state: ~
# training settings
train:
ema_decay: 0.999
optim_g:
type: Adam
lr: !!float 1e-4
weight_decay: 0
betas: [0.9, 0.99]
optim_d:
type: Adam
lr: !!float 1e-4
weight_decay: 0
betas: [0.9, 0.99]
scheduler:
type: MultiStepLR
milestones: [50000, 100000, 200000, 300000]
gamma: 0.5
total_iter: 400000
warmup_iter: -1 # no warm up
# losses
pixel_opt:
type: CharbonnierLoss
loss_weight: !!float 1e-2
reduction: sum
perceptual_opt:
type: PerceptualLoss
layer_weights:
'conv5_4': 1 # before relu
vgg_type: vgg19
use_input_norm: true
range_norm: false
perceptual_weight: 1.0
style_weight: 0
criterion: l1
gan_opt:
type: GANLoss
gan_type: vanilla
real_label_val: 1.0
fake_label_val: 0.0
loss_weight: !!float 5e-3
net_d_iters: 1
net_d_init_iters: 0
# validation settings
val:
val_freq: !!float 5e3
save_img: false
metrics:
psnr: # metric name, can be arbitrary
type: calculate_psnr
crop_border: 0
test_y_channel: false
# logging settings
logger:
print_freq: 100
save_checkpoint_freq: !!float 5e3
use_tb_logger: true
wandb:
project: ~
resume_id: ~
# dist training settings
dist_params:
backend: nccl
port: 29500
---
# general settings
name: 104_EDVR_L_x4_SR_REDS_600k_B4G8_valREDS4_103pretrain_wandb
model_type: EDVRModel
scale: 4
num_gpu: 8 # set num_gpu: 0 for cpu mode
manual_seed: 10
# dataset and data loader settings
datasets:
train:
name: REDS
type: REDSDataset
dataroot_gt: datasets/REDS/train_sharp
dataroot_lq: datasets/REDS/train_sharp_bicubic
dataroot_flow: ~
meta_info_file: basicsr/data/meta_info/meta_info_REDS_GT.txt
val_partition: REDS4 # set to 'official' when use the official validation partition
io_backend:
type: disk
num_frame: 5
gt_size: 256
interval_list: [1]
random_reverse: false
use_hflip: true
use_rot: true
# data loader
num_worker_per_gpu: 3
batch_size_per_gpu: 4
dataset_enlarge_ratio: 200
prefetch_mode: ~
val:
name: REDS4
type: VideoTestDataset
dataroot_gt: datasets/REDS/train_sharp
dataroot_lq: datasets/REDS/train_sharp_bicubic
meta_info_file: basicsr/data/meta_info/meta_info_REDS4_test_GT.txt
# change to 'meta_info_REDSofficial4_test_GT' when use the official validation partition
io_backend:
type: disk
cache_data: false
num_frame: 5
padding: reflection_circle
# network structures
network_g:
  type: EDVR
num_in_ch: 3
num_out_ch: 3
num_feat: 128
num_frame: 5
deformable_groups: 8
num_extract_block: 5
num_reconstruct_block: 40
center_frame_idx: ~
hr_in: false
with_predeblur: false
with_tsa: true
# path
path:
pretrain_network_g: experiments/103_EDVR_L_x4_SR_REDS_woTSA_600k_B4G8_valREDS4_wandb/models/net_g_600000.pth
strict_load_g: false
resume_state: ~
# training settings
train:
  ema_decay: 0.999
  optim_g:
type: Adam
lr: !!float 4e-4
weight_decay: 0
betas: [0.9, 0.99]
scheduler:
type: CosineAnnealingRestartLR
periods: [50000, 100000, 150000, 150000, 150000]
restart_weights: [1, 0.5, 0.5, 0.5, 0.5]
eta_min: !!float 1e-7
total_iter: 600000
warmup_iter: -1 # no warm up
tsa_iter: 50000
dcn_lr_mul: 1
# losses
pixel_opt:
type: CharbonnierLoss
loss_weight: 1.0
reduction: sum
# validation settings
val:
val_freq: !!float 5e3
save_img: false
metrics:
psnr: # metric name, can be arbitrary
type: calculate_psnr
crop_border: 0
test_y_channel: false
# logging settings
logger:
print_freq: 100
save_checkpoint_freq: !!float 5e3
use_tb_logger: true
wandb:
project: ~
resume_id: ~
# dist training settings
dist_params:
backend: nccl
port: 29500
find_unused_parameters: true
---
# general settings
name: 103_EDVR_L_x4_SR_REDS_woTSA_600k_B4G8_valREDS4_wandb
model_type: EDVRModel
scale: 4
num_gpu: 8 # set num_gpu: 0 for cpu mode
manual_seed: 10
# dataset and data loader settings
datasets:
train:
name: REDS
type: REDSDataset
dataroot_gt: datasets/REDS/train_sharp
dataroot_lq: datasets/REDS/train_sharp_bicubic
dataroot_flow: ~
meta_info_file: basicsr/data/meta_info/meta_info_REDS_GT.txt
val_partition: REDS4 # set to 'official' when use the official validation partition
io_backend:
type: disk
num_frame: 5
gt_size: 256
interval_list: [1]
random_reverse: false
use_hflip: true
use_rot: true
# data loader
num_worker_per_gpu: 3
batch_size_per_gpu: 4
dataset_enlarge_ratio: 200
prefetch_mode: ~
val:
name: REDS4
type: VideoTestDataset
dataroot_gt: datasets/REDS/train_sharp
dataroot_lq: datasets/REDS/train_sharp_bicubic
meta_info_file: basicsr/data/meta_info/meta_info_REDS4_test_GT.txt
# change to 'meta_info_REDSofficial4_test_GT' when use the official validation partition
io_backend:
type: disk
cache_data: false
num_frame: 5
padding: reflection_circle
# network structures
network_g:
  type: EDVR
num_in_ch: 3
num_out_ch: 3
num_feat: 128
num_frame: 5
deformable_groups: 8
num_extract_block: 5
num_reconstruct_block: 40
center_frame_idx: ~
hr_in: false
with_predeblur: false
with_tsa: false
# path
path:
pretrain_network_g: ~
strict_load_g: true
resume_state: ~
# training settings
train:
  ema_decay: 0.999
  optim_g:
type: Adam
lr: !!float 4e-4
weight_decay: 0
betas: [0.9, 0.99]
scheduler:
type: CosineAnnealingRestartLR
periods: [150000, 150000, 150000, 150000]
restart_weights: [1, 0.5, 0.5, 0.5]
eta_min: !!float 1e-7
total_iter: 600000
warmup_iter: -1 # no warm up
dcn_lr_mul: 1
# losses
pixel_opt:
type: CharbonnierLoss
loss_weight: 1.0
reduction: sum
# validation settings
val:
val_freq: !!float 5e3
save_img: false
metrics:
psnr: # metric name, can be arbitrary
type: calculate_psnr
crop_border: 0
test_y_channel: false
# logging settings
logger:
print_freq: 100
save_checkpoint_freq: !!float 5e3
use_tb_logger: true
wandb:
project: ~
resume_id: ~
# dist training settings
dist_params:
backend: nccl
port: 29500
---
# general settings
name: 102_EDVR_M_x4_SR_REDS_600k_B4G8_valREDS4_101pretrain_wandb
model_type: EDVRModel
scale: 4
num_gpu: 8 # set num_gpu: 0 for cpu mode
manual_seed: 10
# dataset and data loader settings
datasets:
train:
name: REDS
type: REDSDataset
dataroot_gt: datasets/REDS/train_sharp
dataroot_lq: datasets/REDS/train_sharp_bicubic
dataroot_flow: ~
meta_info_file: basicsr/data/meta_info/meta_info_REDS_GT.txt
val_partition: REDS4 # set to 'official' when use the official validation partition
io_backend:
type: disk
num_frame: 5
gt_size: 256
interval_list: [1]
random_reverse: false
use_hflip: true
use_rot: true
# data loader
num_worker_per_gpu: 3
batch_size_per_gpu: 4
dataset_enlarge_ratio: 200
prefetch_mode: ~
val:
name: REDS4
type: VideoTestDataset
dataroot_gt: datasets/REDS/train_sharp
dataroot_lq: datasets/REDS/train_sharp_bicubic
meta_info_file: basicsr/data/meta_info/meta_info_REDS4_test_GT.txt
# change to 'meta_info_REDSofficial4_test_GT' when use the official validation partition
io_backend:
type: disk
cache_data: false
num_frame: 5
padding: reflection_circle
# network structures
network_g:
type: EDVR
num_in_ch: 3
num_out_ch: 3
num_feat: 64
num_frame: 5
deformable_groups: 8
num_extract_block: 5
num_reconstruct_block: 10
center_frame_idx: ~
hr_in: false
with_predeblur: false
with_tsa: true
# path
path:
pretrain_network_g: experiments/101_EDVR_M_x4_SR_REDS_woTSA_600k_B4G8_valREDS4_wandb/models/net_g_600000.pth
strict_load_g: false
resume_state: ~
# training settings
train:
ema_decay: 0.999
optim_g:
type: Adam
lr: !!float 4e-4
weight_decay: 0
betas: [0.9, 0.99]
scheduler:
type: CosineAnnealingRestartLR
periods: [50000, 100000, 150000, 150000, 150000]
restart_weights: [1, 1, 1, 1, 1]
eta_min: !!float 1e-7
total_iter: 600000
warmup_iter: -1 # no warm up
tsa_iter: 50000
dcn_lr_mul: 1
# losses
pixel_opt:
type: CharbonnierLoss
loss_weight: 1.0
reduction: sum
# validation settings
val:
val_freq: !!float 5e3
save_img: false
metrics:
psnr: # metric name, can be arbitrary
type: calculate_psnr
crop_border: 0
test_y_channel: false
# logging settings
logger:
print_freq: 100
save_checkpoint_freq: !!float 5e3
use_tb_logger: true
wandb:
project: ~
resume_id: ~
# dist training settings
dist_params:
backend: nccl
port: 29500
find_unused_parameters: true