"git@developer.sourcefind.cn:modelzoo/yolov7_migraphx.git" did not exist on "e93d7587634cc6ef3b92a3884928e4592f53c6b6"
Commit 5efcc6ff authored by mashun1

metaportrait

from torch import nn
import torch.nn.functional as F
import torch
from torch.nn import BatchNorm2d
def kp2gaussian(kp, spatial_size, kp_variance):
"""
    Transform keypoints into a Gaussian-like heatmap representation.
"""
mean = kp["value"]
coordinate_grid = make_coordinate_grid(spatial_size, mean.type())
number_of_leading_dimensions = len(mean.shape) - 1
shape = (1,) * number_of_leading_dimensions + coordinate_grid.shape
coordinate_grid = coordinate_grid.view(*shape)
repeats = mean.shape[:number_of_leading_dimensions] + (1, 1, 1)
coordinate_grid = coordinate_grid.repeat(*repeats)
# Preprocess kp shape
shape = mean.shape[:number_of_leading_dimensions] + (1, 1, 2)
mean = mean.view(*shape)
mean_sub = coordinate_grid - mean
out = torch.exp(-0.5 * (mean_sub ** 2).sum(-1) / kp_variance)
return out
def make_coordinate_grid(spatial_size, type):
"""
Create a meshgrid [-1,1] x [-1,1] of given spatial_size.
"""
h, w = spatial_size
x = torch.arange(w).type(type)
y = torch.arange(h).type(type)
x = 2 * (x / (w - 1)) - 1
y = 2 * (y / (h - 1)) - 1
yy = y.view(-1, 1).repeat(1, w)
xx = x.view(1, -1).repeat(h, 1)
meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)
return meshed
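# Illustrative usage (a sketch, not part of the original module): kp2gaussian
# turns keypoints given in the [-1, 1] grid coordinates produced above into one
# Gaussian heatmap per keypoint. Shapes and kp_variance below are example values.
#
#   kp = {"value": torch.zeros(2, 10, 2)}                      # (batch, num_kp, 2)
#   heatmaps = kp2gaussian(kp, spatial_size=(64, 64), kp_variance=0.01)
#   assert heatmaps.shape == (2, 10, 64, 64)                   # (batch, num_kp, H, W)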
class ResBlock2d(nn.Module):
"""
    Residual block that preserves spatial resolution.
"""
def __init__(self, in_features, kernel_size, padding):
super(ResBlock2d, self).__init__()
self.conv1 = nn.Conv2d(
in_channels=in_features,
out_channels=in_features,
kernel_size=kernel_size,
padding=padding,
)
self.conv2 = nn.Conv2d(
in_channels=in_features,
out_channels=in_features,
kernel_size=kernel_size,
padding=padding,
)
self.norm1 = BatchNorm2d(in_features, affine=True)
self.norm2 = BatchNorm2d(in_features, affine=True)
def forward(self, x):
out = self.norm1(x)
out = F.relu(out)
out = self.conv1(out)
out = self.norm2(out)
out = F.relu(out)
out = self.conv2(out)
out += x
return out
class UpBlock2d(nn.Module):
"""
Upsampling block for use in decoder.
"""
def __init__(
self,
in_features,
out_features,
kernel_size=3,
padding=1,
groups=1,
Lwarp=False,
AdaINc=0,
use_IN=False
):
super(UpBlock2d, self).__init__()
self.AdaINc = AdaINc
self.conv = nn.Conv2d(
in_channels=in_features,
out_channels=out_features,
kernel_size=kernel_size,
padding=padding,
groups=groups,
)
if AdaINc > 0:
self.norm = ADAIN(out_features, feature_nc=AdaINc)
elif use_IN:
self.norm = nn.InstanceNorm2d(out_features, affine=True)
else:
self.norm = BatchNorm2d(out_features, affine=True)
self.Lwarp = Lwarp
if Lwarp:
self.SameBlock2d = SameBlock2d(
out_features, out_features, groups, kernel_size, padding, AdaINc=AdaINc
)
def forward(self, x, drv_exp=None):
out = F.interpolate(x, scale_factor=2)
out = self.conv(out)
if self.AdaINc > 0:
out = self.norm(out, drv_exp)
else:
out = self.norm(out)
out = F.relu(out)
if self.Lwarp:
out = self.SameBlock2d(out, drv_exp=drv_exp)
return out
class DownBlock2d(nn.Module):
"""
Downsampling block for use in encoder.
"""
def __init__(
self,
in_features,
out_features,
kernel_size=3,
padding=1,
groups=1,
Lwarp=False,
AdaINc=0,
use_IN=False
):
super(DownBlock2d, self).__init__()
self.AdaINc = AdaINc
self.conv = nn.Conv2d(
in_channels=in_features,
out_channels=out_features,
kernel_size=kernel_size,
padding=padding,
groups=groups,
)
if AdaINc > 0:
self.norm = ADAIN(out_features, feature_nc=AdaINc)
elif use_IN:
self.norm = nn.InstanceNorm2d(out_features, affine=True)
else:
self.norm = BatchNorm2d(out_features, affine=True)
self.pool = nn.AvgPool2d(kernel_size=(2, 2))
self.Lwarp = Lwarp
if Lwarp:
self.SameBlock2d = SameBlock2d(
out_features, out_features, groups, kernel_size, padding, AdaINc=AdaINc
)
def forward(self, x, drv_exp=None):
out = self.conv(x)
if self.AdaINc > 0:
out = self.norm(out, drv_exp)
else:
out = self.norm(out)
out = F.relu(out)
out = self.pool(out)
if self.Lwarp:
out = self.SameBlock2d(out, drv_exp=drv_exp)
return out
class SameBlock2d(nn.Module):
"""
    Simple convolutional block that preserves spatial resolution.
"""
def __init__(
self, in_features, out_features, groups=1, kernel_size=3, padding=1, AdaINc=0, use_IN=False
):
super(SameBlock2d, self).__init__()
self.AdaINc = AdaINc
self.conv = nn.Conv2d(
in_channels=in_features,
out_channels=out_features,
kernel_size=kernel_size,
padding=padding,
groups=groups,
)
if AdaINc > 0:
self.norm = ADAIN(out_features, feature_nc=AdaINc)
elif use_IN:
self.norm = nn.InstanceNorm2d(out_features, affine=True)
else:
self.norm = BatchNorm2d(out_features, affine=True)
def forward(self, x, drv_exp=None):
out = self.conv(x)
if self.AdaINc > 0:
out = self.norm(out, drv_exp)
else:
out = self.norm(out)
out = F.relu(out)
return out
class ADAIN(nn.Module):
def __init__(self, norm_nc, feature_nc):
super().__init__()
self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
nhidden = 128
use_bias = True
self.mlp_shared = nn.Sequential(
nn.Linear(feature_nc, nhidden, bias=use_bias), nn.ReLU()
)
self.mlp_gamma = nn.Linear(nhidden, norm_nc, bias=use_bias)
self.mlp_beta = nn.Linear(nhidden, norm_nc, bias=use_bias)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.LayerNorm):
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if m.weight is not None:
nn.init.constant_(m.weight, 1.0)
def forward(self, x, feature):
# Part 1. generate parameter-free normalized activations
normalized = self.param_free_norm(x)
# Part 2. produce scaling and bias conditioned on feature
feature = feature.view(feature.size(0), -1)
actv = self.mlp_shared(feature)
gamma = self.mlp_gamma(actv)
beta = self.mlp_beta(actv)
# apply scale and bias
gamma = gamma.view(*gamma.size()[:2], 1, 1)
beta = beta.view(*beta.size()[:2], 1, 1)
out = normalized * (1 + gamma) + beta
return out
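# Sketch of how ADAIN is consumed by the blocks above (shapes are illustrative):
# a conditioning vector of size feature_nc (e.g. the drv_exp expression code) is
# mapped to per-channel gamma/beta that modulate instance-normalized features.
#
#   adain = ADAIN(norm_nc=256, feature_nc=45)
#   x = torch.randn(2, 256, 32, 32)                            # feature map
#   drv_exp = torch.randn(2, 45)                               # conditioning vector
#   y = adain(x, drv_exp)                                      # same shape as x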
class Encoder(nn.Module):
"""
Hourglass Encoder
"""
def __init__(
self,
block_expansion,
in_features,
num_blocks=3,
max_features=256,
Lwarp=False,
AdaINc=0,
use_IN=False
):
super(Encoder, self).__init__()
down_blocks = []
for i in range(num_blocks):
down_blocks.append(
DownBlock2d(
in_features
if i == 0
else min(max_features, block_expansion * (2 ** i)),
min(max_features, block_expansion * (2 ** (i + 1))),
kernel_size=3,
padding=1,
Lwarp=Lwarp,
AdaINc=AdaINc,
use_IN=use_IN
)
)
self.down_blocks = nn.ModuleList(down_blocks)
def forward(self, x, drv_exp=None):
outs = [x]
for down_block in self.down_blocks:
outs.append(down_block(outs[-1], drv_exp=drv_exp))
return outs
class Decoder(nn.Module):
"""
Hourglass Decoder
"""
def __init__(
self,
block_expansion,
in_features,
num_blocks=3,
dec_lease=0,
max_features=256,
Lwarp=False,
AdaINc=0,
use_IN=False
):
super(Decoder, self).__init__()
up_blocks = []
for i in range(dec_lease, num_blocks)[::-1]:
in_filters = (1 if i == num_blocks - 1 else 2) * min(
max_features, block_expansion * (2 ** (i + 1))
)
out_filters = min(max_features, block_expansion * (2 ** i))
up_blocks.append(
UpBlock2d(
in_filters,
out_filters,
kernel_size=3,
padding=1,
Lwarp=Lwarp,
AdaINc=AdaINc,
use_IN=use_IN
)
)
self.up_blocks = nn.ModuleList(up_blocks)
self.out_filters = (
out_filters + in_features if dec_lease == 0 else out_filters * 2
)
def forward(self, x, drv_exp=None, return_all=False):
out = x.pop()
if return_all:
out_list = [out]
for up_block in self.up_blocks:
out = up_block(out, drv_exp=drv_exp)
if return_all:
out_list.append(out)
skip = x.pop()
out = torch.cat([out, skip], dim=1)
if return_all:
out_list.pop()
out_list.append(out)
return out, out_list
return out
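# Channel bookkeeping (derived from the code above): each UpBlock2d output is
# concatenated with the matching encoder skip, so when dec_lease == 0 the final
# tensor has out_filters + in_features channels (the last skip is the raw input),
# and out_filters * 2 channels otherwise (the last skip is the encoder feature map
# at level dec_lease, which has the same width as out_filters).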
class Hourglass(nn.Module):
"""
Hourglass architecture.
"""
def __init__(
self,
block_expansion,
in_features,
num_blocks=3,
max_features=256,
Lwarp=False,
AdaINc=0,
dec_lease=0,
use_IN=False
):
super(Hourglass, self).__init__()
self.encoder = Encoder(
block_expansion, in_features, num_blocks, max_features, Lwarp, AdaINc, use_IN
)
self.decoder = Decoder(
block_expansion,
in_features,
num_blocks,
dec_lease,
max_features,
Lwarp,
AdaINc,
use_IN
)
self.out_filters = self.decoder.out_filters
def forward(self, x, drv_exp=None, return_all=False):
return self.decoder(self.encoder(x, drv_exp=drv_exp), drv_exp=drv_exp, return_all=return_all)
class LayerNorm2d(nn.Module):
def __init__(self, n_out, affine=True):
super(LayerNorm2d, self).__init__()
self.n_out = n_out
self.affine = affine
if self.affine:
self.weight = nn.Parameter(torch.ones(n_out, 1, 1))
self.bias = nn.Parameter(torch.zeros(n_out, 1, 1))
def forward(self, x):
normalized_shape = x.size()[1:]
if self.affine:
return F.layer_norm(
x,
normalized_shape,
self.weight.expand(normalized_shape),
self.bias.expand(normalized_shape),
)
else:
return F.layer_norm(x, normalized_shape)
class AntiAliasInterpolation2d(nn.Module):
"""
Band-limited downsampling, for better preservation of the input signal.
"""
def __init__(self, channels, scale):
super(AntiAliasInterpolation2d, self).__init__()
sigma = (1 / scale - 1) / 2
kernel_size = 2 * round(sigma * 4) + 1
self.ka = kernel_size // 2
self.kb = self.ka - 1 if kernel_size % 2 == 0 else self.ka
kernel_size = [kernel_size, kernel_size]
sigma = [sigma, sigma]
# The gaussian kernel is the product of the
# gaussian function of each dimension.
kernel = 1
meshgrids = torch.meshgrid(
[torch.arange(size, dtype=torch.float32) for size in kernel_size]
)
for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
mean = (size - 1) / 2
kernel *= torch.exp(-((mgrid - mean) ** 2) / (2 * std ** 2))
# Make sure sum of values in gaussian kernel equals 1.
kernel = kernel / torch.sum(kernel)
# Reshape to depthwise convolutional weight
kernel = kernel.view(1, 1, *kernel.size())
kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
self.register_buffer("weight", kernel)
self.groups = channels
self.scale = scale
def forward(self, input):
if self.scale == 1.0:
return input
out = F.pad(input, (self.ka, self.kb, self.ka, self.kb))
out = F.conv2d(out, weight=self.weight, groups=self.groups)
out = F.interpolate(out, scale_factor=(self.scale, self.scale))
return out
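# Usage sketch (illustrative values): blur with a Gaussian kernel matched to the
# downscaling factor, then resample, e.g. when building a multi-scale pyramid.
#
#   down = AntiAliasInterpolation2d(channels=3, scale=0.25)
#   y = down(torch.randn(1, 3, 256, 256))                      # -> (1, 3, 64, 64)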
if __name__ == '__main__':
model = Hourglass(
block_expansion=64,
in_features=3,
max_features=512,
num_blocks=3,
Lwarp=False,
AdaINc=0,
dec_lease=0,
)
print(model)
x = torch.zeros((2, 3, 256, 256))
out, out_list = model(x, return_all=True)
print(out.shape)
for t in out_list:
print(t.shape)
import copy
import warnings
warnings.filterwarnings("ignore")
import os
import time
import torch
import torch.distributed as dist
from torch.nn.utils import remove_spectral_norm
import utils
def inner_optimization(G_full_clone, D_full_clone, inner_optimizer_G, inner_optimizer_D, meta_batch_size, data, use_gan_training=False, ngpus=1):
# forward + backward + optimize
support_losses, support_generated = G_full_clone(data, stage="Full")
support_loss = sum([val.mean() for val in support_losses.values()])
G_full_clone.zero_grad()
support_loss.backward()
for param in G_full_clone.parameters():
if param.grad is not None:
torch.nan_to_num(param.grad, nan=0, posinf=1e3, neginf=-1e3, out=param.grad)
torch.nn.utils.clip_grad_norm_(G_full_clone.parameters(), 0.3)
inner_optimizer_G.step()
losses_G, loss_G = utils.reduce_loss_dict(support_losses, ngpus), utils.reduce_loss(support_loss, ngpus)
losses_D, loss_D = {}, 0
if use_gan_training:
inner_optimizer_D.zero_grad()
losses_D = D_full_clone(data, support_generated)
loss_D = sum([val.mean() for val in losses_D.values()])
loss_D.backward()
for param in D_full_clone.parameters():
if param.grad is not None:
torch.nan_to_num(param.grad, nan=0, posinf=1e5, neginf=-1e5, out=param.grad)
torch.nn.utils.clip_grad_norm_(D_full_clone.parameters(), 0.3)
inner_optimizer_D.step()
losses_D, loss_D = utils.reduce_loss_dict(losses_D, ngpus), utils.reduce_loss(loss_D, ngpus)
return losses_G, loss_G, losses_D, loss_D, support_generated
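# inner_optimization is one adaptation step of the Reptile inner loop driven by
# train_ddp below: it back-propagates the generator (and optionally discriminator)
# losses on a support batch, NaN-guards and clips the gradients, steps the inner
# optimizers, and returns the reduced losses plus the generated outputs for logging.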
def train_ddp(args, conf, models, datasets):
G = models["generator"]
D = models["discriminator"]
if args["local_rank"] == 0:
print(G)
print(D)
dataset_train = datasets["dataset_train"]
optim_G, optim_D, scheduler_G, scheduler_D = utils.build_outer_optimizer_and_scheduler(conf, G, D)
if conf["model"].get("warp_ckpt", None):
utils.load_ckpt(
conf["model"]["warp_ckpt"],
{"generator": G},
device=torch.device("cpu"),
strict=True,
warp_ckpt=True,
)
if args["ckpt"] is not None:
start_epoch = utils.load_ckpt(
args["ckpt"],
models,
device=torch.device("cuda", args["device"]),
strict=False,
)
if args["remove_sn"]:
for _, m in G.named_modules():
if hasattr(m, 'weight_u') and hasattr(m, 'weight_v'):
remove_spectral_norm(m)
G_full, D_full = utils.build_full_model(conf, args, G, D)
G_full_clone, D_full_clone = utils.clone_model(conf, args, copy.deepcopy(G), copy.deepcopy(D))
meta_batch_size = conf['dataset'].get('meta_batch_size', 0)
num_support_samples = conf['dataset'].get('num_support_samples', 1)
use_gan_training = conf['train']['loss_weights']['generator_gan'] != 0
if args["fp16"]:
scaler = torch.cuda.amp.GradScaler()
if conf["train"]["tensorboard"] and args["local_rank"] == 0:
event_path = os.path.join(
conf["train"]["event_save_path"], conf["general"]["exp_name"]
)
writer = utils.Visualizer(event_path)
dtime = 0
total_iters = 0
if args["local_rank"] == 0:
print("start to train...")
for epoch in range(start_epoch + 1, conf["train"]["epochs"] + 1):
stime = stime_iter = time.time()
for i, data in enumerate(dataset_train):
total_iters += 1
dtime += time.time() - stime
step = (epoch - 1) * len(dataset_train) + i
loss_G_inner_init, loss_D_inner_init = 0, 0
loss_G_inner_last, loss_D_inner_last = 0, 0
losses_D, loss_D = {}, 0
optim_G.zero_grad()
if args["stage"] != "Warp":
optim_D.zero_grad()
if args["task"] == "Pretrain":
with torch.cuda.amp.autocast():
losses_G, generated = G_full(data, stage=args["stage"])
loss_G = sum([val.mean() for val in losses_G.values()])
scaler.scale(loss_G).backward()
scaler.unscale_(optim_G)
for param in G_full.module.parameters():
if param.grad is not None:
torch.nan_to_num(param.grad, nan=0, posinf=1e5, neginf=-1e5, out=param.grad)
torch.nn.utils.clip_grad_norm_(G_full.parameters(), 0.3)
scaler.step(optim_G)
scaler.update()
if args["stage"] != "Warp":
optim_D.zero_grad()
with torch.cuda.amp.autocast():
losses_D = D_full(data, generated)
loss_D = sum([val.mean() for val in losses_D.values()])
scaler.scale(loss_D).backward()
scaler.unscale_(optim_D)
for param in D_full.module.parameters():
if param.grad is not None:
torch.nan_to_num(param.grad, nan=0, posinf=1e5, neginf=-1e5, out=param.grad)
torch.nn.utils.clip_grad_norm_(D_full.parameters(), 0.3)
scaler.step(optim_D)
scaler.update()
elif args["task"] == "Meta":
# Reptile implementation
support_set, _ = data[0], data[1] # support_set samples of shape (num_samples_per_cls, num_cls, *)
data = support_set
orig_state_dict_G = G_full.module.generator.state_dict()
orig_state_dict_D = D_full.module.discriminator.state_dict()
grad_list_G = [torch.zeros(p.size()).to(args["device"]) for p in G_full.module.generator.parameters()]
grad_list_D = [torch.zeros(p.size()).to(args["device"]) for p in D_full.module.discriminator.parameters()]
for cls_idx in range(conf['dataset'].get('num_classes_per_set', 1)):
G_full_clone.generator.load_state_dict(orig_state_dict_G)
if use_gan_training:
D_full_clone.discriminator.load_state_dict(orig_state_dict_D)
support_set_cls = utils.return_cls_data(support_set, cls_idx) # Return data of one cls of shape (num_samples_per_cls, *)
inner_optimizer_G, inner_optimizer_D = utils.build_inner_optimizer(conf, G_full_clone, D_full_clone)
for inner_step in range(conf['dataset'].get('inner_update_steps', 2)):
# Return batch data of shape (meta_bs, *)
batch_data = utils.convert_data_to_cuda(utils.return_batch_data(
support_set_cls,
(inner_step * meta_batch_size) % num_support_samples,
((inner_step + 1) * meta_batch_size) % num_support_samples if ((inner_step + 1) * meta_batch_size) % num_support_samples != 0 else num_support_samples
))
losses_G, loss_G, losses_D, loss_D, support_generated = inner_optimization(G_full_clone, D_full_clone, inner_optimizer_G, inner_optimizer_D, meta_batch_size, batch_data, use_gan_training, args["ngpus"])
if inner_step == 0:
if cls_idx == 0:
generated = support_generated
loss_G_inner_init += loss_G / conf['dataset'].get('num_classes_per_set', 1)
loss_D_inner_init += loss_D / conf['dataset'].get('num_classes_per_set', 1)
loss_G_inner_last += loss_G / conf['dataset'].get('num_classes_per_set', 1)
loss_D_inner_last += loss_D / conf['dataset'].get('num_classes_per_set', 1)
# Update meta grad
for idx, (orig_param, updated_param) in enumerate(zip(G_full.module.generator.parameters(), G_full_clone.generator.parameters())):
grad_list_G[idx] += (orig_param.data - updated_param.data) / conf['dataset'].get('num_classes_per_set', 1)
if use_gan_training:
for idx, (orig_param, updated_param) in enumerate(zip(D_full.module.discriminator.parameters(), D_full_clone.discriminator.parameters())):
grad_list_D[idx] += (orig_param.data - updated_param.data) / conf['dataset'].get('num_classes_per_set', 1)
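                # Reptile meta-gradient: the outer "gradient" is the average over the
                # sampled identities of (initial weights - adapted weights); stepping
                # the outer optimizer along this direction pulls the shared
                # initialization toward each identity's adapted weights.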
# Sync grad
for idx in range(len(grad_list_G)):
dist.all_reduce(grad_list_G[idx])
if use_gan_training:
for idx in range(len(grad_list_D)):
dist.all_reduce(grad_list_D[idx])
                # Compute grad for outer optimization
for idx, p in enumerate(G_full.module.generator.parameters()):
if p.grad is None:
p.grad = torch.zeros(p.size()).to(args["device"])
p.grad.data.add_(grad_list_G[idx] / args["ngpus"])
for param in G_full.module.parameters():
if param.grad is not None:
torch.nan_to_num(param.grad, nan=0, posinf=1e3, neginf=-1e3, out=param.grad)
torch.nn.utils.clip_grad_norm_(G_full.parameters(), 0.3)
optim_G.step()
G_full._sync_params_and_buffers()
data = utils.process_vis_data(data)
if use_gan_training:
                    # Compute grad for outer optimization
for idx, p in enumerate(D_full.module.discriminator.parameters()):
if p.grad is None:
p.grad = torch.zeros(p.size()).to(args["device"])
p.grad.data.add_(grad_list_D[idx] / args["ngpus"])
for param in D_full.module.parameters():
if param.grad is not None:
torch.nan_to_num(param.grad, nan=0, posinf=1e3, neginf=-1e3, out=param.grad)
torch.nn.utils.clip_grad_norm_(D_full.parameters(), 0.3)
optim_D.step()
D_full._sync_params_and_buffers()
else:
raise ValueError("{} task is not defined.".format(args["task"]))
if args["task"] != "Eval" and args["local_rank"] == 0 and i % conf["train"]["print_freq"] == 0:
string = "Epoch {} Iter {} D/Time : {:.3f}/{} ".format(epoch, i, time.time() - stime_iter, time.strftime("%Hh%Mm%Ss", time.gmtime(time.time() - stime)))
for loss_name in losses_G.keys():
if losses_G[loss_name].mean() != 0:
string += loss_name + " : {:.2f};".format(losses_G[loss_name].mean())
if args["stage"] != "Warp":
for loss_name in losses_D.keys():
if losses_D[loss_name].mean() != 0:
string += loss_name + " : {:.2f};".format(losses_D[loss_name].mean())
string += "loss_G_init : {:.2f};loss_D_init : {:.2f}".format(loss_G_inner_init, loss_D_inner_init)
string += "loss_G_last : {:.2f};loss_D_last : {:.2f}".format(loss_G_inner_last, loss_D_inner_last)
print(string)
stime_iter = time.time()
# Save tensorboard event
if args["task"] != "Eval" and args["local_rank"] == 0 and step % conf["train"]["event_save_freq"] == 0:
if conf["train"]["tensorboard"]:
print("Epoch {} Iter {} Step {} event save".format(epoch, i, step))
utils.save_events(writer, losses_G, loss_G_inner_last, step, losses_D, loss_D_inner_last, generated, loss_G_inner_init, loss_D_inner_init)
num = min(6, generated["prediction"].shape[0])
utils.save_training_images(conf, args, writer, generated, data, step, num)
# Save ckpt
if (args["task"] != "Eval" and args["local_rank"] == 0 and total_iters % conf["train"]["ckpt_save_iter_freq"] == 0):
print("save ckpt")
ckpt_out_path = os.path.join(
conf["train"]["ckpt_save_path"], conf["general"]["exp_name"]
)
ckpt_models = {
"generator": G,
"discriminator": D,
"optimizer_generator": optim_G,
"optimizer_discriminator": optim_D,
}
utils.save_ckpt(
out_path=ckpt_out_path,
epoch=epoch,
models=ckpt_models,
total_iters=total_iters,
)
scheduler_G.step()
if args["stage"] != "Warp":
scheduler_D.step()
# Save checkpoint
if args["local_rank"] == 0 and epoch % conf["train"]["ckpt_save_freq"] == 0:
print("save ckpt")
ckpt_out_path = os.path.join(conf["train"]["ckpt_save_path"], conf["general"]["exp_name"])
ckpt_models = {
"generator": G,
"discriminator": D,
"optimizer_generator": optim_G,
"optimizer_discriminator": optim_D,
}
utils.save_ckpt(out_path=ckpt_out_path, epoch=epoch, models=ckpt_models)
return
from .train_utils import *
from .visualizer import *
54 55
87 86
85 84
84 83
34 33
33 32
23 35
31 30
22 36
36 79
79 78
81 62
62 77
20 15
15 16
9 6
6 7
48 49
41 47
52 53
53 39
39 42
42 634
637 68
66 67
75 73
73 72
72 71
64 65
65 69
69 636
60 61
61 43
57 44
630 8
8 10
13 12
11 14
100 0
101 27
628 629
629 633
633 640
26 5
5 28
46 38
38 45
59 58
58 51
3 89
634 635
636 637
74 88
88 639
641 90
90 91
91 92
94 96
94 95
95 642
97 96
99 118
27 102
635 641
641 411
103 104
104 643
638 643
108 107
107 110
112 105
113 114
109 108
115 116
110 25
120 117
2 119
630 632
630 644
119 644
1 122
126 129
123 124
123 127
127 128
128 148
152 153
153 154
155 156
212 197
213 205
205 200
235 236
238 239
239 237
193 192
231 234
182 175
201 181
181 179
179 178
178 177
176 182
206 164
164 162
162 198
198 196
196 208
186 166
228 660
226 227
224 225
219 220
220 202
204 209
209 218
215 214
157 165
203 216
168 169
171 172
172 173
173 661
254 160
255 189
655 656
185 159
195 183
183 194
194 163
654 655
653 659
187 188
653 167
158 242
244 245
247 248
248 249
249 250
250 251
251 252
161 255
258 257
259 258
260 259
266 267
267 199
262 263
264 265
245 268
272 273
261 257
186 261
187 270
271 188
270 276
265 246
276 277
264 277
246 278
278 279
280 282
283 285
283 284
302 303
303 304
304 305
305 306
309 310
635 212
197 54
55 213
200 40
236 82
82 240
240 87
86 238
237 85
83 235
29 191
191 34
32 193
192 23
35 184
184 31
30 190
190 29
21 232
232 22
78 231
234 80
80 233
233 81
77 230
230 76
76 229
229 21
175 20
16 180
180 18
18 174
174 19
19 201
177 17
17 176
24 167
167 9
7 207
207 48
49 206
208 50
50 186
166 24
199 41
47 211
211 52
68 222
222 66
67 223
71 228
223 75
227 63
63 221
221 64
74 224
225 70
70 226
218 60
43 219
202 88
657 1
1 204
44 215
214 56
56 210
210 37
4 157
165 629
639 203
216 57
157 37
4 159
159 3
89 158
242 2
10 168
169 13
12 170
170 11
14 171
102 161
656 628
640 657
654 26
28 185
638 241
241 46
45 217
217 59
51 195
631 658
631 659
653 632
25 187
163 632
634 658
657 636
74 660
639 661
638 661
92 243
243 93
93 244
411 247
252 98
253 99
189 100
0 254
160 101
662 643
188 103
105 256
256 106
642 662
106 662
268 109
40 260
261 111
111 266
99 262
263 113
114 264
269 115
116 274
98 253
103 272
257 110
25 269
112 275
274 118
117 271
273 120
279 97
654 644
275 121
265 121
265 117
165 122
125 282
281 125
281 129
126 285
148 301
284 149
150 302
306 151
151 307
307 152
154 308
308 155
156 309
150 310
369 370
401 400
399 398
398 397
349 348
348 347
338 350
346 345
337 351
351 393
393 392
395 377
377 391
335 330
330 331
324 321
321 322
363 364
356 362
367 368
368 354
354 357
357 634
637 383
381 382
389 388
388 387
387 386
379 380
380 384
384 636
375 376
376 358
372 359
630 323
323 325
328 327
326 329
414 315
415 342
341 320
320 343
361 353
353 360
374 373
373 366
318 402
641 403
403 404
404 405
407 409
407 408
408 642
410 409
413 432
342 416
417 418
418 643
422 421
421 424
426 419
427 428
423 422
429 430
424 340
434 431
317 433
433 644
316 436
440 443
437 438
437 441
441 442
442 462
466 467
467 468
469 470
526 511
527 519
519 514
549 550
552 553
553 551
507 506
545 548
496 489
515 495
495 493
493 492
492 491
490 496
520 478
478 476
476 512
512 510
510 522
500 480
542 660
540 541
538 539
533 534
534 516
518 523
523 532
529 528
471 479
517 530
482 483
485 486
486 487
487 661
568 474
569 503
499 473
509 497
497 508
508 477
501 502
653 481
472 556
558 559
561 562
562 563
563 564
564 565
565 566
475 569
572 571
573 572
574 573
580 581
581 513
576 577
578 579
559 582
586 587
575 571
500 575
501 584
585 502
584 590
579 560
590 591
578 591
560 592
592 593
594 596
597 599
597 598
616 617
617 618
618 619
619 620
623 624
635 526
511 369
370 527
514 355
550 396
396 554
554 401
400 552
551 399
397 549
344 505
505 349
347 507
506 338
350 498
498 346
345 504
504 344
336 546
546 337
392 545
548 394
394 547
547 395
391 544
544 390
390 543
543 336
489 335
331 494
494 333
333 488
488 334
334 515
491 332
332 490
339 481
481 324
322 521
521 363
364 520
522 365
365 500
480 339
513 356
362 525
525 367
383 536
536 381
382 537
386 542
537 389
541 378
378 535
535 379
539 385
385 540
532 375
358 533
657 316
316 518
359 529
528 371
371 524
524 352
319 471
479 629
639 517
530 372
471 352
319 473
473 318
402 472
556 317
325 482
483 328
327 484
484 326
329 485
416 475
654 341
343 499
638 555
555 361
360 531
531 374
366 509
340 501
477 632
405 557
557 406
406 558
411 561
566 412
567 413
503 414
315 568
474 415
502 417
419 570
570 420
420 662
582 423
355 574
575 425
425 580
413 576
577 427
428 578
583 429
430 588
412 567
417 586
571 424
340 583
426 589
588 432
431 585
587 434
593 410
589 435
579 435
579 431
479 436
439 596
595 439
595 443
440 599
462 615
598 463
464 616
620 465
465 621
621 466
468 622
622 469
470 623
464 624
74 538
516 88
import os
import datetime
import random
import numpy as np
import cv2
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import MultiStepLR
from torch.nn.parallel import DistributedDataParallel as DDP
from torch import distributed as dist
from modules.generator import Generator
from modules.model import (
GeneratorFullModel,
DiscriminatorFullModel
)
def adjust_lr(optim, epoch, args):
decay_times = len([x for x in args["lr_milestones"] if x - 1 < epoch])
adj_lr = args["lr"] * ((1 / 10) ** decay_times)
for pg in optim.param_groups:
pg["lr"] = adj_lr
print("learning rate is {}".format(adj_lr))
return adj_lr
def make_weights(train_data_list, weight_lst=None):
weights = []
for i, train_data in enumerate(train_data_list):
count = len(train_data)
for j in range(count):
if weight_lst is not None:
weights.append(1 / count * weight_lst[i])
else:
weights.append(1 / count)
return weights
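# Presumed usage (a sketch, not taken from this file): the per-sample weights can
# feed torch.utils.data.WeightedRandomSampler so each sub-dataset is drawn with
# equal (or weight_lst-scaled) probability regardless of its size.
#
#   weights = make_weights([dataset_a, dataset_b])              # hypothetical datasets
#   sampler = torch.utils.data.WeightedRandomSampler(weights, num_samples=len(weights))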
def frozen_layers(model, key_list=None):
for name, param in model.named_parameters():
if key_list is None:
param.requires_grad = False
print("FIX : {}".format(name))
else:
for k in key_list:
if k in name:
param.requires_grad = False
print("FIX : {}".format(name))
break
return model
def set_random_seed(seed):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
def save_ckpt(out_path, epoch, models, total_iters=None):
ckpt = {k: v.state_dict() for k, v in models.items()}
ckpt["epoch"] = epoch
if out_path.lower().endswith((".pth", ".pth.tar")):
save_path = out_path
else:
if not os.path.isdir(out_path):
os.makedirs(out_path)
time = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
if total_iters is not None:
save_path = os.path.join(out_path, "ckpt_{}_{}.pth.tar".format(epoch, total_iters))
else:
save_path = os.path.join(out_path, "ckpt_{}_{}.pth.tar".format(epoch, time))
torch.save(ckpt, save_path)
return save_path
def load_ckpt(ckpt_path, models, device=None, strict=True, warp_ckpt=False):
ckpt = torch.load(ckpt_path, map_location=device)
epoch = ckpt["epoch"] if "epoch" in ckpt else 0
for key in models:
if key in ckpt:
print("load {} in checkpoint".format(key))
if not (isinstance(models[key], torch.optim.Adam)):
if warp_ckpt:
pretrained_dict = {k: v for k, v in ckpt[key].items() if 'ladder_network' in k or 'dense_motion_network' in k}
model_dict = models[key].state_dict()
model_dict.update(pretrained_dict)
models[key].load_state_dict(model_dict)
else:
msg = models[key].load_state_dict(ckpt[key], strict=strict)
print(msg)
else:
models[key].load_state_dict(ckpt[key])
return epoch
def frame_cropping(frame, bbox=None, expand_ratio=1.0, offset_x=0, offset_y=0):
img_h = frame.shape[0]
img_w = frame.shape[1]
min_img_sz = min(img_w, img_h)
if bbox is None:
bbox = [0, 0, 1, 1]
bbox_w = max(bbox[2] * img_w, bbox[3] * img_h)
center = [
int((bbox[0] + bbox[2] * 0.5 + bbox[2] * offset_x) * img_w),
int((bbox[1] + bbox[3] * 0.5 - bbox[3] * offset_y) * img_h),
]
crop_sz = min(min_img_sz, int(bbox_w * expand_ratio))
half_sz = int(crop_sz * 0.5)
if center[0] + half_sz > img_w:
center[0] = img_w - half_sz
if center[0] - half_sz < 0:
center[0] = half_sz
if center[1] + half_sz > img_h:
center[1] = img_h - half_sz
if center[1] - half_sz < 0:
center[1] = half_sz
frame_crop = frame[center[1] - half_sz : center[1] + half_sz, center[0] - half_sz : center[0] + half_sz]
bbox_crop = [(center[0] - half_sz) / img_w, (center[1] - half_sz) / img_h, crop_sz / img_w, crop_sz / img_h]
return frame_crop, bbox_crop
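# Example (values are illustrative): crop a square region around a normalized
# bbox given as [x, y, w, h] fractions of the image size, clamped to the borders.
#
#   frame = np.zeros((720, 1280, 3), dtype=np.uint8)
#   crop, crop_bbox = frame_cropping(frame, bbox=[0.4, 0.3, 0.2, 0.3], expand_ratio=1.5)
#   # crop is a 384x384 square; crop_bbox locates it relative to the original frame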
def ldmk_norm_by_bbox(ldmk, bbox, is_inverse=False):
ldmk_new = np.zeros(ldmk.shape, dtype=ldmk.dtype)
if not is_inverse:
ldmk_new[:, 0] = (ldmk[:, 0] - bbox[0]) / bbox[2]
ldmk_new[:, 1] = (ldmk[:, 1] - bbox[1]) / bbox[3]
else:
ldmk_new[:, 0] = ldmk[:, 0] * bbox[2] + bbox[0]
ldmk_new[:, 1] = ldmk[:, 1] * bbox[3] + bbox[1]
return ldmk_new
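# Example (illustrative): map landmarks expressed in full-image normalized
# coordinates into crop-relative coordinates, and back with is_inverse=True.
#
#   ldmk_crop = ldmk_norm_by_bbox(ldmk, crop_bbox)              # ldmk: (N, 2) array
#   ldmk_back = ldmk_norm_by_bbox(ldmk_crop, crop_bbox, is_inverse=True)   # ~= ldmk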
def draw_kp(frame, kp, size, color=(255, 255, 255)):
if frame is None:
frame = np.zeros((size[0], size[1], 3), dtype=np.uint8)
else:
frame = cv2.resize(frame, size)
for i in range(kp.shape[0]):
x = int((kp[i][0]) * size[0])
y = int((kp[i][1]) * size[1])
frame = cv2.circle(frame, (x, y), 1, color, -1)
return frame
def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None): # pylint: disable=redefined-builtin
assert isinstance(input, torch.Tensor)
if posinf is None:
posinf = torch.finfo(input.dtype).max
if neginf is None:
neginf = torch.finfo(input.dtype).min
assert nan == 0
return torch.clamp(input.unsqueeze(0).nansum(0), min=neginf, max=posinf, out=out)
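# Behaves like torch.nan_to_num restricted to nan=0: unsqueeze(0).nansum(0)
# replaces NaN entries with 0 (hence the assert), and clamp maps +/-inf to the
# given finite bounds (dtype limits by default).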
def build_outer_optimizer_and_scheduler(conf, G, D):
base_params_id = list(map(id, G.dense_motion_network.parameters()))
if conf["model"]["generator"].get("ladder", False):
base_params_id += list(map(id, G.ladder_network.parameters()))
warp_params = filter(lambda p: id(p) in base_params_id, G.parameters())
refine_params = filter(lambda p: id(p) not in base_params_id, G.parameters())
optim_G = torch.optim.Adam(
[
{"params": refine_params},
{
"params": warp_params,
"lr": conf["train"]["lr_generator"] * conf["train"].get("warplr_tune", 1.0),
},
],
lr=conf["train"]["lr_generator"],
betas=(conf["train"].get("outer_beta_1", 0.5), conf["train"].get("outer_beta_2", 0.999)),
)
optim_D = torch.optim.Adam(
D.parameters(), lr=conf["train"]["lr_discriminator"], betas=(conf["train"].get("outer_beta_1", 0.5), conf["train"].get("outer_beta_2", 0.999))
)
scheduler_G = MultiStepLR(
optim_G,
conf["train"]["epoch_milestones"],
gamma=0.1,
last_epoch=-1,
)
scheduler_D = MultiStepLR(
optim_D,
conf["train"]["epoch_milestones"],
gamma=0.1,
last_epoch=-1,
)
return optim_G, optim_D, scheduler_G, scheduler_D
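# The generator optimizer uses two parameter groups: the warping sub-networks
# (dense_motion_network and, when "ladder" is enabled, ladder_network) run at
# lr_generator * warplr_tune, while the remaining "refine" parameters use
# lr_generator. Both MultiStepLR schedulers decay the rates by 10x at the
# configured epoch_milestones.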
def build_inner_optimizer(conf, G_full_clone, D_full_clone):
base_params_id = list(map(id, G_full_clone.generator.dense_motion_network.parameters()))
if conf["model"]["generator"].get("ladder", False):
base_params_id += list(map(id, G_full_clone.generator.ladder_network.parameters()))
warp_params = filter(lambda p: id(p) in base_params_id, G_full_clone.generator.parameters())
refine_params = filter(lambda p: id(p) not in base_params_id, G_full_clone.generator.parameters())
inner_optimizer_G = torch.optim.Adam(
[
{"params": refine_params},
{
"params": warp_params,
"lr": conf["train"]["inner_lr_generator"] * conf["train"].get("inner_warplr_tune", 1.0),
},
],
lr=conf["train"]["inner_lr_generator"],
betas=(conf["train"].get("inner_beta_1", 0.5), conf["train"].get("inner_beta_2", 0.999)),
)
inner_optimizer_D = torch.optim.Adam(
D_full_clone.discriminator.parameters(), lr=conf["train"]["inner_lr_discriminator"], betas=(conf["train"].get("inner_beta_1", 0.5), conf["train"].get("inner_beta_2", 0.999))
)
return inner_optimizer_G, inner_optimizer_D
def build_full_model(conf, args, G, D):
G_full = GeneratorFullModel(
None,
G,
D,
conf["train"],
conf["model"].get("arch", None),
rank=args["local_rank"],
conf=conf,
)
if conf["model"]["discriminator"].get("type", "MultiPatchGan") == "MultiPatchGan":
D_full = DiscriminatorFullModel(None, G, D, conf["train"])
else:
raise Exception("Unsupported discriminator type: {}".format(conf["model"]["discriminator"].get("type", "MultiPatchGan")))
# w/ sync-batchnorm at modules/util.py
G_full.generator = torch.nn.SyncBatchNorm.convert_sync_batchnorm(G_full.generator)
G_full = DDP(G_full, device_ids=[args["local_rank"]], find_unused_parameters=True)
D_full = DDP(
D_full,
device_ids=[args["local_rank"]],
find_unused_parameters=True,
broadcast_buffers=False,
)
return G_full, D_full
def clone_model(conf, args, G, D):
G_full_clone = GeneratorFullModel(
None,
G,
D,
conf["train"],
conf["model"].get("arch", None),
rank=args["local_rank"],
conf=conf,
)
G_full_clone.cuda()
D_full_clone = DiscriminatorFullModel(None, G, D, conf["train"])
D_full_clone.cuda()
return G_full_clone, D_full_clone
def convert_data_to_cuda(data):
for key, value in data.items():
if isinstance(value, list):
if isinstance(value[0], (str, list)):
continue
data[key] = [v.cuda() for v in value]
elif isinstance(value, str):
continue
else:
data[key] = value.cuda()
return data
def return_batch_data(data, start, end):
batch_data = {}
for key, value in data.items():
batch_data[key] = value[start:end, ...]
return batch_data
def return_cls_data(data, index):
cls_data = {}
for key, value in data.items():
cls_data[key] = value[:, index, ...]
return cls_data
def process_vis_data(data):
for key, value in data.items():
if isinstance(value, list):
if isinstance(value[0], str):
continue
data[key] = [v.cpu() for v in value]
elif isinstance(value, str):
continue
else:
data[key] = value.cpu().transpose(0, 1).reshape(-1, *value.shape[2:])
return data
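# Note: for meta-training batches shaped (num_samples_per_cls, num_cls, ...) this
# moves tensors to CPU and flattens the first two dimensions (class-major order)
# so the visualizer can treat the support set as an ordinary batch.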
def save_events(writer, losses_G, loss_G, step, losses_D=None, loss_D=None, generated=None, loss_G_init=None, loss_D_init=None):
for key in losses_G:
writer.Scalar(key + "_G", losses_G[key].mean(), step)
if losses_D is not None:
for key in losses_D:
writer.Scalar(key + "_D", losses_D[key].mean(), step)
writer.Scalar("loss_D", loss_D, step)
writer.Scalar("loss_G", loss_G, step)
if loss_D_init is not None:
writer.Scalar("loss_D_init", loss_D_init, step)
if loss_G_init is not None:
writer.Scalar("loss_G_init", loss_G_init, step)
if generated is not None:
writer.Image("Pred", generated["prediction"][:16], step)
def save_training_images(conf, args, writer, generated, data, step, num):
writer.save_image(
generated["prediction"].cpu(),
generated["deformed"].cpu(),
step,
data["source"],
data["driving"],
num=num,
src_ldmk_line=data["source_line"],
drv_ldmk_line=data["driving_line"],
)
def reduce_loss_dict(loss_dict, world_size):
if world_size < 2:
return loss_dict
with torch.no_grad():
keys = []
losses = []
for k in sorted(loss_dict.keys()):
keys.append(k)
losses.append(loss_dict[k])
losses = torch.stack(losses, 0)
dist.reduce(losses, dst=0)
if dist.get_rank() == 0:
losses /= world_size
reduced_losses = {k: v for k, v in zip(keys, losses)}
return reduced_losses
def reduce_loss(loss, world_size):
if world_size < 2:
return loss
with torch.no_grad():
dist.reduce(loss, dst=0)
return loss / world_size
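# Both reducers send losses to rank 0 with dist.reduce and divide by the world
# size, so only rank 0 ends up holding the cross-GPU average; other ranks keep
# partially reduced values, which is fine because only rank 0 logs.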
import os
from torch.utils.tensorboard import SummaryWriter
import torch
import torch.nn
import torch.nn.functional as F
import torchvision.utils as tvu
class Visualizer(object):
def __init__(self, path):
if not os.path.isdir(path):
os.makedirs(path)
self.writer = SummaryWriter(path)
self.imgs_path = path.replace('events', 'Imgs')
if not os.path.isdir(self.imgs_path):
os.makedirs(self.imgs_path)
def Scalar(self, name, val, step):
self.writer.add_scalar(name, val, step)
def Image(self, name, val, step):
x = tvu.make_grid(val)
self.writer.add_image(name, x, step)
def save_image(self, val, val_deform, step, srcs, drv, num, src_ldmk_line=None, drv_ldmk_line=None, test_flag=-1):
src = srcs[:num]
imgs = torch.cat((drv[:num], drv_ldmk_line[:num], src, src_ldmk_line[:num], val_deform[:num], val[:num]), dim=0)
if test_flag == -1:
tvu.save_image(imgs, os.path.join(self.imgs_path, str(step)+'.png'), nrow=num, normalize=True)
else:
tvu.save_image(imgs, os.path.join(self.imgs_path, 'test_' + str(step) + '_' + str(test_flag) +'.png'), nrow=num, normalize=True)
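# Usage sketch: Visualizer wraps a TensorBoard SummaryWriter plus an image dump
# directory (the "events" segment of the path is swapped for "Imgs"); save_image
# stacks driving, driving-landmark, source, source-landmark, deformed and
# predicted rows into a single grid image per step.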
name: meta_portrait_base
channels:
- defaults
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2022.4.26=h06a4308_0
- certifi=2022.5.18.1=py37h06a4308_0
- ld_impl_linux-64=2.38=h1181459_1
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_0
- libgomp=11.2.0=h1234567_0
- libstdcxx-ng=11.2.0=h1234567_0
- ncurses=6.3=h7f8727e_2
- openssl=1.1.1o=h7f8727e_0
- pip=21.2.2=py37h06a4308_0
- python=3.7.13=h12debd9_0
- readline=8.1.2=h7f8727e_1
- setuptools=61.2.0=py37h06a4308_0
- sqlite=3.38.3=hc218d9a_0
- tk=8.6.11=h1ccaba5_1
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.2.5=h7f8727e_1
- zlib=1.2.12=h7f8727e_2
- pip:
- absl-py==1.0.0
- addict==2.4.0
- argon2-cffi==21.3.0
- argon2-cffi-bindings==21.2.0
- astor==0.8.1
- attrs==21.4.0
- backcall==0.2.0
- beautifulsoup4==4.11.1
- bleach==5.0.0
- cachetools==5.2.0
- cffi==1.15.0
- charset-normalizer==2.0.12
- cycler==0.11.0
- debugpy==1.6.0
- decorator==4.4.2
- defusedxml==0.7.1
- easydict==1.9
- entrypoints==0.4
- fastjsonschema==2.15.3
- flatbuffers==2.0
- freetype-py==2.3.0
- future==0.18.2
- gast==0.5.3
- google-auth==2.6.6
- google-auth-oauthlib==0.4.6
- google-pasta==0.2.0
- grpcio==1.46.3
- h5py==3.7.0
- idna==3.3
- imageio==2.9.0
- imageio-ffmpeg==0.4.7
- imgaug==0.4.0
- importlib-metadata==4.11.4
- importlib-resources==5.7.1
- ipykernel==6.13.0
- ipython==7.34.0
- ipython-genutils==0.2.0
- ipywidgets==7.7.0
- jedi==0.18.1
- jinja2==3.1.2
- joblib==1.1.0
- jsonschema==4.5.1
- jupyter-client==7.2.0
- jupyter-core==4.10.0
- jupyterlab-pygments==0.2.2
- jupyterlab-widgets==1.1.0
- keras-applications==1.0.8
- keras-preprocessing==1.1.2
- kiwisolver==1.4.2
- lmdb==1.1.1
- lpips==0.1.3
- markdown==3.3.7
- markupsafe==2.1.1
- matplotlib==3.2.1
- matplotlib-inline==0.1.3
- mistune==0.8.4
- moviepy==1.0.3
- mrcfile==1.3.0
- nbclient==0.6.3
- nbconvert==6.5.0
- nbformat==5.4.0
- nest-asyncio==1.5.5
- networkx==2.6.3
- notebook==6.4.11
- numpy==1.21.6
- oauthlib==3.2.0
- onnx==1.11.0
- onnxruntime==1.11.1
- open3d==0.11.2
- opencv-python==4.4.0.44
- packaging==21.3
- pandas==1.1.1
- pandocfilters==1.5.0
- parso==0.8.3
- pexpect==4.8.0
- pickleshare==0.7.5
- pillow==6.0.0
- plotly==4.9.0
- plyfile==0.7.4
- proglog==0.1.10
- prometheus-client==0.14.1
- prompt-toolkit==3.0.29
- protobuf==3.19.4
- psutil==5.9.1
- ptyprocess==0.7.0
- pyasn1==0.4.8
- pyasn1-modules==0.2.8
- pycparser==2.21
- pyglet==1.5.26
- pygments==2.12.0
- pyopengl==3.1.0
- pyparsing==3.0.9
- pyrender==0.1.45
- pyrsistent==0.18.1
- python-dateutil==2.8.1
- pytorch-fid==0.1.1
- pytz==2022.1
- pywavelets==1.1.1
- pyyaml==5.3.1
- pyzmq==23.0.0
- requests==2.27.1
- requests-oauthlib==1.3.1
- retrying==1.3.3
- rsa==4.8
- scenepic==1.0.7
- scikit-image==0.17.2
- scikit-learn==0.23.2
- scikit-video==1.1.11
- scipy==1.7.3
- send2trash==1.8.0
- shapely==1.8.2
- six==1.16.0
- sk-video==1.1.10
- sklearn==0.0
- soupsieve==2.3.2.post1
- tb-nightly==2.10.0a20220529
- tensorboard==1.14.0
- tensorboard-data-server==0.6.1
- tensorboard-plugin-wit==1.8.1
- tensorboardx==2.2
- tensorflow==1.14.0
- tensorflow-estimator==1.14.0
- termcolor==1.1.0
- terminado==0.15.0
- threadpoolctl==3.1.0
- tifffile==2021.11.2
- tinycss2==1.1.1
- torch==1.11.0
- torch-ema==0.2
- torch-fidelity==0.2.0
- torchvision==0.12.0
- tornado==6.1
- tqdm==4.64.0
- traitlets==5.2.1.post0
- transformations==2021.6.6
- trimesh==3.12.5
- typing-extensions==4.2.0
- urllib3==1.26.9
- validators==0.19.0
- wcwidth==0.2.5
- webencodings==0.5.1
- werkzeug==2.1.2
- widgetsnbextension==3.6.0
- wrapt==1.14.1
- yapf==0.32.0
- zipp==3.1.0
# Unique model identifier
modelCode=426
# Model name
modelName=MetaPortrait_Pytorch
# Model description
modelDescription=MetaPortrait is an identity-preserving talking-head generation framework
# Application scenarios
appScenario=face recognition, live streaming, beauty effects
# Framework type
frameType=PyTorch