# Copyright (c) SenseTime Research. All rights reserved.
import os
import sys
import torch
import numpy as np
sys.path.append(".")
from torch_utils.models import Generator
import click
import cv2
from typing import List, Optional
import subprocess
import legacy
from edit.edit_helper import conv_warper, decoder, encoder_ifg, encoder_ss, encoder_sefa
"""
Edit generated images with different SOTA methods.
Notes:
1. We provide some latent directions in the folder, you can play around with them.
2. 'upper_length' and 'bottom_length' are the available values for 'attr_name' in this demo.
3. Layers to control and editing strength are set in edit/edit_config.py.
Examples:
\b
# Editing with InterfaceGAN, StyleSpace, and Sefa
python edit.py --network pretrained_models/stylegan_human_v2_1024.pkl --attr_name upper_length \\
--seeds 61531,61570,61571,61610 --outdir outputs/edit_results
# Editing using inverted latent code
python edit.py --network outputs/pti/checkpoints/model_test.pkl --attr_name upper_length \\
--outdir outputs/edit_results --real True --real_w_path outputs/pti/embeddings/test/PTI/test/0.pt --real_img_path aligned_image/test.png
"""
@click.command()
@click.pass_context
@click.option('--network', 'ckpt_path', help='Network pickle filename', required=True)
@click.option('--attr_name', help='choose one of the attr: upper_length or bottom_length', type=str, required=True)
@click.option('--trunc', 'truncation', type=float, help='Truncation psi', default=0.8, show_default=True)
@click.option('--gen_video', type=bool, default=True, help='Whether to generate a video')
@click.option('--combine', type=bool, default=True, help='Whether to combine the different editing results into the same frame')
@click.option('--seeds', type=legacy.num_range, help='List of random seeds')
@click.option('--outdir', help='Where to save the output images', type=str, required=True, default='outputs/editing', metavar='DIR')
@click.option('--real', type=bool, help='Set True to edit a real image', default=False)
@click.option('--real_w_path', help='Path to the latent code of the real image')
@click.option('--real_img_path', help='Path to the real image; it is simply concatenated with the inverted and edited results')
def main(
ctx: click.Context,
ckpt_path: str,
attr_name: str,
truncation: float,
gen_video: bool,
combine: bool,
seeds: Optional[List[int]],
outdir: str,
real: bool,
real_w_path: str,
real_img_path: str
):
## convert pkl to pth
# if not os.path.exists(ckpt_path.replace('.pkl','.pth')):
legacy.convert(ckpt_path, ckpt_path.replace('.pkl','.pth'), G_only=real)
ckpt_path = ckpt_path.replace('.pkl','.pth')
print("start...", flush=True)
config = {"latent" : 512, "n_mlp" : 8, "channel_multiplier": 2}
generator = Generator(
size = 1024,
style_dim=config["latent"],
n_mlp=config["n_mlp"],
channel_multiplier=config["channel_multiplier"]
)
generator.load_state_dict(torch.load(ckpt_path)['g_ema'])
generator.eval().cuda()
with torch.no_grad():
mean_path = os.path.join('edit','mean_latent.pkl')
if not os.path.exists(mean_path):
mean_n = 3000
mean_latent = generator.mean_latent(mean_n).detach()
legacy.save_obj(mean_latent, mean_path)
else:
mean_latent = legacy.load_pkl(mean_path).cuda()
finals = []
## -- selected sample seeds -- ##
# seeds = [60948,60965,61174,61210,61511,61598,61610] #bottom -> long
# [60941,61064,61103,61313,61531,61570,61571] # bottom -> short
# [60941,60965,61064,61103,61174,61210,61531,61570,61571,61610] # upper --> long
# [60948,61313,61511,61598] # upper --> short
if real: seeds = [0]
for t in seeds:
if real: # currently assumes a single real image
if real_img_path:
real_image = cv2.imread(real_img_path)
real_image = cv2.cvtColor(real_image, cv2.COLOR_BGR2RGB)
import torchvision.transforms as transforms
transform = transforms.Compose( # normalize to (-1, 1)
[transforms.ToTensor(),
transforms.Normalize(mean=(.5,.5,.5), std=(.5,.5,.5))]
)
real_image = transform(real_image).unsqueeze(0).cuda()
test_input = torch.load(real_w_path)
output, _ = generator(test_input, False, truncation=1,input_is_latent=True, real=True)
else: # generate image from random seeds
test_input = torch.from_numpy(np.random.RandomState(t).randn(1, 512)).float().cuda() # torch.Size([1, 512])
output, _ = generator([test_input], False, truncation=truncation, truncation_latent=mean_latent, real=real)
# interfacegan
style_space, latent, noise = encoder_ifg(generator, test_input, attr_name, truncation, mean_latent,real=real)
image1 = decoder(generator, style_space, latent, noise)
# stylespace
style_space, latent, noise = encoder_ss(generator, test_input, attr_name, truncation, mean_latent,real=real)
image2 = decoder(generator, style_space, latent, noise)
# sefa
latent, noise = encoder_sefa(generator, test_input, attr_name, truncation, mean_latent,real=real)
image3, _ = generator([latent], noise=noise, input_is_latent=True)
if real_img_path:
final = torch.cat((real_image, output, image1, image2, image3), 3)
else:
final = torch.cat((output, image1, image2, image3), 3)
# legacy.visual(output, f'{outdir}/{attr_name}_{t:05d}_raw.jpg')
# legacy.visual(image1, f'{outdir}/{attr_name}_{t:05d}_ifg.jpg')
# legacy.visual(image2, f'{outdir}/{attr_name}_{t:05d}_ss.jpg')
# legacy.visual(image3, f'{outdir}/{attr_name}_{t:05d}_sefa.jpg')
if gen_video:
total_step = 90
if real:
video_ifg_path = f"{outdir}/video/ifg_{attr_name}_{real_w_path.split('/')[-2]}/"
video_ss_path = f"{outdir}/video/ss_{attr_name}_{real_w_path.split('/')[-2]}/"
video_sefa_path = f"{outdir}/video/sefa_{attr_name}_{real_w_path.split('/')[-2]}/"
else:
video_ifg_path = f"{outdir}/video/ifg_{attr_name}_{t:05d}/"
video_ss_path = f"{outdir}/video/ss_{attr_name}_{t:05d}/"
video_sefa_path = f"{outdir}/video/sefa_{attr_name}_{t:05d}/"
video_comb_path = f"{outdir}/video/tmp"
if combine:
if not os.path.exists(video_comb_path):
os.makedirs(video_comb_path)
else:
if not os.path.exists(video_ifg_path):
os.makedirs(video_ifg_path)
if not os.path.exists(video_ss_path):
os.makedirs(video_ss_path)
if not os.path.exists(video_sefa_path):
os.makedirs(video_sefa_path)
for i in range(total_step):
style_space, latent, noise = encoder_ifg(generator, test_input, attr_name, truncation, mean_latent, step=i, total=total_step,real=real)
image1 = decoder(generator, style_space, latent, noise)
style_space, latent, noise = encoder_ss(generator, test_input, attr_name, truncation, mean_latent, step=i, total=total_step,real=real)
image2 = decoder(generator, style_space, latent, noise)
latent, noise = encoder_sefa(generator, test_input, attr_name, truncation, mean_latent, step=i, total=total_step,real=real)
image3, _ = generator([latent], noise=noise, input_is_latent=True)
if combine:
if real_img_path:
comb_img = torch.cat((real_image, output, image1, image2, image3), 3)
else:
comb_img = torch.cat((output, image1, image2, image3), 3)
legacy.visual(comb_img, os.path.join(video_comb_path, f'{i:05d}.jpg'))
else:
    legacy.visual(image1, os.path.join(video_ifg_path, f'{i:05d}.jpg'))
    legacy.visual(image2, os.path.join(video_ss_path, f'{i:05d}.jpg'))
    legacy.visual(image3, os.path.join(video_sefa_path, f'{i:05d}.jpg'))
if combine:
cmd=f"ffmpeg -hide_banner -loglevel error -y -r 30 -i {video_comb_path}/%05d.jpg -vcodec libx264 -pix_fmt yuv420p {video_ifg_path.replace('ifg_', '')[:-1] + '.mp4'}"
subprocess.call(cmd, shell=True)
else:
    cmd=f"ffmpeg -hide_banner -loglevel error -y -r 30 -i {video_ifg_path}/%05d.jpg -vcodec libx264 -pix_fmt yuv420p {video_ifg_path[:-1] + '.mp4'}"
    subprocess.call(cmd, shell=True)
    cmd=f"ffmpeg -hide_banner -loglevel error -y -r 30 -i {video_ss_path}/%05d.jpg -vcodec libx264 -pix_fmt yuv420p {video_ss_path[:-1] + '.mp4'}"
    subprocess.call(cmd, shell=True)
    cmd=f"ffmpeg -hide_banner -loglevel error -y -r 30 -i {video_sefa_path}/%05d.jpg -vcodec libx264 -pix_fmt yuv420p {video_sefa_path[:-1] + '.mp4'}"
    subprocess.call(cmd, shell=True)
# interfacegan, stylespace, sefa
finals.append(final)
final = torch.cat(finals, 2)
legacy.visual(final, os.path.join(outdir,'final.jpg'))
if __name__ == "__main__":
main()
# Copyright (c) SenseTime Research. All rights reserved.
# empty
# Copyright (c) SenseTime Research. All rights reserved.
attr_dict = dict(
interface_gan={ # strength
'upper_length': [-1], # strength: negative for shorter, positive for longer
'bottom_length': [1]
},
stylespace={ # layer, strength, threshold
'upper_length': [5, -5, 0.0028], # strength: negative for shorter, positive for longer
'bottom_length': [3, 5, 0.003]
},
sefa={ # layer, strength
'upper_length': [[4, 5, 6, 7], 5],  # strength: negative for longer, positive for shorter (e.g. -5)
'bottom_length': [[4, 5, 6, 7], 5]
}
)
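
# A minimal sketch of how the entries above are unpacked downstream (mirrors
# edit/edit_helper.py); run this file directly to print the 'upper_length' values.
if __name__ == '__main__':
    strength_ifg = attr_dict['interface_gan']['upper_length'][0]
    layer_ss, strength_ss, threshold_ss = attr_dict['stylespace']['upper_length']
    layers_sefa, strength_sefa = attr_dict['sefa']['upper_length']
    print(strength_ifg, (layer_ss, strength_ss, threshold_ss), (layers_sefa, strength_sefa))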
# Copyright (c) SenseTime Research. All rights reserved.
from legacy import save_obj, load_pkl
import torch
from torch.nn import functional as F
import pandas as pd
from .edit_config import attr_dict
import os
def conv_warper(layer, input, style, noise):
# apply the conv with per-sample, style-modulated weights
conv = layer.conv
batch, in_channel, height, width = input.shape
style = style.view(batch, 1, in_channel, 1, 1)
weight = conv.scale * conv.weight * style
if conv.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
weight = weight * demod.view(batch, conv.out_channel, 1, 1, 1)
weight = weight.view(
batch * conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size
)
if conv.upsample:
input = input.view(1, batch * in_channel, height, width)
weight = weight.view(
batch, conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size
)
weight = weight.transpose(1, 2).reshape(
batch * in_channel, conv.out_channel, conv.kernel_size, conv.kernel_size
)
out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, conv.out_channel, height, width)
out = conv.blur(out)
elif conv.downsample:
input = conv.blur(input)
_, _, height, width = input.shape
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, conv.out_channel, height, width)
else:
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=conv.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, conv.out_channel, height, width)
out = layer.noise(out, noise=noise)
out = layer.activate(out)
return out
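# Note: conv_warper folds the batch dimension into the channel dimension and
# runs a single grouped convolution (groups=batch), so every sample is
# convolved with its own style-modulated, demodulated weight, mirroring
# StyleGAN2's modulated convolution.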
def decoder(G, style_space, latent, noise):
# a decoder wrapper for G: replays the synthesis network from explicit style-space codes
out = G.input(latent)
out = conv_warper(G.conv1, out, style_space[0], noise[0])
skip = G.to_rgb1(out, latent[:, 1])
i = 1
for conv1, conv2, noise1, noise2, to_rgb in zip(
G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
):
out = conv_warper(conv1, out, style_space[i], noise=noise1)
out = conv_warper(conv2, out, style_space[i+1], noise=noise2)
skip = to_rgb(out, latent[:, i + 2], skip)
i += 2
image = skip
return image
def encoder_ifg(G, noise, attr_name, truncation=1, truncation_latent=None,
latent_dir='latent_direction/ss/',
step=0, total=0, real=False):
if not real:
styles = [noise]
styles = [G.style(s) for s in styles]
style_space = []
if truncation<1:
if not real:
style_t = []
for style in styles:
style_t.append(truncation_latent + truncation * (style - truncation_latent))
styles = style_t
else: # styles are latent (tensor: 1,18,512), for real PTI output
truncation_latent = truncation_latent.repeat(18,1).unsqueeze(0) # (1,512) --> (1,18,512)
styles = truncation_latent + truncation * (noise - truncation_latent)
noise = [getattr(G.noises, 'noise_{}'.format(i)) for i in range(G.num_layers)]
if not real:
inject_index = G.n_latent
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
else: latent=styles
style_space.append(G.conv1.conv.modulation(latent[:, 0]))
i = 1
for conv1, conv2, noise1, noise2, to_rgb in zip(
G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
):
style_space.append(conv1.conv.modulation(latent[:, i]))
style_space.append(conv2.conv.modulation(latent[:, i+1]))
i += 2
# get layer, strength by dict
strength = attr_dict['interface_gan'][attr_name][0]
if step != 0 and total != 0:
strength = step / total * strength
for i in range(15):
style_vect = load_pkl(os.path.join(latent_dir, '{}/style_vect_mean_{}.pkl'.format(attr_name, i)))
style_vect = torch.from_numpy(style_vect).to(latent.device).float()
style_space[i] += style_vect * strength
return style_space, latent, noise
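# A minimal end-to-end sketch (not called anywhere; assumes a `generator` and
# `mean_latent` loaded as in edit.py, and a hypothetical seed) of how an
# encoder/decoder pair applies one InterFaceGAN edit:
def _ifg_edit_example(generator, mean_latent, seed=0):
    import numpy as np
    z = torch.from_numpy(np.random.RandomState(seed).randn(1, 512)).float().cuda()
    style_space, latent, noise = encoder_ifg(
        generator, z, 'upper_length', truncation=0.8, truncation_latent=mean_latent)
    return decoder(generator, style_space, latent, noise)  # edited image tensor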
def encoder_ss(G, noise, attr_name, truncation=1, truncation_latent=None,
statics_dir="latent_direction/ss_statics",
latent_dir="latent_direction/ss/",
step=0, total=0,real=False):
if not real:
styles = [noise]
styles = [G.style(s) for s in styles]
style_space = []
if truncation<1:
if not real:
style_t = []
for style in styles:
style_t.append(
truncation_latent + truncation * (style - truncation_latent)
)
styles = style_t
else: # styles are latent (tensor: 1,18,512), for real PTI output
truncation_latent = truncation_latent.repeat(18,1).unsqueeze(0) # (1,512) --> (1,18,512)
styles = truncation_latent + truncation * (noise - truncation_latent)
noise = [getattr(G.noises, 'noise_{}'.format(i)) for i in range(G.num_layers)]
if not real:
inject_index = G.n_latent
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
else: latent = styles
style_space.append(G.conv1.conv.modulation(latent[:, 0]))
i = 1
for conv1, conv2, noise1, noise2, to_rgb in zip(
G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
):
style_space.append(conv1.conv.modulation(latent[:, i]))
style_space.append(conv2.conv.modulation(latent[:, i+1]))
i += 2
# get threshold, layer, strength by dict
layer, strength, threshold = attr_dict['stylespace'][attr_name]
statis_dir = os.path.join(statics_dir, "{}_statis/{}".format(attr_name, layer))
statis_csv_path = os.path.join(statis_dir, "statis.csv")
statis_df = pd.read_csv(statis_csv_path)
statis_df = statis_df.sort_values(by='channel', ascending=True)
ch_mask = statis_df['strength'].values
ch_mask = torch.from_numpy(ch_mask).to(latent.device).float()
ch_mask = (ch_mask.abs()>threshold).float()
style_vect = load_pkl(os.path.join(latent_dir, '{}/style_vect_mean_{}.pkl'.format(attr_name, layer)))
style_vect = torch.from_numpy(style_vect).to(latent.device).float()
style_vect = style_vect * ch_mask
if step != 0 and total != 0:
strength = step / total * strength
style_space[layer] += style_vect * strength
return style_space, latent, noise
def encoder_sefa(G, noise, attr_name, truncation=1, truncation_latent=None,
latent_dir='latent_direction/sefa/',
step=0, total=0, real=False):
if not real:
styles = [noise]
styles = [G.style(s) for s in styles]
if truncation<1:
if not real:
style_t = []
for style in styles:
style_t.append(
truncation_latent + truncation * (style - truncation_latent)
)
styles = style_t
else:
truncation_latent = truncation_latent.repeat(18,1).unsqueeze(0) # (1,512) --> (1,18,512)
styles = truncation_latent + truncation * (noise - truncation_latent)
noise = [getattr(G.noises, 'noise_{}'.format(i)) for i in range(G.num_layers)]
if not real:
inject_index = G.n_latent
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
else: latent = styles
layer, strength = attr_dict['sefa'][attr_name]
sefa_vect = torch.load(os.path.join(latent_dir, '{}.pt'.format(attr_name))).to(latent.device).float()
if step != 0 and total != 0:
strength = step / total * strength
for l in layer:
latent[:, l, :] += (sefa_vect * strength * 2)
return latent, noise
name: stylehuman
channels:
- pytorch
- nvidia
dependencies:
- python=3.8
- pip
- numpy>=1.20
- click>=8.0
- pillow=8.3.1
- scipy=1.7.1
- pytorch=1.9.1
- cudatoolkit=11.1
- requests=2.26.0
- tqdm=4.62.2
- ninja=1.10.2
- matplotlib=3.4.2
- imageio=2.9.0
- pip:
- imgui==1.3.0
- glfw==2.2.0
- pyopengl==3.1.5
- imageio-ffmpeg==0.4.3
- lpips==0.1.4
- pyspng
- dlib
- opencv-python
- pandas
- moviepy
- imutils
# Copyright (c) SenseTime Research. All rights reserved.
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
## This script generates images from pre-trained networks based on StyleGAN1 (TensorFlow) and StyleGAN2-ada (PyTorch). ##
import os
import click
import dnnlib
import numpy as np
import PIL.Image
import legacy
from typing import List, Optional
"""
Generate images using pretrained network pickle.
Examples:
\b
# Generate human full-body images without truncation
python generate.py --outdir=outputs/generate/stylegan_human_v2_1024 --trunc=1 --seeds=1,3,5,7 \\
--network=pretrained_models/stylegan_human_v2_1024.pkl --version 2
\b
# Generate human full-body images with truncation
python generate.py --outdir=outputs/generate/stylegan_human_v2_1024 --trunc=0.8 --seeds=0-100 \\
--network=pretrained_models/stylegan_human_v2_1024.pkl --version 2
# \b
# Generate human full-body images using stylegan V1
# python generate.py --outdir=outputs/generate/stylegan_human_v1_1024 \\
# --network=pretrained_models/stylegan_human_v1_1024.pkl --version 1
"""
@click.command()
@click.pass_context
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--seeds', type=legacy.num_range, help='List of random seeds')
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
@click.option('--outdir', help='Where to save the output images', default= 'outputs/generate/' , type=str, required=True, metavar='DIR')
@click.option('--version', help='StyleGAN version: 1, 2, or 3', type=int, default=2)
def generate_images(
ctx: click.Context,
network_pkl: str,
seeds: Optional[List[int]],
truncation_psi: float,
noise_mode: str,
outdir: str,
version: int
):
print('Loading networks from "%s"...' % network_pkl)
if version == 1:
import dnnlib.tflib as tflib
tflib.init_tf()
G, D, Gs = legacy.load_pkl(network_pkl)
else:
import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu')
dtype = torch.float32 if device.type == 'mps' else torch.float64
with dnnlib.util.open_url(network_pkl) as f:
G = legacy.load_network_pkl(f)['G_ema'].to(device, dtype=dtype) # type: ignore
os.makedirs(outdir, exist_ok=True)
if seeds is None:
ctx.fail('--seeds option is required.')
# Generate images.
target_z = np.array([])
target_w = np.array([])
latent_out = outdir.replace('/images/','')
for seed_idx, seed in enumerate(seeds):
if seed % 5000 == 0:
print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
if version == 1: ## stylegan v1
z = np.random.RandomState(seed).randn(1, Gs.input_shape[1])
# Generate image.
fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
if noise_mode == 'const': randomize_noise=False
else: randomize_noise = True
images = Gs.run(z, None, truncation_psi=truncation_psi, randomize_noise=randomize_noise, output_transform=fmt)
PIL.Image.fromarray(images[0], 'RGB').save(f'{outdir}/seed{seed:04d}.png')
else: ## stylegan v2/v3
label = torch.zeros([1, G.c_dim], device=device)
z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device, dtype=dtype)
if target_z.size==0:
target_z= z.cpu()
else:
target_z=np.append(target_z, z.cpu(), axis=0)
w = G.mapping(z, label,truncation_psi=truncation_psi)
img = G.synthesis(w, noise_mode=noise_mode,force_fp32 = True)
if target_w.size==0:
target_w= w.cpu()
else:
target_w=np.append(target_w, w.cpu(), axis=0)
img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{outdir}/seed{seed:04d}.png')
# print(target_z)
# print(target_z.shape,target_w.shape)
#----------------------------------------------------------------------------
if __name__ == "__main__":
generate_images()
#----------------------------------------------------------------------------
# Copyright (c) SenseTime Research. All rights reserved.
import torch
import torch.nn.functional as F
from tqdm import tqdm
from lpips import LPIPS
import numpy as np
from torch_utils.models import Generator as bodyGAN
from torch_utils.models_face import Generator as FaceGAN
import dlib
from utils.face_alignment import align_face_for_insetgan
from utils.util import visual,tensor_to_numpy, numpy_to_tensor
import legacy
import os
import click
class InsetGAN(torch.nn.Module):
def __init__(self, stylebody_ckpt, styleface_ckpt):
super().__init__()
## convert pkl to pth
if not os.path.exists(stylebody_ckpt.replace('.pkl','.pth')):
legacy.convert(stylebody_ckpt, stylebody_ckpt.replace('.pkl','.pth'))
stylebody_ckpt = stylebody_ckpt.replace('.pkl','.pth')
if not os.path.exists(styleface_ckpt.replace('.pkl','.pth')):
legacy.convert(styleface_ckpt, styleface_ckpt.replace('.pkl','.pth'))
styleface_ckpt = styleface_ckpt.replace('.pkl','.pth')
# dual generator
config = {"latent" : 512, "n_mlp" : 8, "channel_multiplier": 2}
self.body_generator = bodyGAN(
size = 1024,
style_dim=config["latent"],
n_mlp=config["n_mlp"],
channel_multiplier=config["channel_multiplier"]
)
self.body_generator.load_state_dict(torch.load(stylebody_ckpt)['g_ema'])
self.body_generator.eval().requires_grad_(False).cuda()
self.face_generator = FaceGAN(
size = 1024,
style_dim=config["latent"],
n_mlp=config["n_mlp"],
channel_multiplier=config["channel_multiplier"]
)
self.face_generator.load_state_dict(torch.load(styleface_ckpt)['g_ema'])
self.face_generator.eval().requires_grad_(False).cuda()
# crop function
self.dlib_predictor = dlib.shape_predictor('./pretrained_models/shape_predictor_68_face_landmarks.dat')
self.dlib_cnn_face_detector = dlib.cnn_face_detection_model_v1("pretrained_models/mmod_human_face_detector.dat")
# criterion
self.lpips_loss = LPIPS(net='alex').cuda().eval()
self.l1_loss = torch.nn.L1Loss(reduction='mean')
def loss_coarse(self, A_face, B, p1=500, p2=0.05):
A_face = F.interpolate(A_face, size=(64, 64), mode='area')
B = F.interpolate(B, size=(64, 64), mode='area')
loss_l1 = p1 * self.l1_loss(A_face, B)
loss_lpips = p2 * self.lpips_loss(A_face, B)
return loss_l1 + loss_lpips
@staticmethod
def get_border_mask(A, x, spec):
mask = torch.zeros_like(A)
mask[:, :, :x, ] = 1
mask[:, :, -x:, ] = 1
mask[:, :, :, :x ] = 1
mask[:, :, :, -x:] = 1
return mask
@staticmethod
def get_body_mask(A, crop, padding=4):
mask = torch.ones_like(A)
mask[:, :, crop[1]-padding:crop[3]+padding, crop[0]-padding:crop[2]+padding] = 0
return mask
def loss_border(self, A_face, B, p1=10000, p2=2, spec=None):
mask = self.get_border_mask(A_face, 8, spec)
loss_l1 = p1 * self.l1_loss(A_face*mask, B*mask)
loss_lpips = p2 * self.lpips_loss(A_face*mask, B*mask)
return loss_l1 + loss_lpips
def loss_body(self, A, B, crop, p1=9000, p2=0.1):
padding = int((crop[3] - crop[1]) / 20)
mask = self.get_body_mask(A, crop, padding)
loss_l1 = p1 * self.l1_loss(A*mask, B*mask)
loss_lpips = p2 * self.lpips_loss(A*mask, B*mask)
return loss_l1+loss_lpips
def loss_face(self, A, B, crop, p1=5000, p2=1.75):
mask = 1 - self.get_body_mask(A, crop)
loss_l1 = p1 * self.l1_loss(A*mask, B*mask)
loss_lpips = p2 * self.lpips_loss(A*mask, B*mask)
return loss_l1+loss_lpips
def loss_reg(self, w, w_mean, p1, w_plus_delta=None, p2=None):
return p1 * torch.mean(((w - w_mean) ** 2)) + p2 * torch.mean(w_plus_delta ** 2)
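    # loss_reg pulls the optimized w toward the generator's mean latent (first
    # term) and keeps the per-layer w+ offsets small (second term); the
    # weights p1/p2 are set per optimization stage below.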
# FFHQ type
def detect_face_dlib(self, img):
# tensor to numpy array rgb uint8
img = tensor_to_numpy(img)
aligned_image, crop, rect = align_face_for_insetgan(img=img,
detector=self.dlib_cnn_face_detector,
predictor=self.dlib_predictor,
output_size=256)
aligned_image = np.array(aligned_image)
aligned_image = numpy_to_tensor(aligned_image)
return aligned_image, crop, rect
# joint optimization
def dual_optimizer(self,
face_w,
body_w,
joint_steps=500,
face_initial_learning_rate=0.02,
body_initial_learning_rate=0.05,
lr_rampdown_length=0.25,
lr_rampup_length=0.05,
seed=None,
output_path=None,
video=0):
'''
Given a face_w, optimize a body_w with suitable body pose & shape for face_w
'''
def visual_(path, synth_body, synth_face, body_crop, step, both=False, init_body_with_face=None):
tmp = synth_body.clone().detach()
tmp[:, :, body_crop[1]:body_crop[3], body_crop[0]:body_crop[2]] = synth_face
if both:
tmp = torch.cat([synth_body, tmp], dim=3)
save_path = os.path.join(path, f"{step:04d}.jpg")
visual(tmp, save_path)
def forward(face_w_opt,
body_w_opt,
face_w_delta,
body_w_delta,
body_crop,
update_crop=False
):
if face_w_opt.shape[1] != 18:
face_ws = (face_w_opt).repeat([1, 18, 1])
else:
face_ws = face_w_opt.clone()
face_ws = face_ws + face_w_delta
synth_face, _ = self.face_generator([face_ws], input_is_latent=True, randomize_noise=False)
body_ws = (body_w_opt).repeat([1, 18, 1])
body_ws = body_ws + body_w_delta
synth_body, _ = self.body_generator([body_ws], input_is_latent=True, randomize_noise=False)
if update_crop:
old_r = (body_crop[3]-body_crop[1]) // 2, (body_crop[2]-body_crop[0]) // 2
_, body_crop, _ = self.detect_face_dlib(synth_body)
center = (body_crop[1] + body_crop[3]) // 2, (body_crop[0] + body_crop[2]) // 2
body_crop = (center[1] - old_r[1], center[0] - old_r[0], center[1] + old_r[1], center[0] + old_r[0])
synth_body_face = synth_body[:, :, body_crop[1]:body_crop[3], body_crop[0]:body_crop[2]]
# resize the face to the body crop (handles both up- and down-scaling)
synth_face_resize = F.interpolate(synth_face, size=(body_crop[3]-body_crop[1], body_crop[2]-body_crop[0]), mode='area')
return synth_body, synth_body_face, synth_face, synth_face_resize, body_crop
def update_lr(init_lr, step, num_steps, lr_rampdown_length, lr_rampup_length):
    # cosine ramp-down over the final fraction of steps, linear ramp-up over the first
    t = step / num_steps
    lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length)
    lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)
    lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length)
    lr = init_lr * lr_ramp
    return lr
# update output_path
output_path = os.path.join(output_path, seed)
os.makedirs(output_path, exist_ok=True)
# define optimized params
body_w_mean = self.body_generator.mean_latent(10000).detach()
face_w_opt = face_w.clone().detach().requires_grad_(True)
body_w_opt = body_w.clone().detach().requires_grad_(True)
face_w_delta = torch.zeros_like(face_w.repeat([1, 18, 1])).requires_grad_(True)
body_w_delta = torch.zeros_like(body_w.repeat([1, 18, 1])).requires_grad_(True)
# generate ref face & body
ref_body, _ = self.body_generator([body_w.repeat([1, 18, 1])], input_is_latent=True, randomize_noise=False)
# for inversion
ref_face, _ = self.face_generator([face_w.repeat([1, 18, 1])], input_is_latent=True, randomize_noise=False)
# get initialized crop
_, body_crop, _ = self.detect_face_dlib(ref_body)
_, _, face_crop = self.detect_face_dlib(ref_face) # NOTE: this is the raw face rect only, not the FFHQ-style crop.
# create optimizer
face_optimizer = torch.optim.Adam([face_w_opt, face_w_delta], betas=(0.9, 0.999), lr=face_initial_learning_rate)
body_optimizer = torch.optim.Adam([body_w_opt, body_w_delta], betas=(0.9, 0.999), lr=body_initial_learning_rate)
global_step = 0
# Stage1: remove background of face image
face_steps = 25
pbar = tqdm(range(face_steps))
for step in pbar:
face_lr = update_lr(face_initial_learning_rate / 2, step, face_steps, lr_rampdown_length, lr_rampup_length)
for param_group in face_optimizer.param_groups:
param_group['lr'] =face_lr
synth_body, synth_body_face, synth_face_raw, synth_face, body_crop = forward(face_w_opt,
body_w_opt,
face_w_delta,
body_w_delta,
body_crop)
loss_face = self.loss_face(synth_face_raw, ref_face, face_crop, 5000, 1.75)
loss_coarse = self.loss_coarse(synth_face, synth_body_face, 50, 0.05)
loss_border = self.loss_border(synth_face, synth_body_face, 1000, 0.1)
loss = loss_coarse + loss_border + loss_face
face_optimizer.zero_grad()
loss.backward()
face_optimizer.step()
# visualization
if video:
visual_(output_path, synth_body, synth_face, body_crop, global_step)
pbar.set_description(
(
f"face: {step:.4f}, lr: {face_lr}, loss: {loss.item():.2f}, loss_coarse: {loss_coarse.item():.2f};"
f"loss_border: {loss_border.item():.2f}, loss_face: {loss_face.item():.2f};"
)
)
global_step += 1
# Stage2: find a suitable body
body_steps = 150
pbar = tqdm(range(body_steps))
for step in pbar:
body_lr = update_lr(body_initial_learning_rate, step, body_steps, lr_rampdown_length, lr_rampup_length)
update_crop = (step % 50 == 0)
# update_crop = False
for param_group in body_optimizer.param_groups:
param_group['lr'] =body_lr
synth_body, synth_body_face, synth_face_raw, synth_face, body_crop = forward(face_w_opt,
body_w_opt,
face_w_delta,
body_w_delta,
body_crop,
update_crop=update_crop)
loss_coarse = self.loss_coarse(synth_face, synth_body_face, 500, 0.05)
loss_border = self.loss_border(synth_face, synth_body_face, 2500, 0)
loss_body = self.loss_body(synth_body, ref_body, body_crop, 9000, 0.1)
loss_reg = self.loss_reg(body_w_opt, body_w_mean, 15000, body_w_delta, 0)
loss = loss_coarse + loss_border + loss_body + loss_reg
body_optimizer.zero_grad()
loss.backward()
body_optimizer.step()
# visualization
if video:
visual_(output_path, synth_body, synth_face, body_crop, global_step)
pbar.set_description(
(
f"body: {step:.4f}, lr: {body_lr}, loss: {loss.item():.2f}, loss_coarse: {loss_coarse.item():.2f};"
f"loss_border: {loss_border.item():.2f}, loss_body: {loss_body.item():.2f}, loss_reg: {loss_reg:.2f}"
)
)
global_step += 1
# Stage3: joint optimization
interval = 50
joint_face_steps = joint_steps // 2
joint_body_steps = joint_steps // 2
face_step = 0
body_step = 0
pbar = tqdm(range(joint_steps))
flag = -1
for step in pbar:
if step % interval == 0: flag += 1
text_flag = 'optimize_face' if flag % 2 == 0 else 'optimize_body'
synth_body, synth_body_face, synth_face_raw, synth_face, body_crop = forward(face_w_opt,
body_w_opt,
face_w_delta,
body_w_delta,
body_crop)
if text_flag == 'optimize_face':
face_lr = update_lr(face_initial_learning_rate, face_step, joint_face_steps, lr_rampdown_length, lr_rampup_length)
for param_group in face_optimizer.param_groups:
param_group['lr'] =face_lr
loss_face = self.loss_face(synth_face_raw, ref_face, face_crop, 5000, 1.75)
loss_coarse = self.loss_coarse(synth_face, synth_body_face, 500, 0.05)
loss_border = self.loss_border(synth_face, synth_body_face, 25000, 0)
loss = loss_coarse + loss_border + loss_face
face_optimizer.zero_grad()
loss.backward()
face_optimizer.step()
pbar.set_description(
(
f"face: {step}, lr: {face_lr:.4f}, loss: {loss.item():.2f}, loss_coarse: {loss_coarse.item():.2f};"
f"loss_border: {loss_border.item():.2f}, loss_face: {loss_face.item():.2f};"
)
)
face_step += 1
else:
body_lr = update_lr(body_initial_learning_rate, body_step, joint_body_steps, lr_rampdown_length, lr_rampup_length)
for param_group in body_optimizer.param_groups:
param_group['lr'] =body_lr
loss_coarse = self.loss_coarse(synth_face, synth_body_face, 500, 0.05)
loss_border = self.loss_border(synth_face, synth_body_face, 2500, 0)
loss_body = self.loss_body(synth_body, ref_body, body_crop, 9000, 0.1)
loss_reg = self.loss_reg(body_w_opt, body_w_mean, 25000, body_w_delta, 0)
loss = loss_coarse + loss_border + loss_body + loss_reg
body_optimizer.zero_grad()
loss.backward()
body_optimizer.step()
pbar.set_description(
(
f"body: {step}, lr: {body_lr:.4f}, loss: {loss.item():.2f}, loss_coarse: {loss_coarse.item():.2f};"
f"loss_border: {loss_border.item():.2f}, loss_body: {loss_body.item():.2f}, loss_reg: {loss_reg:.2f}"
)
)
body_step += 1
if video:
visual_(output_path, synth_body, synth_face, body_crop, global_step)
global_step += 1
return face_w_opt.repeat([1, 18, 1])+face_w_delta, body_w_opt.repeat([1, 18, 1])+body_w_delta, body_crop
"""
Jointly combine and optimize generated faces and bodies.
Examples:
\b
# Combine the generated human full-body image from the provided StyleGAN-Human pre-trained model
# with the generated face image from the FFHQ model, and optimize both latent codes to produce a coherent face-body image
python insetgan.py --body_network=pretrained_models/stylegan_human_v2_1024.pkl --face_network=pretrained_models/ffhq.pkl \\
--body_seed=82 --face_seed=43 --trunc=0.6 --outdir=outputs/insetgan/ --video 1
"""
@click.command()
@click.pass_context
@click.option('--face_network', default="./pretrained_models/ffhq.pkl", help='Face network pickle filename', required=True)
@click.option('--body_network', default='./pretrained_models/stylegan2_1024.pkl', help='Body network pickle filename', required=True)
@click.option('--face_seed', type=int, default=82, help='random seed for the face latent')
@click.option('--body_seed', type=int, default=43, help='random seed for the body latent')
@click.option('--joint_steps', type=int, default=500, help='num steps for joint optimization')
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=0.6, show_default=True)
@click.option('--outdir', help='Where to save the output images', default= "outputs/insetgan/" , type=str, required=True, metavar='DIR')
@click.option('--video', help="set to 1 to save a video of the optimization", type=int, default=0)
def main(
ctx: click.Context,
face_network: str,
body_network: str,
face_seed: int,
body_seed: int,
joint_steps: int,
truncation_psi: float,
outdir: str,
video: int):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
insgan = InsetGAN(body_network, face_network)
os.makedirs(outdir, exist_ok=True)
face_z = np.random.RandomState(face_seed).randn(1, 512).astype(np.float32)
face_mean = insgan.face_generator.mean_latent(3000)
face_w = insgan.face_generator.get_latent(torch.from_numpy(face_z).to(device)) # [N, L, C]
face_w = truncation_psi * face_w + (1-truncation_psi) * face_mean
face_img, _ = insgan.face_generator([face_w], input_is_latent=True)
body_z = np.random.RandomState(body_seed).randn(1, 512).astype(np.float32)
body_mean = insgan.body_generator.mean_latent(3000)
body_w = insgan.body_generator.get_latent(torch.from_numpy(body_z).to(device)) # [N, L, C]
body_w = truncation_psi * body_w + (1-truncation_psi) * body_mean
body_img, _ = insgan.body_generator([body_w], input_is_latent=True)
_, body_crop, _ = insgan.detect_face_dlib(body_img)
face_img = F.interpolate(face_img, size=(body_crop[3]-body_crop[1], body_crop[2]-body_crop[0]), mode='area')
cp_body = body_img.clone()
cp_body[:, :, body_crop[1]:body_crop[3], body_crop[0]:body_crop[2]] = face_img
optim_face_w, optim_body_w, crop = insgan.dual_optimizer(
face_w,
body_w,
joint_steps=joint_steps,
seed=f'{face_seed:04d}_{body_seed:04d}',
output_path=outdir,
video=video
)
if video:
ffmpeg_cmd = f"ffmpeg -hide_banner -loglevel error -i ./{outdir}/{face_seed:04d}_{body_seed:04d}/%04d.jpg -c:v libx264 -vf fps=30 -pix_fmt yuv420p ./{outdir}/{face_seed:04d}_{body_seed:04d}.mp4"
os.system(ffmpeg_cmd)
new_face_img, _ = insgan.face_generator([optim_face_w], input_is_latent=True)
new_shape = crop[3] - crop[1], crop[2] - crop[0]
new_face_img_crop = F.interpolate(new_face_img, size=new_shape, mode='area')
seamless_body, _ = insgan.body_generator([optim_body_w], input_is_latent=True)
seamless_body[:, :, crop[1]:crop[3], crop[0]:crop[2]] = new_face_img_crop
temp = torch.cat([cp_body, seamless_body], dim=3)
visual(temp, f"{outdir}/{face_seed:04d}_{body_seed:04d}.png")
if __name__ == "__main__":
main()
# Copyright (c) SenseTime Research. All rights reserved.
## interpolate between two z codes
## score all intermediate latent codes
# reference: https://www.aiuai.cn/aifarm1929.html
import os
import random
from typing import List, Optional

from tqdm import tqdm
import click
import dnnlib
import numpy as np
import PIL.Image
import torch
import legacy
def lerp(code1, code2, alpha):
    # linear interpolation: alpha=1 returns code1, alpha=0 returns code2
    return code1 * alpha + code2 * (1 - alpha)
# Taken and adapted from wikipedia's slerp article
# https://en.wikipedia.org/wiki/Slerp
def slerp(code1, code2, alpha, DOT_THRESHOLD=0.9995): # Spherical linear interpolation
code1_copy = np.copy(code1)
code2_copy = np.copy(code2)
code1 = code1 / np.linalg.norm(code1)
code2 = code2 / np.linalg.norm(code2)
dot = np.sum(code1 * code2)
if np.abs(dot) > DOT_THRESHOLD:
    # nearly parallel: fall back to lerp, swapping arguments so the endpoint
    # convention matches the spherical branch (alpha=0 -> code1, alpha=1 -> code2)
    return lerp(code2_copy, code1_copy, alpha)
# Calculate initial angle between v0 and v1
theta_0 = np.arccos(dot)
sin_theta_0 = np.sin(theta_0)
# Angle at timestep t
theta_t = theta_0 * alpha
sin_theta_t = np.sin(theta_t)
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
s1 = sin_theta_t / sin_theta_0
code3 = s0 * code1_copy + s1 * code2_copy
return code3
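# A small self-check (not called anywhere; invoke manually) for the
# interpolators above: random high-dimensional codes are nearly orthogonal,
# so the spherical branch is exercised and both endpoints are recovered.
def _slerp_self_check():
    rng = np.random.RandomState(0)
    a, b = rng.randn(512), rng.randn(512)
    assert np.allclose(slerp(a, b, 0.0), a, atol=1e-5)  # alpha=0 -> code1
    assert np.allclose(slerp(a, b, 1.0), b, atol=1e-5)  # alpha=1 -> code2
    assert slerp(a, b, 0.5).shape == a.shape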
def generate_image_from_z(G, z, noise_mode, truncation_psi, device):
label = torch.zeros([1, G.c_dim], device=device)
w = G.mapping(z, label,truncation_psi=truncation_psi)
img = G.synthesis(w, noise_mode=noise_mode,force_fp32 = True)
img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
img = PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB')
return img
def get_concat_h(im1, im2):
dst = PIL.Image.new('RGB', (im1.width + im2.width, im1.height))
dst.paste(im1, (0, 0))
dst.paste(im2, (im1.width, 0))
return dst
def make_latent_interp_animation(G, code1, code2, img1, img2, num_interps, noise_mode, save_mid_image, truncation_psi,device, outdir,fps):
step_size = 1.0/num_interps
all_imgs = []
amounts = np.arange(0, 1, step_size)
for seed_idx, alpha in enumerate(tqdm(amounts)):
interpolated_latent_code = lerp(code1, code2, alpha)
image = generate_image_from_z(G,interpolated_latent_code, noise_mode, truncation_psi, device)
interp_latent_image = image.resize((512, 1024))
if not os.path.exists(os.path.join(outdir,'img')): os.makedirs(os.path.join(outdir,'img'), exist_ok=True)
if save_mid_image:
interp_latent_image.save(f'{outdir}/img/seed{seed_idx:04d}.png')
frame = get_concat_h(img2, interp_latent_image)
frame = get_concat_h(frame, img1)
all_imgs.append(frame)
save_name = os.path.join(outdir,'latent_space_traversal.gif')
all_imgs[0].save(save_name, save_all=True, append_images=all_imgs[1:], duration=1000/fps, loop=0)
"""
Create interpolated images between two given seeds using pretrained network pickle.
Examples:
\b
python interpolation.py --network=pretrained_models/stylegan_human_v2_1024.pkl --seeds=85,100 --outdir=outputs/inter_gifs
"""
@click.command()
@click.pass_context
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--seeds', type=legacy.num_range, help='List of 2 random seeds, e.g. 1,2')
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=0.8, show_default=True)
@click.option('--noise-mode', 'noise_mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
@click.option('--outdir', default= 'outputs/inter_gifs', help='Where to save the output images', type=str, required=True, metavar='DIR')
@click.option('--save_mid_image', default=True, type=bool, help='set True to save all interpolated images')
@click.option('--fps', default= 15, help='FPS for GIF', type=int)
@click.option('--num_interps', default= 100, help='Number of interpolation images', type=int)
def main(
ctx: click.Context,
network_pkl: str,
seeds: Optional[List[int]],
truncation_psi: float,
noise_mode: str,
outdir: str,
save_mid_image: bool,
fps:int,
num_interps:int
):
device = torch.device('cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu')
dtype = torch.float32 if device.type == 'mps' else torch.float64
with dnnlib.util.open_url(network_pkl) as f:
G = legacy.load_network_pkl(f)['G_ema'].to(device, dtype=dtype) # type: ignore
outdir = os.path.join(outdir)
if not os.path.exists(outdir):
os.makedirs(outdir,exist_ok=True)
os.makedirs(os.path.join(outdir,'img'),exist_ok=True)
if seeds is None:
    ctx.fail('--seeds option is required.')
if len(seeds) > 2:
    print('Received more than two seeds; using only the first two.')
    seeds = seeds[0:2]
elif len(seeds) == 1:
    print('Two seeds are required; randomly generating a second one.')
    seeds = [seeds[0], random.randint(0, 10000)]
z1 = torch.from_numpy(np.random.RandomState(seeds[0]).randn(1, G.z_dim)).to(device, dtype=dtype)
z2 = torch.from_numpy(np.random.RandomState(seeds[1]).randn(1, G.z_dim)).to(device, dtype=dtype)
img1 = generate_image_from_z(G, z1, noise_mode, truncation_psi, device)
img2 = generate_image_from_z(G, z2, noise_mode, truncation_psi, device)
img1.save(f'{outdir}/seed{seeds[0]:04d}.png')
img2.save(f'{outdir}/seed{seeds[1]:04d}.png')
make_latent_interp_animation(G, z1, z2, img1, img2, num_interps, noise_mode, save_mid_image, truncation_psi, device, outdir, fps)
if __name__ == "__main__":
main()
,channel,strength
401,401,0.0051189866
79,79,0.004417926
499,499,0.0042351373
272,272,0.0033855115
2,2,0.003143758
267,267,0.0025972966
510,510,0.0025229468
130,130,0.0022487796
228,228,0.0021741684
101,101,0.001948409
418,418,0.0018696061
481,481,0.0017976156
88,88,0.0017784507
58,58,0.0017771542
116,116,0.0017733901
282,282,0.0017370607
207,207,0.0017357969
429,429,0.0016839057
284,284,0.0016397897
139,139,0.0016203154
242,242,0.0016087457
319,319,0.0015855831
237,237,0.0015757639
475,475,0.001572773
427,427,0.001526569
276,276,0.0015258243
163,163,0.0014731362
460,460,0.0014698224
268,268,0.0014479166
87,87,0.0013900393
486,486,0.0013423131
367,367,0.0013412643
129,129,0.0013402662
448,448,0.0013169902
438,438,0.0012944449
463,463,0.001292959
109,109,0.0012898487
197,197,0.0012845552
215,215,0.0012809597
419,419,0.0012448858
170,170,0.0012249282
46,46,0.0012235934
191,191,0.0012160796
6,6,0.0012132789
292,292,0.0012097002
174,174,0.0011935516
198,198,0.0011886621
450,450,0.0011825112
334,334,0.0011808838
134,134,0.0011740165
297,297,0.0011682409
388,388,0.0011680947
4,4,0.0011665267
96,96,0.001155285
144,144,0.0011436475
383,383,0.0011424082
472,472,0.0011330546
200,200,0.0011163615
126,126,0.0011128024
7,7,0.0011117855
149,149,0.0011032915
142,142,0.0010992303
108,108,0.0010948912
55,55,0.0010793228
35,35,0.0010671945
156,156,0.0010658612
75,75,0.0010604868
497,497,0.0010573129
333,333,0.0010556887
346,346,0.0010499252
259,259,0.0010397168
33,33,0.0010339752
196,196,0.0010080026
321,321,0.0010073334
169,169,0.001006006
187,187,0.0010003602
421,421,0.0009871207
347,347,0.0009822787
495,495,0.0009788647
235,235,0.00097607024
313,313,0.000972718
316,316,0.00096160895
32,32,0.0009501351
365,365,0.0009465426
50,50,0.0009324631
309,309,0.0009274587
461,461,0.0009274281
439,439,0.0009251979
140,140,0.00091394293
220,220,0.0009082752
482,482,0.0009080755
430,430,0.0009043302
218,218,0.0009004896
143,143,0.00089990336
99,99,0.0008916955
70,70,0.00089066115
168,168,0.0008892674
209,209,0.00088266545
391,391,0.0008787587
137,137,0.00087609346
369,369,0.00087306765
355,355,0.0008672569
354,354,0.0008614661
352,352,0.0008591456
359,359,0.00085570634
258,258,0.00084690325
385,385,0.0008433214
296,296,0.0008431721
153,153,0.0008382941
17,17,0.0008377292
186,186,0.00083413016
162,162,0.00083256833
473,473,0.00082412886
47,47,0.0008205253
89,89,0.0008157718
283,283,0.00080831826
351,351,0.0008064204
124,124,0.00080594653
457,457,0.0008049854
188,188,0.00078665616
154,154,0.00078267895
120,120,0.0007801328
190,190,0.0007771554
85,85,0.0007743246
22,22,0.00076702645
266,266,0.00076506665
227,227,0.0007623983
21,21,0.0007611779
295,295,0.00075573527
476,476,0.00074219145
115,115,0.0007414153
363,363,0.000735065
44,44,0.00073410827
83,83,0.0007331246
118,118,0.0007294682
511,511,0.00072840624
322,322,0.00072799233
483,483,0.0007264888
219,219,0.0007200153
274,274,0.0007194695
455,455,0.0007167979
509,509,0.0007164892
412,412,0.00071247795
239,239,0.00071194395
3,3,0.00070765684
420,420,0.0007068611
53,53,0.0007004219
173,173,0.00069374195
480,480,0.0006935094
189,189,0.0006923614
80,80,0.0006903429
141,141,0.00068950764
208,208,0.00068684825
474,474,0.00068534614
386,386,0.00068098377
107,107,0.00068088406
504,504,0.0006799584
328,328,0.00067885965
307,307,0.00067760365
64,64,0.0006768076
362,362,0.00067168014
86,86,0.000671517
279,279,0.0006705279
361,361,0.00066772755
175,175,0.0006668292
16,16,0.0006653719
345,345,0.00066359126
372,372,0.00066246424
380,380,0.0006578692
330,330,0.00065479317
470,470,0.00065403903
43,43,0.00065164984
205,205,0.0006473879
294,294,0.00063388806
357,357,0.00063241523
36,36,0.00063216814
68,68,0.00063114305
57,57,0.0006299799
213,213,0.00062879996
210,210,0.0006244437
49,49,0.00062352256
241,241,0.00062221487
487,487,0.0006212713
82,82,0.00062058726
466,466,0.0006202627
395,395,0.0006191936
72,72,0.0006174072
158,158,0.0006166083
437,437,0.00061549625
113,113,0.0006142644
277,277,0.0006109689
157,157,0.00060956663
291,291,0.0006088704
370,370,0.0006068144
104,104,0.00060625107
41,41,0.0006062162
94,94,0.00060481543
493,493,0.00060343795
247,247,0.00060251716
338,338,0.00060242455
204,204,0.00060240383
424,424,0.0005989864
344,344,0.0005951116
360,360,0.0005884679
151,151,0.0005868215
264,264,0.0005865409
293,293,0.00058519834
62,62,0.0005819824
300,300,0.00058131246
238,238,0.00057876547
407,407,0.0005770471
342,342,0.0005726651
5,5,0.00057154294
114,114,0.00057109277
240,240,0.00057103945
452,452,0.0005675462
91,91,0.0005672489
413,413,0.0005642817
119,119,0.00056401774
458,458,0.0005635342
180,180,0.0005617487
10,10,0.0005590609
181,181,0.00055804825
479,479,0.0005575437
29,29,0.00055724994
9,9,0.00055689306
102,102,0.0005544259
399,399,0.00055424945
97,97,0.0005539326
172,172,0.00054995815
31,31,0.0005499472
364,364,0.0005491284
492,492,0.00054688356
164,164,0.00054382335
371,371,0.0005417347
275,275,0.00053873524
308,308,0.00053855526
501,501,0.00053753663
92,92,0.00053742283
506,506,0.0005367725
167,167,0.00053528306
305,305,0.00053263427
485,485,0.00053174875
318,318,0.0005315904
177,177,0.00053113786
166,166,0.0005307643
193,193,0.0005297839
469,469,0.0005261937
25,25,0.000521339
48,48,0.0005209389
128,128,0.00052093086
498,498,0.00052030955
405,405,0.0005189927
201,201,0.00051636016
229,229,0.00051383715
24,24,0.00051297864
123,123,0.0005124072
477,477,0.0005120602
402,402,0.0005115426
377,377,0.00051048637
348,348,0.0005102354
23,23,0.00050849793
451,451,0.00050814415
406,406,0.0005045002
27,27,0.0004999815
350,350,0.0004998393
185,185,0.0004971214
390,390,0.00049634936
375,375,0.0004955703
431,431,0.00049411313
105,105,0.00049172394
411,411,0.0004917152
148,148,0.00049001497
250,250,0.0004884555
392,392,0.00048794085
374,374,0.00048640848
252,252,0.00048480998
269,269,0.00048419714
192,192,0.00048391512
217,217,0.00048157398
263,263,0.00048102875
415,415,0.00047999277
212,212,0.0004762149
417,417,0.00047523607
467,467,0.0004741602
340,340,0.00047381772
397,397,0.00047334703
433,433,0.00047333006
378,378,0.00047203893
206,206,0.0004719441
443,443,0.00047179937
484,484,0.00047088487
434,434,0.0004697333
396,396,0.00046903393
13,13,0.00046736852
379,379,0.00046703266
178,178,0.0004656918
202,202,0.0004656007
341,341,0.00046170983
456,456,0.0004603486
462,462,0.0004575785
67,67,0.00045551857
138,138,0.00045521912
459,459,0.00045479517
358,358,0.000450105
77,77,0.00044913465
146,146,0.00044637956
66,66,0.0004448067
98,98,0.00044425039
442,442,0.00044048973
0,0,0.00044048866
216,216,0.0004404604
18,18,0.0004400146
54,54,0.00043942602
20,20,0.00043839475
508,508,0.0004379366
285,285,0.0004373548
195,195,0.00043511056
155,155,0.00043351707
444,444,0.0004311831
257,257,0.00043021288
287,287,0.00042994966
449,449,0.0004278185
280,280,0.00042747098
255,255,0.000425165
56,56,0.00042424107
404,404,0.0004226035
488,488,0.00042232242
356,356,0.00042173947
244,244,0.0004209792
432,432,0.0004125784
214,214,0.0004114269
393,393,0.0004107277
270,270,0.00041058182
111,111,0.0004104286
324,324,0.00040866464
61,61,0.00040655467
366,366,0.00040608697
147,147,0.00040604445
311,311,0.00040550664
500,500,0.00040497814
211,211,0.00040463882
112,112,0.00040117715
100,100,0.00040099313
234,234,0.00040040378
132,132,0.00039979443
478,478,0.0003981258
221,221,0.0003965061
368,368,0.0003960585
336,336,0.00039551125
339,339,0.00039536165
19,19,0.0003951851
71,71,0.00039469448
490,490,0.00039201186
253,253,0.0003899332
332,332,0.00038865488
447,447,0.00038850866
223,223,0.0003865917
12,12,0.00038631624
256,256,0.00038542095
303,303,0.00038529842
335,335,0.0003850341
125,125,0.00038496146
52,52,0.00038445802
465,465,0.00037988674
14,14,0.00037270828
445,445,0.0003714122
51,51,0.0003710708
183,183,0.00036938934
435,435,0.00036892455
76,76,0.0003672379
203,203,0.0003666505
74,74,0.00036636737
464,464,0.00036338985
28,28,0.00036282517
376,376,0.00036232817
389,389,0.00036217785
394,394,0.00036191306
30,30,0.00036106672
327,327,0.000358803
73,73,0.0003566918
343,343,0.00035621642
384,384,0.0003560467
440,440,0.00035523146
251,251,0.00035423675
260,260,0.00035293537
265,265,0.00035225553
387,387,0.0003514995
298,298,0.00034635572
306,306,0.00034440306
110,110,0.00034401406
254,254,0.0003433519
505,505,0.00034252735
60,60,0.00034171442
302,302,0.000340328
171,171,0.00033906853
38,38,0.0003379222
59,59,0.00033599927
353,353,0.00033367483
317,317,0.0003322806
337,337,0.0003305284
135,135,0.0003302332
423,423,0.0003287331
310,310,0.00032717254
503,503,0.00032671163
69,69,0.00032358448
145,145,0.00032273383
160,160,0.00032157102
40,40,0.00032081635
400,400,0.00031983448
278,278,0.00031925194
489,489,0.0003178289
199,199,0.00031677147
133,133,0.00031545162
373,373,0.00031506256
331,331,0.0003125472
382,382,0.00031192778
11,11,0.00031145735
494,494,0.00031131672
426,426,0.00031126558
233,233,0.00030971633
290,290,0.000309349
232,232,0.00030930655
262,262,0.00030752877
231,231,0.00030624977
314,314,0.0003054031
502,502,0.00030359824
323,323,0.0003030356
222,222,0.0002989251
428,428,0.0002974012
496,496,0.00029618212
230,230,0.0002957415
121,121,0.00029490213
304,304,0.00029465224
179,179,0.00029258238
248,248,0.00029258014
436,436,0.0002923748
425,425,0.0002921299
236,236,0.0002915477
150,150,0.00029135606
414,414,0.00029009863
286,286,0.00028853436
320,320,0.00028726715
37,37,0.00028702882
131,131,0.0002847645
225,225,0.0002837026
441,441,0.00028333988
326,326,0.0002814045
422,422,0.00028055938
165,165,0.00027847502
471,471,0.00027833483
349,349,0.0002757503
409,409,0.00027481435
103,103,0.00027326684
95,95,0.00027135346
249,249,0.00027121097
90,90,0.0002710454
224,224,0.00027073256
34,34,0.0002699063
65,65,0.00026914835
184,184,0.00026874637
398,398,0.00026665113
301,301,0.00026612534
325,325,0.00026601987
261,261,0.00026447696
246,246,0.00026436363
122,122,0.00026221105
84,84,0.00026163872
78,78,0.00025123646
299,299,0.00024949652
408,408,0.00024725433
161,161,0.00024636975
288,288,0.00024635496
42,42,0.0002452217
106,106,0.00024499706
182,182,0.00024430823
507,507,0.00024347423
271,271,0.00024137288
136,136,0.00023734072
403,403,0.00023719446
453,453,0.0002368316
26,26,0.00023657693
468,468,0.00023344165
127,127,0.00023242869
117,117,0.0002311785
45,45,0.00022815086
1,1,0.0002279681
194,194,0.00022725234
312,312,0.00022653052
410,410,0.00022528754
491,491,0.00022254683
93,93,0.00022081727
63,63,0.00022056459
226,226,0.00021268189
381,381,0.00020935576
329,329,0.00020906379
446,446,0.00020802105
245,245,0.00020745523
15,15,0.0002072561
281,281,0.00020714967
315,315,0.00020467484
152,152,0.00020205516
8,8,0.00019883284
81,81,0.00019411556
273,273,0.00019290911
39,39,0.00019221198
243,243,0.0001919428
416,416,0.00018266037
289,289,0.00016792006
454,454,0.0001655061
176,176,0.00015807906
159,159,0.0001545051
,channel,strength
371,371,0.010705759
130,130,0.007931795
66,66,0.0069621187
411,411,0.0065370337
241,241,0.0061685536
178,178,0.0057360367
422,422,0.0055051707
59,59,0.0054199533
193,193,0.0052992324
405,405,0.00511423
202,202,0.00487821
414,414,0.004596879
347,347,0.0045533227
325,325,0.0042810338
479,479,0.0041018343
234,234,0.003791195
104,104,0.0037741603
437,437,0.0036558367
186,186,0.0036010114
214,214,0.0035913743
472,472,0.0035745492
99,99,0.003559262
13,13,0.003553579
302,302,0.0034689216
428,428,0.0034320198
43,43,0.0032388393
215,215,0.0031643964
346,346,0.0031622355
392,392,0.0031421043
469,469,0.0031391508
185,185,0.0031346607
110,110,0.0031338846
152,152,0.0031238336
255,255,0.0031061403
27,27,0.003093111
494,494,0.0030917446
238,238,0.0030462171
111,111,0.003043536
162,162,0.0030410155
125,125,0.0030364853
51,51,0.0030085724
231,231,0.0029884125
335,335,0.002956904
184,184,0.002923058
80,80,0.0029210225
253,253,0.0029075942
357,357,0.0028416363
180,180,0.0028330602
360,360,0.0027900473
105,105,0.0027881858
33,33,0.0027586774
475,475,0.0027457555
332,332,0.0027370767
220,220,0.0026984583
31,31,0.0026166395
53,53,0.0026120786
106,106,0.0025991872
412,412,0.00259617
382,382,0.0025869396
38,38,0.0025757526
316,316,0.0025433942
389,389,0.0025421656
435,435,0.002534331
225,225,0.002509905
354,354,0.00245047
243,243,0.0024502743
221,221,0.0024180906
218,218,0.0023629142
56,56,0.0023580198
230,230,0.0023460235
8,8,0.0022959905
344,344,0.0022738976
102,102,0.002244589
279,279,0.0022293774
77,77,0.0022287841
404,404,0.0022230886
200,200,0.0022142548
450,450,0.002204902
319,319,0.0021294882
117,117,0.0021246204
6,6,0.0021206948
247,247,0.002117081
297,297,0.0020922043
40,40,0.0020740507
352,352,0.0020036534
239,239,0.0019678425
402,402,0.0019613549
315,315,0.0019569998
195,195,0.0019549306
128,128,0.0019411439
207,207,0.0019369258
432,432,0.0019148714
365,365,0.0019143238
322,322,0.0018905443
24,24,0.0018891945
265,265,0.0018829522
417,417,0.0018819926
334,334,0.0018748969
0,0,0.0018602412
85,85,0.0018551659
126,126,0.0018536468
4,4,0.001846846
232,232,0.0018420395
376,376,0.0018346108
333,333,0.0018218933
250,250,0.0018113112
169,169,0.0018011285
361,361,0.0017686478
25,25,0.0017465113
427,427,0.0017453731
461,461,0.0017188549
182,182,0.0017154247
11,11,0.0016988176
197,197,0.0016953972
20,20,0.0016696001
246,246,0.0016686992
339,339,0.0016661945
205,205,0.0016548207
177,177,0.0016535291
153,153,0.0016440097
356,356,0.0016337134
456,456,0.0016318822
42,42,0.0016255479
378,378,0.0016247402
155,155,0.0016181484
401,401,0.0016147293
16,16,0.001612585
30,30,0.0016062072
377,377,0.0015920534
385,385,0.0015913579
79,79,0.0015898152
135,135,0.0015849494
384,384,0.0015830033
338,338,0.0015758197
98,98,0.0015708887
21,21,0.0015523953
35,35,0.0015421407
364,364,0.0015421154
270,270,0.0015403415
447,447,0.0015402873
485,485,0.0015397645
121,121,0.0015270623
408,408,0.0015214243
32,32,0.0015187038
336,336,0.0014896884
413,413,0.0014843026
499,499,0.0014718628
487,487,0.0014659785
488,488,0.0014608143
122,122,0.0014533442
491,491,0.0014338568
54,54,0.001432836
363,363,0.0014307784
151,151,0.001427959
91,91,0.0014276047
314,314,0.0014251476
161,161,0.0014244247
211,211,0.0014210346
362,362,0.0014053485
216,216,0.0013955731
159,159,0.0013930433
233,233,0.0013912213
449,449,0.0013888216
48,48,0.0013732343
248,248,0.0013702087
299,299,0.0013695534
503,503,0.0013617459
1,1,0.0013607475
237,237,0.0013588071
57,57,0.0013579503
409,409,0.001355633
483,483,0.0013551097
229,229,0.001353437
19,19,0.0013416156
293,293,0.0013304886
390,390,0.0013271623
168,168,0.0013254886
381,381,0.0013072129
366,366,0.0013022809
288,288,0.0012966645
451,451,0.0012919087
244,244,0.0012890152
292,292,0.0012853605
463,463,0.0012843801
470,470,0.0012821403
416,416,0.0012795452
157,157,0.0012787202
464,464,0.0012758262
329,329,0.0012730482
490,490,0.0012670642
74,74,0.0012638838
170,170,0.001260051
278,278,0.0012583392
22,22,0.0012556936
399,399,0.0012530655
100,100,0.0012508945
355,355,0.001250555
486,486,0.0012472017
506,506,0.0012441961
459,459,0.0012374625
309,309,0.0012292771
113,113,0.0012270019
138,138,0.0012170793
47,47,0.0012084226
65,65,0.0012071957
198,198,0.001206154
196,196,0.001205836
285,285,0.0012046142
49,49,0.0012002704
457,457,0.0011999249
425,425,0.0011967781
175,175,0.0011944658
148,148,0.0011917639
86,86,0.0011908459
94,94,0.0011882967
501,501,0.0011801487
476,476,0.0011663077
156,156,0.0011636167
387,387,0.0011613253
266,266,0.0011546519
496,496,0.0011480699
340,340,0.0011474552
343,343,0.0011469066
52,52,0.0011451634
369,369,0.0011436169
90,90,0.0011377904
386,386,0.0011357777
96,96,0.0011343259
124,124,0.0011335325
460,460,0.0011268322
321,321,0.0011234696
264,264,0.0011168371
287,287,0.0011126697
353,353,0.0011116265
462,462,0.0011107579
154,154,0.0011100196
388,388,0.0011071602
448,448,0.0011041508
187,187,0.0010949599
328,328,0.0010922498
454,454,0.0010910842
306,306,0.0010906332
320,320,0.0010779172
391,391,0.0010766807
318,318,0.0010763333
107,107,0.0010709754
505,505,0.0010685796
206,206,0.0010629565
34,34,0.0010543949
473,473,0.0010541352
173,173,0.0010531493
109,109,0.0010509333
424,424,0.0010466026
430,430,0.0010448382
150,150,0.0010423292
268,268,0.001038994
2,2,0.0010353344
29,29,0.0010323756
504,504,0.0010309685
119,119,0.0010272656
174,174,0.0010258228
260,260,0.0010184883
249,249,0.0010175364
36,36,0.0010174246
137,137,0.001017379
303,303,0.0010154104
163,163,0.0010028904
455,455,0.0010002597
510,510,0.0009994362
245,245,0.00099766
262,262,0.0009885678
281,281,0.0009873835
28,28,0.0009856436
380,380,0.0009821856
228,228,0.0009717986
367,367,0.000969772
286,286,0.00096976873
7,7,0.00096907315
146,146,0.00096689834
139,139,0.000962454
204,204,0.0009589661
3,3,0.000956985
280,280,0.0009566337
509,509,0.00094728253
181,181,0.0009440433
68,68,0.0009432923
269,269,0.0009399444
179,179,0.00093980297
274,274,0.0009315241
95,95,0.0009284373
263,263,0.00092792546
72,72,0.00092775293
277,277,0.0009271326
436,436,0.0009269056
500,500,0.0009265228
87,87,0.00092501455
310,310,0.00092400954
300,300,0.00092247425
144,144,0.0009218417
426,426,0.00091638684
81,81,0.00091634423
188,188,0.00090678554
289,289,0.0009011138
418,418,0.00089773914
397,397,0.00089350634
304,304,0.0008868619
482,482,0.0008865463
495,495,0.0008795051
312,312,0.00087571866
166,166,0.00087193854
183,183,0.00087024615
5,5,0.00086923986
446,446,0.0008691286
212,212,0.00086676405
46,46,0.00086484046
118,118,0.00086480496
254,254,0.00086135446
88,88,0.000861164
219,219,0.0008608658
467,467,0.0008600293
76,76,0.00085848826
331,331,0.00085327355
498,498,0.0008505213
39,39,0.000848981
93,93,0.00084537006
433,433,0.0008452787
410,410,0.00084377866
194,194,0.00083563104
478,478,0.00083321065
272,272,0.00083227345
223,223,0.00083198043
311,311,0.00083184306
431,431,0.0008309719
337,337,0.0008306422
189,189,0.0008290602
341,341,0.0008284594
394,394,0.00082656107
396,396,0.0008221022
92,92,0.0008204752
50,50,0.0008168993
84,84,0.0008081732
64,64,0.0008072594
123,123,0.00080666045
71,71,0.0008044883
140,140,0.00080113776
120,120,0.0007968432
14,14,0.00079405046
324,324,0.00079051324
115,115,0.0007894897
191,191,0.0007880761
439,439,0.0007866818
393,393,0.000783365
131,131,0.0007829777
327,327,0.0007810872
17,17,0.0007793945
97,97,0.00077890133
295,295,0.0007760637
423,423,0.00077443745
403,403,0.0007743149
497,497,0.0007740729
171,171,0.00077238533
276,276,0.0007655106
452,452,0.00076521165
136,136,0.0007644099
82,82,0.0007621963
142,142,0.000760782
374,374,0.0007555941
444,444,0.0007507936
282,282,0.0007499849
421,421,0.0007498477
375,375,0.00074311404
358,358,0.0007403847
217,217,0.00073654624
165,165,0.00073613157
420,420,0.0007335366
227,227,0.0007332281
330,330,0.00073107216
368,368,0.00072970067
37,37,0.00072445447
149,149,0.00072384684
477,477,0.000723286
407,407,0.00072004553
242,242,0.00071955
134,134,0.0007190485
172,172,0.0007161819
69,69,0.00071376155
372,372,0.00071212306
236,236,0.00071169576
349,349,0.000709231
484,484,0.00070886995
222,222,0.0007068514
9,9,0.00070441724
481,481,0.0007041735
373,373,0.0007033714
323,323,0.00070036115
434,434,0.0006995436
438,438,0.00069369253
359,359,0.0006884251
370,370,0.00068327464
308,308,0.000678813
445,445,0.0006780726
10,10,0.0006776827
127,127,0.0006771573
224,224,0.0006768041
296,296,0.00067331357
256,256,0.00066997507
493,493,0.0006680274
67,67,0.0006640576
116,116,0.0006630596
132,132,0.000661265
62,62,0.00066118705
508,508,0.00065791415
468,468,0.0006579114
440,440,0.00065191026
317,317,0.00065190904
160,160,0.00065158366
492,492,0.00065123267
458,458,0.0006491314
114,114,0.0006431901
58,58,0.00063985295
313,313,0.0006342638
18,18,0.0006290864
261,261,0.0006265827
383,383,0.00062478095
294,294,0.00062198006
143,143,0.0006198618
61,61,0.0006060728
103,103,0.0006045529
419,419,0.00060209975
466,466,0.0006016268
507,507,0.0005986943
273,273,0.0005929426
240,240,0.0005922067
350,350,0.00058880384
429,429,0.000588744
129,129,0.00058819435
267,267,0.00058599794
252,252,0.0005856361
23,23,0.00058518484
400,400,0.0005806418
283,283,0.0005790221
89,89,0.00057516847
12,12,0.0005741658
176,176,0.0005739618
192,192,0.00057157595
101,101,0.00056115567
442,442,0.0005588267
41,41,0.0005569018
63,63,0.0005514146
441,441,0.0005468517
398,398,0.00054553134
307,307,0.0005450811
298,298,0.00054445845
342,342,0.00054111326
502,502,0.0005392242
78,78,0.0005375705
44,44,0.0005357114
73,73,0.00053344626
209,209,0.00053105725
158,158,0.0005261206
70,70,0.0005217334
199,199,0.0005199517
471,471,0.0005012095
511,511,0.0004969943
259,259,0.000493192
235,235,0.00047622804
301,301,0.0004594244
275,275,0.0004556442
167,167,0.00045370075
133,133,0.00044329825
147,147,0.00043812627
348,348,0.00042484334
190,190,0.0004229568
406,406,0.00042175007
480,480,0.000417746
108,108,0.00041762542
395,395,0.000411962
305,305,0.0004101298
290,290,0.00040680933
489,489,0.0004044473
251,251,0.0004041571
164,164,0.00040222614
257,257,0.00039560342
379,379,0.000392651
326,326,0.00038673886
112,112,0.00036090027
83,83,0.000344462
351,351,0.00034133552
210,210,0.00033938422
141,141,0.0003355473
60,60,0.00033554618
226,226,0.00033128157
203,203,0.00032476717
15,15,0.00027671162
208,208,0.0002761039
291,291,0.000266872
213,213,0.0002634147
415,415,0.00026088266
474,474,0.000256801
271,271,0.00024554084
201,201,0.00023913333
443,443,0.00023458686
145,145,0.00022581422
284,284,0.00022399156
258,258,0.00021423028
465,465,0.00021232266
453,453,0.00021226592
75,75,0.00020900984
55,55,0.00020078795
26,26,0.00018182797
45,45,0.00016305555
345,345,0.00015414672
,channel,strength
242,242,0.01746412
134,134,0.011444086
71,71,0.01060778
395,395,0.0062382165
363,363,0.0058679837
175,175,0.004722381
53,53,0.0044826367
112,112,0.0042659384
457,457,0.003703029
288,288,0.003450465
328,328,0.00344062
414,414,0.0032427178
205,205,0.0032254586
321,321,0.003165059
32,32,0.003014796
9,9,0.0025584965
180,180,0.0025317036
452,452,0.0024022916
69,69,0.0023928392
210,210,0.0023827436
385,385,0.0023732155
98,98,0.0023490055
307,307,0.0023184065
418,418,0.002295881
470,470,0.0022638584
341,341,0.0021729823
308,308,0.0021633923
37,37,0.0021602109
440,440,0.0021246527
16,16,0.0020481
10,10,0.0020317773
486,486,0.002021162
150,150,0.0020176854
89,89,0.0019858822
278,278,0.001971183
430,430,0.0019591004
463,463,0.0019105887
434,434,0.0018847745
437,437,0.0017904
127,127,0.0017076981
310,310,0.0016927454
151,151,0.0016283162
224,224,0.0015725751
268,268,0.0015449473
402,402,0.00153654
190,190,0.0014757048
92,92,0.0014610167
117,117,0.0014568182
110,110,0.0014490561
423,423,0.0014475571
161,161,0.0014227595
291,291,0.001410095
225,225,0.0013883268
189,189,0.001364547
157,157,0.0013630674
499,499,0.00135625
274,274,0.0013522932
166,166,0.0013465862
475,475,0.0013449993
300,300,0.0013322539
368,368,0.0012959774
267,267,0.0012800089
36,36,0.0012684392
11,11,0.0012514348
184,184,0.0012406422
453,453,0.0012389477
173,173,0.0012284226
429,429,0.001227794
229,229,0.0011854741
212,212,0.0011837278
295,295,0.0011766667
318,318,0.0011758378
390,390,0.0011577767
67,67,0.0011576377
26,26,0.001157294
256,256,0.0011399041
287,287,0.0011214579
245,245,0.0011204005
118,118,0.001119347
379,379,0.0011182849
412,412,0.0011027583
169,169,0.0010857102
488,488,0.001084579
108,108,0.0010608159
155,155,0.0010607184
465,465,0.0010555563
80,80,0.0010430918
285,285,0.0010419621
191,191,0.001039581
320,320,0.0010336601
489,489,0.0009907437
46,46,0.00098818
359,359,0.0009863363
415,415,0.000984566
438,438,0.0009843353
2,2,0.0009840623
483,483,0.000978259
116,116,0.00096878014
279,279,0.00096704334
391,391,0.0009629472
75,75,0.0009625787
386,386,0.0009506957
213,213,0.00094954396
81,81,0.0009334278
170,170,0.0009309288
459,459,0.0009304561
25,25,0.0009263908
422,422,0.0009251744
316,316,0.0009241467
254,254,0.00092409964
294,294,0.00092359504
322,322,0.00092271896
493,493,0.00092130696
168,168,0.00091688987
361,361,0.0009016815
302,302,0.0008988095
199,199,0.0008969074
42,42,0.00089361327
275,275,0.00089227123
20,20,0.00089052174
197,197,0.00088976097
43,43,0.0008803114
370,370,0.00087934994
436,436,0.00087875564
28,28,0.00087209826
290,290,0.0008675793
330,330,0.00085718994
94,94,0.0008566909
511,511,0.0008561607
77,77,0.0008551629
484,484,0.0008420306
202,202,0.0008376041
78,78,0.00083523884
487,487,0.00083071465
44,44,0.0008302506
456,456,0.00082660967
343,343,0.00082623283
186,186,0.0008204403
428,428,0.0008122731
63,63,0.000809409
371,371,0.0007866993
367,367,0.0007859241
410,410,0.00078440236
129,129,0.00078421313
492,492,0.0007841307
219,219,0.00078324176
181,181,0.0007805426
192,192,0.000759081
348,348,0.00075573416
156,156,0.00075448444
149,149,0.0007483769
497,497,0.0007449612
97,97,0.0007447865
238,238,0.0007345617
427,427,0.0007344842
48,48,0.0007277395
496,496,0.0007251045
468,468,0.0007233464
351,351,0.00071131974
396,396,0.0007099477
240,240,0.00070780434
277,277,0.00070575473
397,397,0.00070399133
362,362,0.0007016971
122,122,0.0006994947
425,425,0.0006994096
347,347,0.0006979086
502,502,0.00069097895
377,377,0.00068525685
70,70,0.00068207615
481,481,0.0006812176
185,185,0.00067786046
90,90,0.00067573227
472,472,0.0006711317
339,339,0.0006699505
405,405,0.000669038
426,426,0.0006675291
204,204,0.0006652177
296,296,0.0006634654
235,235,0.000660739
141,141,0.0006510568
203,203,0.00064648956
293,293,0.0006462373
508,508,0.00064582145
121,121,0.00064454816
93,93,0.00064454804
406,406,0.00064240245
12,12,0.0006397705
344,344,0.00063504284
19,19,0.0006331822
332,332,0.0006323309
194,194,0.00062925677
313,313,0.0006285695
507,507,0.0006174056
82,82,0.0006161944
239,239,0.00060919294
490,490,0.0006089468
266,266,0.0006075872
128,128,0.0006045006
407,407,0.00060399977
451,451,0.00060152815
137,137,0.0005997921
454,454,0.00059671386
270,270,0.00059550925
404,404,0.00059543905
439,439,0.000591806
460,460,0.0005913006
23,23,0.0005911026
373,373,0.000590888
96,96,0.0005903696
55,55,0.00059018075
365,365,0.000585234
411,411,0.00058369443
374,374,0.00058170315
119,119,0.000581078
458,458,0.00058017287
420,420,0.00057635305
393,393,0.00057374657
261,261,0.0005727315
319,319,0.00057239935
283,283,0.00057194225
233,233,0.0005646504
403,403,0.00056403375
357,357,0.00056290894
241,241,0.00056050875
182,182,0.00056047173
257,257,0.000556821
364,364,0.00055118365
174,174,0.0005500873
111,111,0.00054910127
309,309,0.0005458186
193,193,0.00054326915
5,5,0.0005425164
24,24,0.0005411514
162,162,0.0005377205
432,432,0.0005299737
284,284,0.000529899
491,491,0.0005281161
352,352,0.000527957
39,39,0.0005278845
297,297,0.00052527175
312,312,0.0005249035
443,443,0.0005247692
143,143,0.0005237088
389,389,0.00052194
132,132,0.0005208663
57,57,0.00051607064
85,85,0.00051471085
482,482,0.00051036675
247,247,0.0005102306
477,477,0.00050744385
358,358,0.0005060787
125,125,0.000505312
466,466,0.00050286297
479,479,0.00050196645
323,323,0.0005004967
232,232,0.0004986481
114,114,0.0004957778
87,87,0.0004945035
158,158,0.0004930025
376,376,0.00049192616
384,384,0.00049149833
65,65,0.00049068604
144,144,0.0004868068
41,41,0.00048459516
33,33,0.00048199162
135,135,0.00047957737
72,72,0.00047893118
449,449,0.00047819986
442,442,0.0004762921
130,130,0.00047556465
244,244,0.00047063318
163,163,0.00047034535
292,292,0.00047004907
350,350,0.00046825878
304,304,0.0004671009
474,474,0.0004649635
208,208,0.00046426945
286,286,0.00046057467
178,178,0.00045859
450,450,0.00045825946
226,226,0.00045819176
8,8,0.00045815166
252,252,0.0004559861
154,154,0.00045457872
264,264,0.00045156496
146,146,0.00044669665
220,220,0.00044567848
501,501,0.00044542857
273,273,0.00044488933
171,171,0.0004427107
401,401,0.00044082777
381,381,0.00043972745
140,140,0.00043886708
356,356,0.00043867563
464,464,0.0004386433
353,353,0.00043817703
230,230,0.0004378558
249,249,0.00043769262
15,15,0.0004355795
345,345,0.0004354763
133,133,0.0004346199
120,120,0.00043198353
505,505,0.00043042895
95,95,0.00042860254
378,378,0.0004282037
455,455,0.00042507777
298,298,0.00042234876
145,145,0.0004205409
104,104,0.0004192717
394,394,0.00041797172
45,45,0.00041774593
366,366,0.00041443488
325,325,0.00041310437
62,62,0.00041287072
179,179,0.00041070042
317,317,0.0004105377
338,338,0.00040974608
79,79,0.00040827427
66,66,0.00040683098
139,139,0.0004056471
29,29,0.00040440765
152,152,0.00040363474
214,214,0.00040329635
435,435,0.0004028462
22,22,0.00040165885
346,346,0.0004001219
209,209,0.00039810632
392,392,0.00039726324
315,315,0.00039714077
433,433,0.00039478665
506,506,0.0003915952
105,105,0.0003900892
446,446,0.00038801826
0,0,0.00038777932
18,18,0.0003875171
424,424,0.00038726558
331,331,0.00038722638
51,51,0.00038692914
417,417,0.00038655673
14,14,0.0003852234
6,6,0.00038509126
281,281,0.0003836144
383,383,0.00038292323
216,216,0.00038181938
262,262,0.00037940088
74,74,0.00037926337
3,3,0.00037809636
387,387,0.00037650907
342,342,0.00037650426
398,398,0.0003733892
136,136,0.00037266538
243,243,0.00037207166
471,471,0.00037091138
73,73,0.00036918517
86,86,0.00036871075
68,68,0.00036807635
372,372,0.00036491535
200,200,0.0003633216
107,107,0.00036330617
480,480,0.00036279066
124,124,0.00036251516
131,131,0.00036231225
269,269,0.00036161783
447,447,0.00036158395
167,167,0.0003561097
38,38,0.00035443963
211,211,0.00035319992
369,369,0.00035081702
248,248,0.00035030014
101,101,0.00034903514
115,115,0.00034696967
413,413,0.0003440623
289,289,0.00034358836
56,56,0.00034278553
195,195,0.0003423341
388,388,0.00034183572
416,416,0.00034126177
206,206,0.0003408197
375,375,0.00033973216
4,4,0.00033931396
494,494,0.00033901844
99,99,0.00033845875
282,282,0.00033784402
172,172,0.00033696217
218,218,0.00033630853
165,165,0.0003358777
196,196,0.0003349965
419,419,0.0003341522
469,469,0.00033384332
327,327,0.0003337259
324,324,0.00033304066
260,260,0.00033025324
478,478,0.00032999786
148,148,0.0003298886
500,500,0.00032941083
83,83,0.00032738544
231,231,0.00032720037
280,280,0.00032718244
498,498,0.00032587873
467,467,0.00032520766
102,102,0.00032370255
84,84,0.00032345828
21,21,0.00032132064
334,334,0.00032061047
237,237,0.00032028827
441,441,0.0003187737
485,485,0.0003181529
159,159,0.00031660515
50,50,0.00031633597
258,258,0.00031522466
164,164,0.00031313743
59,59,0.00031286437
160,160,0.00031265983
355,355,0.00031167237
7,7,0.00031130642
76,76,0.00030938906
476,476,0.0003087692
263,263,0.0003072378
495,495,0.0003052068
27,27,0.00030507136
234,234,0.00030466946
299,299,0.00030342882
113,113,0.00030309713
276,276,0.00030135227
217,217,0.00030106696
61,61,0.00030104464
54,54,0.00030067936
349,349,0.0003001497
380,380,0.00029829858
503,503,0.00029654359
303,303,0.0002964005
88,88,0.00029631294
188,188,0.00029344021
444,444,0.00029314525
329,329,0.0002908486
400,400,0.0002895397
223,223,0.00028937528
123,123,0.0002876151
272,272,0.0002861553
91,91,0.0002857061
354,354,0.00028297797
409,409,0.0002819136
448,448,0.0002804029
509,509,0.00027843454
215,215,0.00027820378
183,183,0.0002780649
253,253,0.0002754507
461,461,0.00027407336
255,255,0.00027380435
251,251,0.00027299367
109,109,0.00027289733
246,246,0.00027286436
259,259,0.00027249
201,201,0.00026961832
207,207,0.00026913724
198,198,0.00026806045
236,236,0.00026771048
326,326,0.00026338472
49,49,0.00026074707
138,138,0.0002606831
147,147,0.00025864976
30,30,0.00025828253
408,408,0.0002574985
177,177,0.00025741325
153,153,0.00025581883
187,187,0.0002544332
34,34,0.0002536471
58,58,0.00025209208
473,473,0.00024838003
221,221,0.00024675552
126,126,0.00024573723
228,228,0.00024463152
306,306,0.00024407305
250,250,0.00024358112
421,421,0.00024149592
176,176,0.00024142586
64,64,0.00023718696
336,336,0.00023363969
40,40,0.00023206352
504,504,0.00023138674
399,399,0.0002291037
305,305,0.00022865152
60,60,0.0002266992
301,301,0.00022585278
222,222,0.00022387604
311,311,0.00022371336
17,17,0.0002226341
52,52,0.00022181227
142,142,0.00021962133
103,103,0.00021821138
382,382,0.00021803642
1,1,0.0002171288
337,337,0.00021468119
445,445,0.00021307352
314,314,0.00021049718
100,100,0.00020906334
360,360,0.00020833187
340,340,0.00020668289
106,106,0.0002023169
462,462,0.00019855957
271,271,0.00019850428
227,227,0.00019575101
35,35,0.00019263716
510,510,0.00018776124
265,265,0.0001874168
31,31,0.00018595619
333,333,0.00018071612
13,13,0.00017645532
431,431,0.00017538466
47,47,0.00017165719
335,335,0.00016702584
,channel,strength
423,423,0.004408924
341,341,0.0032079767
379,379,0.0028457695
184,184,0.0028266357
368,368,0.0027224978
486,486,0.0025201144
313,313,0.0025118296
426,426,0.002334192
367,367,0.0022732853
213,213,0.0022067663
436,436,0.0021884514
308,308,0.0021619347
496,496,0.0020120202
393,393,0.001984407
422,422,0.0019403459
425,425,0.0018690284
511,511,0.0017944475
181,181,0.00178793
497,497,0.0016764585
361,361,0.0016183082
2,2,0.0015440682
267,267,0.0015391556
114,114,0.0015294778
170,170,0.001501581
432,432,0.001494758
385,385,0.0014706858
339,339,0.001420463
415,415,0.0013572118
116,116,0.0013073295
373,373,0.0013072236
453,453,0.0013039358
320,320,0.0012937128
256,256,0.0012766926
127,127,0.0012432956
75,75,0.0012031216
287,287,0.0011476731
309,309,0.001147382
456,456,0.0011466638
343,343,0.0011463353
457,457,0.0011426552
98,98,0.0011147967
437,437,0.0011112978
435,435,0.0010973605
182,182,0.0010894896
150,150,0.0010746964
279,279,0.0010726912
189,189,0.0010475953
9,9,0.001045001
371,371,0.001035062
53,53,0.0010211505
168,168,0.0010098461
146,146,0.0010040879
470,470,0.00097405957
69,69,0.00097038125
375,375,0.0009642161
134,134,0.00094965403
475,475,0.0009452465
125,125,0.0009439787
434,434,0.0009384095
288,288,0.0009309878
205,205,0.0009186115
128,128,0.0009111188
328,328,0.0008899651
319,319,0.0008791126
42,42,0.00087770226
458,458,0.0008629476
272,272,0.0008628456
414,414,0.00086089334
261,261,0.00085644424
304,304,0.00083918776
24,24,0.0008195494
161,161,0.0008154048
225,225,0.0008051095
67,67,0.0008038561
482,482,0.0007958309
430,430,0.0007924481
499,499,0.0007831592
390,390,0.00078276
11,11,0.00076811534
332,332,0.0007633267
197,197,0.0007563872
325,325,0.00074650464
322,322,0.00073741924
157,157,0.00071299827
93,93,0.00069571583
108,108,0.0006955461
185,185,0.00068194594
115,115,0.0006798201
80,80,0.0006779811
405,405,0.00067563757
450,450,0.00067155104
105,105,0.00065722043
57,57,0.00065079477
36,36,0.0006450097
454,454,0.0006430749
247,247,0.0006401138
8,8,0.00063832046
102,102,0.0006370239
316,316,0.0006331501
488,488,0.00062760746
364,364,0.0006171263
389,389,0.00060887367
172,172,0.0006087077
352,352,0.00060442963
463,463,0.0006023239
19,19,0.00060143415
210,210,0.00059817376
186,186,0.0005907412
471,471,0.0005869326
140,140,0.00058600784
94,94,0.000580931
81,81,0.000580287
270,270,0.0005789991
417,417,0.0005783043
196,196,0.0005754427
46,46,0.00057365885
464,464,0.00056969275
104,104,0.00056765747
171,171,0.00056728435
487,487,0.0005661972
220,220,0.00056612946
68,68,0.00056457426
33,33,0.0005640128
72,72,0.0005622605
229,229,0.00055803073
41,41,0.0005567271
148,148,0.0005566318
455,455,0.00055565406
349,349,0.00055537344
442,442,0.00055352686
162,162,0.0005533156
191,191,0.00054769375
202,202,0.0005464838
226,226,0.0005416205
466,466,0.00054103765
192,192,0.0005383168
479,479,0.00052559533
143,143,0.00052148907
199,199,0.0005204556
281,281,0.00051968545
410,410,0.0005187148
403,403,0.0005185161
38,38,0.00051575975
429,429,0.00051309256
381,381,0.0005028584
363,363,0.00050276244
26,26,0.0004997491
502,502,0.0004992033
208,208,0.000498523
109,109,0.0004985198
179,179,0.000497237
433,433,0.00049502135
190,190,0.0004945647
223,223,0.00049233536
50,50,0.00048974034
97,97,0.00048946124
37,37,0.0004884755
85,85,0.00048751346
6,6,0.00048617192
351,351,0.0004857896
212,212,0.00048565448
259,259,0.00048205393
294,294,0.00048184753
118,118,0.00048155375
117,117,0.00047967743
428,428,0.0004783297
110,110,0.0004755637
90,90,0.00047256536
55,55,0.00047060288
233,233,0.0004681879
264,264,0.00046594813
240,240,0.00046441547
310,310,0.00046434483
238,238,0.00046259057
145,145,0.00046210623
22,22,0.00046033954
395,395,0.00045831574
397,397,0.00045133353
18,18,0.00044636318
129,129,0.0004431245
241,241,0.0004430129
465,465,0.00044261583
63,63,0.00044257144
418,418,0.00044039512
507,507,0.00043912584
358,358,0.0004376243
365,365,0.00043710147
242,242,0.00043396762
411,411,0.00043118192
338,338,0.0004302841
122,122,0.00042847547
235,235,0.00042753594
476,476,0.0004272296
3,3,0.00042361638
176,176,0.00041432443
407,407,0.00041412332
391,391,0.00041366852
1,1,0.00041138142
73,73,0.0004112309
493,493,0.00041061096
421,421,0.000409816
459,459,0.00040876624
14,14,0.00040792229
284,284,0.00040753878
61,61,0.0004073927
293,293,0.00040734603
275,275,0.00040710357
494,494,0.0004049803
180,180,0.00040350194
301,301,0.00040267198
198,198,0.00040212894
193,193,0.00040090212
280,280,0.0004008506
396,396,0.00040081202
5,5,0.0004002457
76,76,0.00040004638
350,350,0.00039750861
283,283,0.0003969664
344,344,0.0003949259
230,230,0.00039482582
149,149,0.0003945113
244,244,0.00039450615
357,357,0.00039388
491,491,0.00039187729
45,45,0.00038878893
298,298,0.00038755446
56,56,0.00038721022
503,503,0.00038716424
217,217,0.0003867056
159,159,0.00038530145
468,468,0.00038489333
427,427,0.0003842091
291,291,0.00038256386
500,500,0.0003809668
290,290,0.00038067088
460,460,0.00038007705
331,331,0.00037731865
211,211,0.00037723288
440,440,0.00037687554
74,74,0.00037646503
119,119,0.0003755539
133,133,0.00037539483
353,353,0.00037147343
321,321,0.00037131374
32,32,0.0003699305
16,16,0.0003695323
71,71,0.00036733068
483,483,0.0003673293
131,131,0.00036619935
404,404,0.00036550473
218,218,0.00036407504
424,424,0.00036398123
23,23,0.00036384634
65,65,0.0003628074
111,111,0.00036226492
260,260,0.00036025967
258,258,0.0003602185
112,112,0.00035953286
492,492,0.00035905885
167,167,0.00035840494
399,399,0.00035528265
376,376,0.00035397
326,326,0.00035366518
481,481,0.00035335837
79,79,0.00035233115
101,101,0.00035053334
276,276,0.00034943986
296,296,0.00034848423
152,152,0.00034692476
10,10,0.00034476537
489,489,0.00034423766
438,438,0.0003438288
120,120,0.0003430017
239,239,0.00034234222
317,317,0.00034196727
347,347,0.00034175767
206,206,0.0003414347
84,84,0.00034051857
490,490,0.0003404467
107,107,0.0003396762
495,495,0.00033961574
333,333,0.00033911737
446,446,0.000338417
254,254,0.00033728743
386,386,0.00033677832
250,250,0.00033664028
306,306,0.0003353818
246,246,0.0003352706
372,372,0.00033452015
169,169,0.00033249875
451,451,0.0003304912
173,173,0.0003292484
302,302,0.00032853018
151,151,0.0003258597
263,263,0.0003249236
274,274,0.00032480215
156,156,0.00032411155
307,307,0.00032296553
88,88,0.00032126167
39,39,0.0003210367
91,91,0.00032037924
413,413,0.00032021984
232,232,0.0003186513
366,366,0.00031673085
480,480,0.00031612715
44,44,0.00031571824
462,462,0.00031546177
380,380,0.0003152556
83,83,0.00031272677
132,132,0.0003114493
209,209,0.00030984185
48,48,0.00030914877
382,382,0.00030887048
195,195,0.00030860244
154,154,0.00030410106
166,166,0.0003024194
245,245,0.00030229168
262,262,0.00030191787
237,237,0.0002995339
443,443,0.0002943032
467,467,0.00029363343
121,121,0.00029333794
416,416,0.00029272414
160,160,0.00029269484
4,4,0.00029229806
92,92,0.00029168854
77,77,0.00028903817
400,400,0.0002876192
278,278,0.00028760926
474,474,0.00028757288
402,402,0.00028493986
506,506,0.0002847649
234,234,0.00028450877
277,277,0.00028409314
447,447,0.0002835903
342,342,0.00028351165
285,285,0.00028341086
345,345,0.00028339337
348,348,0.0002823747
300,300,0.00028156798
383,383,0.00028049652
231,231,0.0002790845
203,203,0.00027895247
355,355,0.00027876275
204,204,0.00027841472
216,216,0.0002779351
508,508,0.00027720784
282,282,0.00027655836
297,297,0.00027502645
292,292,0.00027430354
327,327,0.0002727945
100,100,0.000269865
95,95,0.0002694548
187,187,0.0002689126
408,408,0.0002658863
477,477,0.00026576317
384,384,0.0002645117
54,54,0.00026404977
374,374,0.00026287523
420,420,0.00026245107
509,509,0.00026231605
28,28,0.00026166256
449,449,0.0002611203
336,336,0.0002604421
178,178,0.00026030626
299,299,0.00025961167
103,103,0.00025886018
388,388,0.00025811547
271,271,0.00025790904
207,207,0.00025755883
248,248,0.00025613784
249,249,0.00025567278
138,138,0.00025559164
78,78,0.00025549062
269,269,0.00025442135
273,273,0.00025399768
286,286,0.00025389006
12,12,0.0002534591
478,478,0.00025331602
452,452,0.00025299162
27,27,0.0002521485
82,82,0.00025113745
295,295,0.0002508786
201,201,0.0002506164
409,409,0.00025036978
359,359,0.00024992102
394,394,0.00024895562
330,330,0.00024881997
501,501,0.0002484243
64,64,0.00024702528
52,52,0.00024523196
106,106,0.00024233114
175,175,0.00024187689
135,135,0.00024020251
419,419,0.00023802932
139,139,0.00023566972
25,25,0.00023424833
312,312,0.00023372385
469,469,0.00023334593
7,7,0.00023190145
158,158,0.00023087433
60,60,0.00023052357
441,441,0.00022939079
165,165,0.00022774191
59,59,0.00022733606
147,147,0.00022624452
62,62,0.00022606985
144,144,0.00022451063
370,370,0.00022431195
21,21,0.0002239656
369,369,0.00022302035
314,314,0.00022240904
377,377,0.00022210156
406,406,0.00022187224
255,255,0.00022102552
356,356,0.00022071223
472,472,0.00021941346
484,484,0.00021900832
289,289,0.0002186439
137,137,0.00021708063
17,17,0.00021706238
51,51,0.00021587432
174,174,0.0002145808
124,124,0.00021406145
253,253,0.00021273737
251,251,0.00021224513
485,485,0.00021198599
214,214,0.00021052467
227,227,0.00020948028
126,126,0.00020917962
362,362,0.00020837369
473,473,0.00020753275
311,311,0.00020720006
346,346,0.00020569382
243,243,0.00020554132
439,439,0.00020543092
177,177,0.00020462318
86,86,0.00020450725
43,43,0.00020431104
354,354,0.0002021195
323,323,0.00020008704
378,378,0.00019674652
153,153,0.00019593372
324,324,0.00019575354
194,194,0.0001956694
360,360,0.00019427242
188,188,0.0001923151
265,265,0.00019114434
431,431,0.00019109932
219,219,0.0001904252
315,315,0.00019023697
224,224,0.00018825711
412,412,0.0001871387
89,89,0.00018677485
268,268,0.0001852995
257,257,0.00018430859
392,392,0.0001833855
35,35,0.00018329639
20,20,0.00018285586
222,222,0.00018160306
141,141,0.00018157126
398,398,0.00018143529
461,461,0.0001811381
29,29,0.00018102965
318,318,0.00017966877
448,448,0.00017874948
58,58,0.00017864846
329,329,0.00017672384
401,401,0.0001746514
183,183,0.00017442314
142,142,0.00017366462
498,498,0.00017296121
0,0,0.00017286446
504,504,0.00017194722
444,444,0.00017125413
155,155,0.00016847586
87,87,0.00016776803
96,96,0.00016757102
99,99,0.00016714378
340,340,0.00016512058
505,505,0.00016355969
266,266,0.00016327428
49,49,0.00016297943
221,221,0.00016184167
15,15,0.00016183533
303,303,0.00016075459
113,113,0.00016014857
130,130,0.00015612668
215,215,0.00015609166
228,228,0.0001551064
305,305,0.00015308998
335,335,0.00015264843
40,40,0.00015258256
66,66,0.00015006233
200,200,0.00015001767
387,387,0.00014855604
252,252,0.00014628381
164,164,0.00014496325
136,136,0.00014322113
445,445,0.00014278234
123,123,0.00014132661
236,236,0.00013684056
31,31,0.00013596549
34,34,0.00013567152
334,334,0.00013561502
337,337,0.00013048301
70,70,0.00012578799
510,510,0.00012369145
47,47,0.00012088309
13,13,0.00011961328
163,163,0.00010987895
30,30,9.594474e-05
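# The tables above follow a ",channel,strength" layout: a row index, a style
# channel id (presumably a StyleSpace channel, 0-511), and that channel's
# editing strength, sorted in descending order of strength. A minimal sketch
# of how such a table could be consumed; the path and top_k are hypothetical,
# not part of the original files:
import csv

def top_channels(csv_path='edit/channel_strength.csv', top_k=10):
    # Skip header rows (",channel,strength") and keep (channel, strength) pairs.
    with open(csv_path) as f:
        rows = [r for r in csv.reader(f) if r and r[1] != 'channel']
    pairs = [(int(r[1]), float(r[2])) for r in rows]
    return sorted(pairs, key=lambda p: p[1], reverse=True)[:top_k]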
# Copyright (c) SenseTime Research. All rights reserved.
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import pickle
import dnnlib
import re
from typing import List, Optional
import torch
import copy
import numpy as np
from torch_utils import misc
#----------------------------------------------------------------------------
## loading torch pkl
def load_network_pkl(f, force_fp16=False, G_only=False):
    data = _LegacyUnpickler(f).load()
    # Debug dump of the unpickled networks; use a separate handle so the
    # input file object `f` is not shadowed.
    dump_path = 'ori_model_Gonly.txt' if G_only else 'ori_model.txt'
    with open(dump_path, 'a+') as dump_f:
        for key in data.keys():
            dump_f.write(str(data[key]))
    ## This part is commented out; if you want to convert a TF pickle, use the original script from StyleGAN2-ada-pytorch.
# # Legacy TensorFlow pickle => convert.
# if isinstance(data, tuple) and len(data) == 3 and all(isinstance(net, _TFNetworkStub) for net in data):
# tf_G, tf_D, tf_Gs = data
# G = convert_tf_generator(tf_G)
# D = convert_tf_discriminator(tf_D)
# G_ema = convert_tf_generator(tf_Gs)
# data = dict(G=G, D=D, G_ema=G_ema)
# Add missing fields.
if 'training_set_kwargs' not in data:
data['training_set_kwargs'] = None
if 'augment_pipe' not in data:
data['augment_pipe'] = None
# Validate contents.
assert isinstance(data['G_ema'], torch.nn.Module)
if not G_only:
assert isinstance(data['D'], torch.nn.Module)
assert isinstance(data['G'], torch.nn.Module)
assert isinstance(data['training_set_kwargs'], (dict, type(None)))
assert isinstance(data['augment_pipe'], (torch.nn.Module, type(None)))
# Force FP16.
    if force_fp16:
        if G_only:
            convert_list = ['G_ema']  # 'G'
        else:
            convert_list = ['G', 'D', 'G_ema']
for key in convert_list:
old = data[key]
kwargs = copy.deepcopy(old.init_kwargs)
if key.startswith('G'):
kwargs.synthesis_kwargs = dnnlib.EasyDict(kwargs.get('synthesis_kwargs', {}))
kwargs.synthesis_kwargs.num_fp16_res = 4
kwargs.synthesis_kwargs.conv_clamp = 256
if key.startswith('D'):
kwargs.num_fp16_res = 4
kwargs.conv_clamp = 256
if kwargs != old.init_kwargs:
new = type(old)(**kwargs).eval().requires_grad_(False)
misc.copy_params_and_buffers(old, new, require_all=True)
data[key] = new
return data
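# _TFNetworkStub stands in for dnnlib.tflib.network.Network so that legacy
# TensorFlow pickles can be unpickled without TensorFlow installed.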
class _TFNetworkStub(dnnlib.EasyDict):
pass
class _LegacyUnpickler(pickle.Unpickler):
def find_class(self, module, name):
if module == 'dnnlib.tflib.network' and name == 'Network':
return _TFNetworkStub
return super().find_class(module, name)
#----------------------------------------------------------------------------
def num_range(s: str) -> List[int]:
    '''Accept either a comma-separated list of numbers 'a,b,c' or a range 'a-c' and return it as a list of ints.'''
range_re = re.compile(r'^(\d+)-(\d+)$')
m = range_re.match(s)
if m:
return list(range(int(m.group(1)), int(m.group(2))+1))
vals = s.split(',')
return [int(x) for x in vals]
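# Examples: num_range('1-3') -> [1, 2, 3]; num_range('61531,61570') -> [61531, 61570].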
#----------------------------------------------------------------------------
#### loading tf pkl
def load_pkl(file_or_url):
with open(file_or_url, 'rb') as file:
return pickle.load(file, encoding='latin1')
#----------------------------------------------------------------------------
### For editing
def visual(output, out_path):
    import torch
    import cv2
    import numpy as np
    output = (output + 1)/2                  # map generator output [-1, 1] -> [0, 1]
    output = torch.clamp(output, 0, 1)
    if output.shape[1] == 1:                 # replicate grayscale to 3 channels
        output = torch.cat([output, output, output], 1)
    output = output[0].detach().cpu().permute(1,2,0).numpy()
    output = (output*255).astype(np.uint8)
    output = output[:,:,::-1]                # RGB -> BGR for cv2.imwrite
    cv2.imwrite(out_path, output)
def save_obj(obj, path):
with open(path, 'wb+') as f:
pickle.dump(obj, f, protocol=4)
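# Illustrative round trip (the path is hypothetical): save_obj(obj, 'edit/cache.pkl')
# followed by load_pkl('edit/cache.pkl') recovers the object.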
#----------------------------------------------------------------------------
## Converting pkl to pth, change dict info inside pickle
def convert_to_rgb(state_ros, state_nv, ros_name, nv_name):
state_ros[f"{ros_name}.conv.weight"] = state_nv[f"{nv_name}.torgb.weight"].unsqueeze(0)
state_ros[f"{ros_name}.bias"] = state_nv[f"{nv_name}.torgb.bias"].unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
state_ros[f"{ros_name}.conv.modulation.weight"] = state_nv[f"{nv_name}.torgb.affine.weight"]
state_ros[f"{ros_name}.conv.modulation.bias"] = state_nv[f"{nv_name}.torgb.affine.bias"]
def convert_conv(state_ros, state_nv, ros_name, nv_name):
state_ros[f"{ros_name}.conv.weight"] = state_nv[f"{nv_name}.weight"].unsqueeze(0)
state_ros[f"{ros_name}.activate.bias"] = state_nv[f"{nv_name}.bias"]
state_ros[f"{ros_name}.conv.modulation.weight"] = state_nv[f"{nv_name}.affine.weight"]
state_ros[f"{ros_name}.conv.modulation.bias"] = state_nv[f"{nv_name}.affine.bias"]
state_ros[f"{ros_name}.noise.weight"] = state_nv[f"{nv_name}.noise_strength"].unsqueeze(0)
def convert_blur_kernel(state_ros, state_nv, level):
"""Not quite sure why there is a factor of 4 here"""
# They are all the same
state_ros[f"convs.{2*level}.conv.blur.kernel"] = 4*state_nv["synthesis.b4.resample_filter"]
state_ros[f"to_rgbs.{level}.upsample.kernel"] = 4*state_nv["synthesis.b4.resample_filter"]
def determine_config(state_nv):
    mapping_names = [name for name in state_nv.keys() if "mapping.fc" in name]
    synthesis_names = [name for name in state_nv.keys() if "synthesis.b" in name]
    n_mapping = max([int(re.findall(r"(\d+)", n)[0]) for n in mapping_names]) + 1
    resolution = max([int(re.findall(r"(\d+)", n)[0]) for n in synthesis_names])
    n_layers = np.log(resolution/2)/np.log(2)
    return n_mapping, n_layers
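# For example, a checkpoint with mapping.fc0..fc7 and synthesis blocks up to
# b1024 yields n_mapping = 8 and n_layers = log2(1024/2) = 9.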
def convert(network_pkl, output_file, G_only=False):
with dnnlib.util.open_url(network_pkl) as f:
G_nvidia = load_network_pkl(f,G_only=G_only)['G_ema']
state_nv = G_nvidia.state_dict()
n_mapping, n_layers = determine_config(state_nv)
state_ros = {}
for i in range(n_mapping):
state_ros[f"style.{i+1}.weight"] = state_nv[f"mapping.fc{i}.weight"]
state_ros[f"style.{i+1}.bias"] = state_nv[f"mapping.fc{i}.bias"]
for i in range(int(n_layers)):
if i > 0:
for conv_level in range(2):
convert_conv(state_ros, state_nv, f"convs.{2*i-2+conv_level}", f"synthesis.b{4*(2**i)}.conv{conv_level}")
state_ros[f"noises.noise_{2*i-1+conv_level}"] = state_nv[f"synthesis.b{4*(2**i)}.conv{conv_level}.noise_const"].unsqueeze(0).unsqueeze(0)
convert_to_rgb(state_ros, state_nv, f"to_rgbs.{i-1}", f"synthesis.b{4*(2**i)}")
convert_blur_kernel(state_ros, state_nv, i-1)
else:
state_ros[f"input.input"] = state_nv[f"synthesis.b{4*(2**i)}.const"].unsqueeze(0)
convert_conv(state_ros, state_nv, "conv1", f"synthesis.b{4*(2**i)}.conv1")
state_ros[f"noises.noise_{2*i}"] = state_nv[f"synthesis.b{4*(2**i)}.conv1.noise_const"].unsqueeze(0).unsqueeze(0)
convert_to_rgb(state_ros, state_nv, "to_rgb1", f"synthesis.b{4*(2**i)}")
# https://github.com/yuval-alaluf/restyle-encoder/issues/1#issuecomment-828354736
latent_avg = state_nv['mapping.w_avg']
state_dict = {"g_ema": state_ros, "latent_avg": latent_avg}
# if G_only:
# f = open('converted_model_Gonly.txt','a+')
# else:
# f = open('converted_model.txt','a+')
# for key in state_dict['g_ema'].keys():
# f.write(str(key)+': '+str(state_dict['g_ema'][key].shape)+'\n')
# f.close()
torch.save(state_dict, output_file)
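# Illustrative usage (paths are hypothetical). The 'state_ros' key names appear
# to follow the layout of rosinality's stylegan2-pytorch Generator, which
# torch_utils.models seems to mirror:
#   convert('model.pkl', 'model.pth', G_only=True)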
import cv2
import numpy as np
import math
import time
from scipy.ndimage import gaussian_filter  # scipy.ndimage.filters is deprecated
import matplotlib.pyplot as plt
import matplotlib
import torch
from torchvision import transforms
from openpose.src import util
from openpose.src.model import bodypose_model
class Body(object):
def __init__(self, model_path):
self.model = bodypose_model()
if torch.cuda.is_available():
self.model = self.model.cuda()
model_dict = util.transfer(self.model, torch.load(model_path))
self.model.load_state_dict(model_dict)
self.model.eval()
def __call__(self, oriImg):
# scale_search = [0.5, 1.0, 1.5, 2.0]
scale_search = [0.5]
boxsize = 368
stride = 8
padValue = 128
thre1 = 0.1
thre2 = 0.05
multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
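        # 19 heatmap channels (18 body parts + background) and 38 PAF channels
        # (a 2D vector field for each of the 19 limbs listed in limbSeq below).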
for m in range(len(multiplier)):
scale = multiplier[m]
imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue)
im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
im = np.ascontiguousarray(im)
data = torch.from_numpy(im).float()
if torch.cuda.is_available():
data = data.cuda()
# data = data.permute([2, 0, 1]).unsqueeze(0).float()
with torch.no_grad():
Mconv7_stage6_L1, Mconv7_stage6_L2 = self.model(data)
Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy()
Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy()
# extract outputs, resize, and remove padding
# heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[1]].data), (1, 2, 0)) # output 1 is heatmaps
heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0)) # output 1 is heatmaps
heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
# paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[0]].data), (1, 2, 0)) # output 0 is PAFs
paf = np.transpose(np.squeeze(Mconv7_stage6_L1), (1, 2, 0)) # output 0 is PAFs
paf = cv2.resize(paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
            heatmap_avg = heatmap_avg + heatmap / len(multiplier)  # running average over scales
            paf_avg = paf_avg + paf / len(multiplier)
all_peaks = []
peak_counter = 0
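        # Non-maximum suppression: after Gaussian smoothing, a pixel is a peak
        # if it is >= each of its four neighbours and above thre1.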
for part in range(18):
map_ori = heatmap_avg[:, :, part]
one_heatmap = gaussian_filter(map_ori, sigma=3)
map_left = np.zeros(one_heatmap.shape)
map_left[1:, :] = one_heatmap[:-1, :]
map_right = np.zeros(one_heatmap.shape)
map_right[:-1, :] = one_heatmap[1:, :]
map_up = np.zeros(one_heatmap.shape)
map_up[:, 1:] = one_heatmap[:, :-1]
map_down = np.zeros(one_heatmap.shape)
map_down[:, :-1] = one_heatmap[:, 1:]
peaks_binary = np.logical_and.reduce(
(one_heatmap >= map_left, one_heatmap >= map_right, one_heatmap >= map_up, one_heatmap >= map_down, one_heatmap > thre1))
peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse
peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
peak_id = range(peak_counter, peak_counter + len(peaks))
peaks_with_score_and_id = [peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))]
all_peaks.append(peaks_with_score_and_id)
peak_counter += len(peaks)
# find connection in the specified sequence, center 29 is in the position 15
limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
[10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
[1, 16], [16, 18], [3, 17], [6, 18]]
        # the middle joints heatmap correspondence
mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22], \
[23, 24], [25, 26], [27, 28], [29, 30], [47, 48], [49, 50], [53, 54], [51, 52], \
[55, 56], [37, 38], [45, 46]]
connection_all = []
special_k = []
mid_num = 10
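        # Score each candidate limb by sampling the part-affinity field at
        # mid_num evenly spaced points along the segment joining the two
        # candidate joints, then dotting the field with the limb direction.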
for k in range(len(mapIdx)):
score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
candA = all_peaks[limbSeq[k][0] - 1]
candB = all_peaks[limbSeq[k][1] - 1]
nA = len(candA)
nB = len(candB)
indexA, indexB = limbSeq[k]
if (nA != 0 and nB != 0):
connection_candidate = []
for i in range(nA):
for j in range(nB):
vec = np.subtract(candB[j][:2], candA[i][:2])
norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
norm = max(0.001, norm)
vec = np.divide(vec, norm)
startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \
np.linspace(candA[i][1], candB[j][1], num=mid_num)))
vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
for I in range(len(startend))])
vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
for I in range(len(startend))])
score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(
0.5 * oriImg.shape[0] / norm - 1, 0)
criterion1 = len(np.nonzero(score_midpts > thre2)[0]) > 0.8 * len(score_midpts)
criterion2 = score_with_dist_prior > 0
if criterion1 and criterion2:
connection_candidate.append(
[i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]])
connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
connection = np.zeros((0, 5))
for c in range(len(connection_candidate)):
i, j, s = connection_candidate[c][0:3]
if (i not in connection[:, 3] and j not in connection[:, 4]):
connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
if (len(connection) >= min(nA, nB)):
break
connection_all.append(connection)
else:
special_k.append(k)
connection_all.append([])
        # the last number in each row is the total number of parts for that person
        # the second-to-last number is the overall configuration score
subset = -1 * np.ones((0, 20))
candidate = np.array([item for sublist in all_peaks for item in sublist])
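        # Greedily assemble per-person skeletons: each subset row holds 18 part
        # indices into candidate, plus the configuration score and part count.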
for k in range(len(mapIdx)):
if k not in special_k:
partAs = connection_all[k][:, 0]
partBs = connection_all[k][:, 1]
indexA, indexB = np.array(limbSeq[k]) - 1
for i in range(len(connection_all[k])): # = 1:size(temp,1)
found = 0
subset_idx = [-1, -1]
for j in range(len(subset)): # 1:size(subset,1):
if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
subset_idx[found] = j
found += 1
if found == 1:
j = subset_idx[0]
if subset[j][indexB] != partBs[i]:
subset[j][indexB] = partBs[i]
subset[j][-1] += 1
subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
elif found == 2: # if found 2 and disjoint, merge them
j1, j2 = subset_idx
membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]
if len(np.nonzero(membership == 2)[0]) == 0: # merge
subset[j1][:-2] += (subset[j2][:-2] + 1)
subset[j1][-2:] += subset[j2][-2:]
subset[j1][-2] += connection_all[k][i][2]
subset = np.delete(subset, j2, 0)
                        else:  # same as the found == 1 case
subset[j1][indexB] = partBs[i]
subset[j1][-1] += 1
subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                    # if no partA is found in any existing subset, create a new one
elif not found and k < 17:
row = -1 * np.ones(20)
row[indexA] = partAs[i]
row[indexB] = partBs[i]
row[-1] = 2
row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2]
subset = np.vstack([subset, row])
        # delete rows of subset that have too few parts or a low average per-part score
deleteIdx = []
for i in range(len(subset)):
if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
deleteIdx.append(i)
subset = np.delete(subset, deleteIdx, axis=0)
# subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts
# candidate: x, y, score, id
return candidate, subset
if __name__ == "__main__":
body_estimation = Body('../model/body_pose_model.pth')
test_image = '../images/ski.jpg'
oriImg = cv2.imread(test_image) # B,G,R order
candidate, subset = body_estimation(oriImg)
canvas = util.draw_bodypose(oriImg, candidate, subset)
plt.imshow(canvas[:, :, [2, 1, 0]])
plt.show()