Commit 7339f0b0 authored by chenzk

v1.0

import torch
from torch import nn
from torch.nn import functional as F
import torch.utils.data
from torchvision.models.inception import inception_v3
import os
from skimage import io
import numpy as np
from scipy.stats import entropy
import torchvision.datasets as dset
import torchvision.transforms as transforms
import argparse
def inception_score(imgs, cuda=True, batch_size=32, resize=False, splits=32):
    """Computes the Inception Score of the generated images imgs.

    imgs -- Torch dataset of (3xHxW) images normalized to the range [-1, 1]
    cuda -- whether or not to run on GPU
    batch_size -- batch size for feeding into Inception v3
    resize -- whether to bilinearly upsample images to 299x299 first
    splits -- number of splits
    """
    N = len(imgs)
    assert batch_size > 0
    assert N > batch_size

    # Set up dtype
    if cuda:
        dtype = torch.cuda.FloatTensor
    else:
        if torch.cuda.is_available():
            print("WARNING: You have a CUDA device, so you should probably set cuda=True")
        dtype = torch.FloatTensor

    # Set up dataloader
    dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size)

    # Load inception model
    inception_model = inception_v3(pretrained=True, transform_input=False).type(dtype)
    inception_model.eval()
    up = nn.Upsample(size=(299, 299), mode='bilinear', align_corners=False).type(dtype)

    def get_pred(x):
        if resize:
            x = up(x)
        x = inception_model(x)
        return F.softmax(x, dim=1).data.cpu().numpy()
    # Get predictions
    preds = np.zeros((N, 1000))
    for i, batch in enumerate(dataloader, 0):
        batch = batch.type(dtype)
        batch_size_i = batch.size()[0]
        with torch.no_grad():
            preds[i * batch_size:i * batch_size + batch_size_i] = get_pred(batch)

    # Now compute the mean KL-divergence: IS = exp(E_x[KL(p(y|x) || p(y))])
    split_scores = []
    for k in range(splits):
        part = preds[k * (N // splits): (k + 1) * (N // splits), :]
        py = np.mean(part, axis=0)
        scores = []
        for i in range(part.shape[0]):
            pyx = part[i, :]
            scores.append(entropy(pyx, py))
        split_scores.append(np.exp(np.mean(scores)))

    return np.mean(split_scores), np.std(split_scores)
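
# Quick sanity check (hypothetical, not part of the original): uniform random
# noise should score near 1.0, since p(y|x) stays close to the marginal p(y)
# for every input. Uncomment to try:
#
# class _RandomImages(torch.utils.data.Dataset):
#     def __len__(self):
#         return 640
#     def __getitem__(self, idx):
#         return torch.rand(3, 64, 64) * 2 - 1  # normalized to [-1, 1]
#
# print(inception_score(_RandomImages(), cuda=True, batch_size=32, resize=True, splits=10))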
class UnlabeledDataset(torch.utils.data.Dataset):
    def __init__(self, folder, transform=None):
        self.folder = folder
        self.transform = transform
        self.image_files = os.listdir(folder)

    def __len__(self):
        return len(self.image_files)

    def __getitem__(self, idx):
        image_file = self.image_files[idx]
        image_path = os.path.join(self.folder, image_file)
        image = io.imread(image_path)
        if self.transform:
            image = self.transform(image)
        return image


class IgnoreLabelDataset(torch.utils.data.Dataset):
    def __init__(self, orig):
        self.orig = orig

    def __getitem__(self, index):
        return self.orig[index][0]

    def __len__(self):
        return len(self.orig)
if __name__ == '__main__':
    # cifar = dset.CIFAR10(root='data/', download=True,
    #                      transform=transforms.Compose([
    #                          transforms.Resize(32),
    #                          transforms.ToTensor(),
    #                          transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    #                      ])
    #                      )
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    # Set args
    parser = argparse.ArgumentParser()
    parser.add_argument('--data-root', type=str, default='/data/wyli/code/TinyDDPM/Output/unet_busi/Gens/')
    args = parser.parse_args()

    dataset = UnlabeledDataset(args.data_root, transform=transform)
    print("Calculating Inception Score...")
    print(inception_score(dataset, cuda=True, batch_size=1, resize=True, splits=10))
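
# Example invocation (illustrative path, following the ${SAVE_ROOT}/${EXP_NME}/Gens
# layout used by the run scripts):
#   python inception_score.py --data-root ./Output/UKan_cvc/Gens/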
Download released_models.zip and unzip it here.
pytorch-fid==0.3.0
torch==2.3.0
torchvision==0.18.0
tqdm
timm==0.9.16
scikit-image==0.23.1
import os
from skimage import io, transform
from skimage.util import img_as_ubyte
import numpy as np

# Define the source and destination directories
src_dir = '/data/wyli/data/CVC-ClinicDB/Original/'
dst_dir = '/data/wyli/data/cvc/images_64/'
os.makedirs(dst_dir, exist_ok=True)

# Get a list of all the image files in the source directory
image_files = [f for f in os.listdir(src_dir) if os.path.isfile(os.path.join(src_dir, f))]

# Define the size of the crop box
crop_size = np.array([288, 288])

# Define the size of the resized image
resize_size = (64, 64)

for image_file in image_files:
    # Load the image
    image = io.imread(os.path.join(src_dir, image_file))

    # Calculate the center of the image
    center = np.array(image.shape[:2]) // 2

    # Calculate the start and end points of the crop box
    start = center - crop_size // 2
    end = start + crop_size

    # Crop the image
    cropped_image = img_as_ubyte(image[start[0]:end[0], start[1]:end[1]])

    # Resize the cropped image
    resized_image = transform.resize(cropped_image, resize_size, mode='reflect')

    # Save the resized image to the destination directory
    io.imsave(os.path.join(dst_dir, image_file), img_as_ubyte(resized_image))
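
# For reference, CVC-ClinicDB originals are 384x288, so the 288x288 center crop
# keeps the full image height and trims the sides before downsampling to 64x64.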
import os
from skimage import io, transform
from skimage.util import img_as_ubyte
import numpy as np

# Define the source and destination directories
src_dir = '/data/wyli/data/busi/images/'
dst_dir = '/data/wyli/data/busi/images_64/'
os.makedirs(dst_dir, exist_ok=True)

# Get a list of all the image files in the source directory
image_files = [f for f in os.listdir(src_dir) if os.path.isfile(os.path.join(src_dir, f))]

# Define the size of the crop box
crop_size = np.array([400, 400])

# Define the size of the resized image
resize_size = (64, 64)

for image_file in image_files:
    # Load the image
    image = io.imread(os.path.join(src_dir, image_file))

    # Calculate the center of the image
    center = np.array(image.shape[:2]) // 2

    # Calculate the start and end points of the crop box
    start = center - crop_size // 2
    end = start + crop_size

    # Crop the image
    cropped_image = img_as_ubyte(image[start[0]:end[0], start[1]:end[1]])

    # Resize the cropped image
    resized_image = transform.resize(cropped_image, resize_size, mode='reflect')

    # Save the resized image to the destination directory
    io.imsave(os.path.join(dst_dir, image_file), img_as_ubyte(resized_image))
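
# Caution: some BUSI images are smaller than 400 pixels on a side; `start` then
# goes negative and the slice silently returns a smaller, wrapped-around crop.
# A minimal guard that crops whatever fits (hypothetical, not part of the
# original script):
#
#   image_size = np.array(image.shape[:2])
#   start = np.clip(center - crop_size // 2, 0, None)
#   end = np.minimum(start + crop_size, image_size)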
import os
from skimage import io
from skimage.util import img_as_ubyte
import numpy as np
import random

# Define the source and destination directories
src_dir = '/data/wyli/data/glas/images/'
dst_dir = '/data/wyli/data/glas/images_64/'
os.makedirs(dst_dir, exist_ok=True)

# Get a list of all the image files in the source directory
image_files = [f for f in os.listdir(src_dir) if os.path.isfile(os.path.join(src_dir, f))]

# Define the size of the crop box
crop_size = np.array([64, 64])

# Define the number of crops per image
K = 5

for image_file in image_files:
    # Load the image
    image = io.imread(os.path.join(src_dir, image_file))

    # Get the size of the image
    image_size = np.array(image.shape[:2])

    for i in range(K):
        # Calculate a random start point for the crop box
        start = np.array([random.randint(0, image_size[0] - crop_size[0]),
                          random.randint(0, image_size[1] - crop_size[1])])

        # Calculate the end point of the crop box
        end = start + crop_size

        # Crop the image
        cropped_image = img_as_ubyte(image[start[0]:end[0], start[1]:end[1]])

        # Save the cropped image to the destination directory
        io.imsave(os.path.join(dst_dir, f"{image_file}_{i}.png"), cropped_image)
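
# The crop locations come from Python's global RNG; for reproducible crops one
# could seed it at the top of the script (not in the original), e.g. random.seed(0).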
#!/bin/bash
source ~/miniconda3/etc/profile.d/conda.sh
conda activate kan

GPU=0
MODEL='UKan_Hybrid'
EXP_NME='UKan_busi'
SAVE_ROOT='./Output/'
DATASET='busi'

cd ../
CUDA_VISIBLE_DEVICES=${GPU} python Main.py \
    --model ${MODEL} \
    --exp_nme ${EXP_NME} \
    --batch_size 32 \
    --channel 64 \
    --dataset ${DATASET} \
    --epoch 5000 \
    --save_root ${SAVE_ROOT}
    # --lr 1e-4

# Calculate FID and IS
CUDA_VISIBLE_DEVICES=${GPU} python -m pytorch_fid "data/${DATASET}/images_64/" "${SAVE_ROOT}/${EXP_NME}/Gens" > "${SAVE_ROOT}/${EXP_NME}/FID.txt" 2>&1
cd inception-score-pytorch
CUDA_VISIBLE_DEVICES=${GPU} python inception_score.py --data-root "../${SAVE_ROOT}/${EXP_NME}/Gens" > "../${SAVE_ROOT}/${EXP_NME}/IS.txt" 2>&1
#!/bin/bash
source ~/miniconda3/etc/profile.d/conda.sh
conda activate kan

GPU=0
MODEL='UKan_Hybrid'
EXP_NME='UKan_cvc'
SAVE_ROOT='./Output/'
DATASET='cvc'

cd ../
CUDA_VISIBLE_DEVICES=${GPU} python Main.py \
    --model ${MODEL} \
    --exp_nme ${EXP_NME} \
    --batch_size 32 \
    --channel 64 \
    --dataset ${DATASET} \
    --epoch 1000 \
    --save_root ${SAVE_ROOT}
    # --lr 1e-4

# Calculate FID and IS
CUDA_VISIBLE_DEVICES=${GPU} python -m pytorch_fid "data/${DATASET}/images_64/" "${SAVE_ROOT}/${EXP_NME}/Gens" > "${SAVE_ROOT}/${EXP_NME}/FID.txt" 2>&1
cd inception-score-pytorch
CUDA_VISIBLE_DEVICES=${GPU} python inception_score.py --data-root "../${SAVE_ROOT}/${EXP_NME}/Gens" > "../${SAVE_ROOT}/${EXP_NME}/IS.txt" 2>&1
MIT License

Copyright (c) 2022 Jeya Maria Jose

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.