Commit 9581d648 authored by boomb0om's avatar boomb0om
Browse files

Refactor code

parent fc7a249d
...@@ -5,14 +5,18 @@ import numpy as np ...@@ -5,14 +5,18 @@ import numpy as np
import cv2 import cv2
from .rrdbnet_arch import RRDBNet from .rrdbnet_arch import RRDBNet
from .utils import * from .utils import pad_reflect, split_image_into_overlapping_patches, stich_together, \
unpad_image
class RealESRGAN: class RealESRGAN:
def __init__(self, device, scale=4): def __init__(self, device, scale=4):
self.device = device self.device = device
self.scale = scale self.scale = scale
self.model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=scale) self.model = RRDBNet(
num_in_ch=3, num_out_ch=3, num_feat=64,
num_block=23, num_grow_ch=32, scale=scale
)
def load_weights(self, model_path): def load_weights(self, model_path):
loadnet = torch.load(model_path) loadnet = torch.load(model_path)
...@@ -33,8 +37,9 @@ class RealESRGAN: ...@@ -33,8 +37,9 @@ class RealESRGAN:
lr_image = np.array(lr_image) lr_image = np.array(lr_image)
lr_image = pad_reflect(lr_image, pad_size) lr_image = pad_reflect(lr_image, pad_size)
patches, p_shape = split_image_into_overlapping_patches(lr_image, patch_size=patches_size, patches, p_shape = split_image_into_overlapping_patches(
padding_size=padding) lr_image, patch_size=patches_size, padding_size=padding
)
img = torch.FloatTensor(patches/255).permute((0,3,1,2)).to(device).detach() img = torch.FloatTensor(patches/255).permute((0,3,1,2)).to(device).detach()
with torch.no_grad(): with torch.no_grad():
...@@ -47,8 +52,10 @@ class RealESRGAN: ...@@ -47,8 +52,10 @@ class RealESRGAN:
padded_size_scaled = tuple(np.multiply(p_shape[0:2], scale)) + (3,) padded_size_scaled = tuple(np.multiply(p_shape[0:2], scale)) + (3,)
scaled_image_shape = tuple(np.multiply(lr_image.shape[0:2], scale)) + (3,) scaled_image_shape = tuple(np.multiply(lr_image.shape[0:2], scale)) + (3,)
np_sr_image = stich_together(np_sr_image, padded_image_shape=padded_size_scaled, np_sr_image = stich_together(
target_shape=scaled_image_shape, padding_size=padding * scale) np_sr_image, padded_image_shape=padded_size_scaled,
target_shape=scaled_image_shape, padding_size=padding * scale
)
sr_img = (np_sr_image*255).astype(np.uint8) sr_img = (np_sr_image*255).astype(np.uint8)
sr_img = unpad_image(sr_img, pad_size*scale) sr_img = unpad_image(sr_img, pad_size*scale)
sr_img = Image.fromarray(sr_img) sr_img = Image.fromarray(sr_img)
......
...@@ -3,7 +3,6 @@ import torch ...@@ -3,7 +3,6 @@ import torch
from PIL import Image from PIL import Image
import os import os
import io import io
import imageio
def pad_reflect(image, pad_size): def pad_reflect(image, pad_size):
imsize = image.shape imsize = image.shape
...@@ -22,13 +21,6 @@ def unpad_image(image, pad_size): ...@@ -22,13 +21,6 @@ def unpad_image(image, pad_size):
return image[pad_size:-pad_size, pad_size:-pad_size, :] return image[pad_size:-pad_size, pad_size:-pad_size, :]
def jpegBlur(im, q):
    """Round-trip an image through in-memory JPEG compression.

    Encodes `im` as a JPEG at quality `q` into a BytesIO buffer and
    immediately decodes it back, so the returned array carries the
    lossy-compression artifacts of that quality setting.
    """
    stream = io.BytesIO()
    imageio.imwrite(stream, im, format='jpg', quality=q)
    encoded = stream.getbuffer()
    return imageio.imread(encoded, format='jpg')
def process_array(image_array, expand=True): def process_array(image_array, expand=True):
""" Process a 3-dimensional array into a scaled, 4 dimensional batch of size 1. """ """ Process a 3-dimensional array into a scaled, 4 dimensional batch of size 1. """
......
...@@ -3,5 +3,4 @@ opencv-python ...@@ -3,5 +3,4 @@ opencv-python
Pillow Pillow
torch>=1.7 torch>=1.7
torchvision>=0.8.0 torchvision>=0.8.0
tqdm tqdm
imageio \ No newline at end of file
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment