"examples/offline_inference/neuron.py" did not exist on "8cb87a8a443db571f92ec74a7d3f5043bcc5c28f"
model.py 2.26 KB
Newer Older
boomb0om's avatar
boomb0om committed
1
2
3
4
5
6
import torch
from torch.nn import functional as F
from PIL import Image
import numpy as np
import cv2

from .rrdbnet_arch import RRDBNet
from .utils import pad_reflect, split_image_into_overlapping_patches, stich_together, \
                   unpad_image


class RealESRGAN:
    """Wrapper around the RRDBNet super-resolution network (Real-ESRGAN).

    Upscales RGB images by ``scale`` using tiled (patch-based) inference,
    so arbitrarily large inputs fit in GPU/CPU memory.
    """

    def __init__(self, device, scale=4):
        """Build the (unweighted) network.

        Args:
            device: torch device (or device string) inference runs on.
            scale: upscaling factor; must match the checkpoint loaded later.
        """
        self.device = device
        self.scale = scale
        # Standard Real-ESRGAN RRDBNet configuration (23 RRDB blocks).
        self.model = RRDBNet(
            num_in_ch=3, num_out_ch=3, num_feat=64,
            num_block=23, num_grow_ch=32, scale=scale
        )

    def load_weights(self, model_path):
        """Load a checkpoint from ``model_path`` and move the model to self.device.

        Accepts checkpoints that store the state dict under 'params',
        'params_ema', or directly at the top level.
        """
        # Fix: map_location is required so CUDA-saved checkpoints load on
        # CPU-only machines (torch.load defaults to the device it was saved on).
        loadnet = torch.load(model_path, map_location=self.device)
        if 'params' in loadnet:
            self.model.load_state_dict(loadnet['params'], strict=True)
        elif 'params_ema' in loadnet:
            self.model.load_state_dict(loadnet['params_ema'], strict=True)
        else:
            self.model.load_state_dict(loadnet, strict=True)
        self.model.eval()
        self.model.to(self.device)

    # NOTE(review): torch.cuda.amp.autocast is deprecated in recent torch in
    # favor of torch.amp.autocast('cuda') — kept as-is for compatibility with
    # the torch version this project pins; confirm before migrating.
    @torch.cuda.amp.autocast()
    def predict(self, lr_image, batch_size=4, patches_size=192,
                padding=24, pad_size=15):
        """Super-resolve an image.

        Args:
            lr_image: input image — PIL Image or HxWx3 uint8 array
                (anything np.array() turns into that shape).
            batch_size: number of patches pushed through the net per forward pass.
            patches_size: side length of each square patch (in LR pixels).
            padding: overlap between neighbouring patches, blended away when
                the patches are stitched back together.
            pad_size: reflect-padding applied to the whole image border before
                tiling; removed (scaled) from the final output.

        Returns:
            PIL.Image of the upscaled result (dimensions = input * self.scale).
        """
        scale = self.scale
        device = self.device
        lr_image = np.array(lr_image)
        # Reflect-pad the borders so edge patches see valid context.
        lr_image = pad_reflect(lr_image, pad_size)

        # Tile the image into overlapping patches; p_shape is the padded
        # per-patch spatial shape needed to reassemble them later.
        patches, p_shape = split_image_into_overlapping_patches(
            lr_image, patch_size=patches_size, padding_size=padding
        )
        # Normalize uint8 -> [0, 1] float and convert NHWC -> NCHW.
        img = torch.FloatTensor(patches / 255).permute((0, 3, 1, 2)).to(device).detach()

        # Run the patches through the network in mini-batches, concatenating
        # the outputs along the batch dimension.
        with torch.no_grad():
            res = self.model(img[0:batch_size])
            for i in range(batch_size, img.shape[0], batch_size):
                res = torch.cat((res, self.model(img[i:i + batch_size])), 0)

        # Back to NHWC in [0, 1] on the CPU for numpy post-processing.
        sr_image = res.permute((0, 2, 3, 1)).clamp_(0, 1).cpu()
        np_sr_image = sr_image.numpy()

        # Everything downstream works in super-resolved coordinates.
        padded_size_scaled = tuple(np.multiply(p_shape[0:2], scale)) + (3,)
        scaled_image_shape = tuple(np.multiply(lr_image.shape[0:2], scale)) + (3,)
        np_sr_image = stich_together(
            np_sr_image, padded_image_shape=padded_size_scaled,
            target_shape=scaled_image_shape, padding_size=padding * scale
        )

        # Denormalize, strip the (scaled) reflect padding, return as PIL.
        sr_img = (np_sr_image * 255).astype(np.uint8)
        sr_img = unpad_image(sr_img, pad_size * scale)
        sr_img = Image.fromarray(sr_img)

        return sr_img