###########################################################################
# Created by: Hang Zhang 
# Email: zhang.hang@rutgers.edu 
# Copyright (c) 2017
###########################################################################

import math
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parallel.data_parallel import DataParallel
from torch.nn.parallel.parallel_apply import parallel_apply
from torch.nn.parallel.scatter_gather import scatter

from . import resnet
from ..utils import batch_pix_accuracy, batch_intersection_union

up_kwargs = {'mode': 'bilinear', 'align_corners': True}

__all__ = ['BaseNet', 'MultiEvalModule']

class BaseNet(nn.Module):
    """Common backbone wrapper for semantic-segmentation networks.

    Builds a (optionally dilated) ResNet backbone and exposes its
    intermediate feature maps through :meth:`base_forward`; concrete
    segmentation heads subclass this and implement ``forward``.

    Args:
        nclass (int): number of segmentation classes.
        backbone (str): backbone name: 'resnet50' | 'resnet101' | 'resnet152'.
        aux (bool): whether an auxiliary head/loss is used (stored only).
        se_loss (bool): whether the SE loss is used (stored only).
        dilated (bool): build the backbone with dilated convolutions.
        norm_layer: normalization layer class forwarded to the backbone.
        base_size (int): long-side size used by multi-scale evaluation.
        crop_size (int): crop size used by multi-scale evaluation.
        mean (list): per-channel normalization mean; defaults to the
            ImageNet values [.485, .456, .406].
        std (list): per-channel normalization std; defaults to the
            ImageNet values [.229, .224, .225].
        root (str): directory holding pretrained backbone weights.

    Raises:
        RuntimeError: if ``backbone`` is not one of the supported names.
    """
    def __init__(self, nclass, backbone, aux, se_loss, dilated=True, norm_layer=None,
                 base_size=520, crop_size=480, mean=None,
                 std=None, root='~/.encoding/models'):
        super(BaseNet, self).__init__()
        # None sentinels replace the previous mutable list defaults so a
        # single default object is never shared across instances.
        if mean is None:
            mean = [.485, .456, .406]
        if std is None:
            std = [.229, .224, .225]
        self.nclass = nclass
        self.aux = aux
        self.se_loss = se_loss
        self.mean = mean
        self.std = std
        self.base_size = base_size
        self.crop_size = crop_size
        # copying modules from pretrained models
        self.backbone = backbone
        if backbone in ('resnet50', 'resnet101', 'resnet152'):
            # dispatch by name instead of three duplicated branches
            self.pretrained = getattr(resnet, backbone)(
                pretrained=True, dilated=dilated,
                norm_layer=norm_layer, root=root)
        else:
            raise RuntimeError('unknown backbone: {}'.format(backbone))
        # bilinear upsample options
        self._up_kwargs = up_kwargs

    def base_forward(self, x):
        """Run the backbone and return its intermediate feature maps.

        Returns the four ResNet stage outputs ``(c1, c2, c3, c4)``.
        For a 'wideresnet*' backbone it returns ``(None, None, c3, x)``
        instead.  NOTE(review): ``__init__`` rejects wideresnet names, so
        this branch presumably serves subclasses that install
        ``self.pretrained`` themselves — confirm before removing.
        """
        if self.backbone.startswith('wideresnet'):
            x = self.pretrained.mod1(x)
            x = self.pretrained.pool2(x)
            x = self.pretrained.mod2(x)
            x = self.pretrained.pool3(x)
            x = self.pretrained.mod3(x)
            x = self.pretrained.mod4(x)
            x = self.pretrained.mod5(x)
            c3 = x.clone()
            x = self.pretrained.mod6(x)
            x = self.pretrained.mod7(x)
            x = self.pretrained.bn_out(x)
            return None, None, c3, x
        else:
            x = self.pretrained.conv1(x)
            x = self.pretrained.bn1(x)
            x = self.pretrained.relu(x)
            x = self.pretrained.maxpool(x)
            c1 = self.pretrained.layer1(x)
            c2 = self.pretrained.layer2(c1)
            c3 = self.pretrained.layer3(c2)
            c4 = self.pretrained.layer4(c3)
        return c1, c2, c3, c4

    def evaluate(self, x, target=None):
        """Forward ``x`` and, if ``target`` is given, return accuracy stats.

        Returns the primary prediction when ``target`` is None; otherwise
        returns ``(correct, labeled, inter, union)`` pixel-accuracy and
        intersection/union counts computed against ``target``.
        """
        pred = self.forward(x)
        if isinstance(pred, (tuple, list)):
            # multi-head models return (main, aux, ...); score the main head
            pred = pred[0]
        if target is None:
            return pred
        correct, labeled = batch_pix_accuracy(pred.data, target.data)
        inter, union = batch_intersection_union(pred.data, target.data, self.nclass)
        return correct, labeled, inter, union


class MultiEvalModule(DataParallel):
    """Multi-size Segmentation Evaluator.

    Runs the wrapped segmentation ``module`` over several image scales
    (optionally averaging with the horizontal flip) and accumulates the
    class scores at the original resolution.  Images larger than
    ``crop_size`` at a given scale are evaluated with overlapping
    sliding-window crops whose scores are averaged.

    Args:
        module: segmentation network exposing ``evaluate``, ``base_size``,
            ``crop_size``, ``mean``, ``std`` and ``_up_kwargs``.
        nclass (int): number of segmentation classes.
        device_ids: GPU ids forwarded to :class:`DataParallel`.
        flip (bool): also evaluate the horizontally flipped image.
        scales: image scales to evaluate (default is a tuple so the
            shared default cannot be mutated across instances).
    """
    def __init__(self, module, nclass, device_ids=None, flip=True,
                 scales=(0.5, 0.75, 1.0, 1.25, 1.5, 1.75)):
        super(MultiEvalModule, self).__init__(module, device_ids)
        self.nclass = nclass
        self.base_size = module.base_size
        self.crop_size = module.crop_size
        self.scales = scales
        self.flip = flip
        print('MultiEvalModule: base_size {}, crop_size {}'. \
            format(self.base_size, self.crop_size))

    def parallel_forward(self, inputs, **kwargs):
        """Multi-GPU Mult-size Evaluation

        Args:
            inputs: list of Tensors
        """
        inputs = [(input.unsqueeze(0).cuda(device),)
                  for input, device in zip(inputs, self.device_ids)]
        replicas = self.replicate(self, self.device_ids[:len(inputs)])
        # BUG FIX: ``target_gpus`` and ``dim`` were undefined names here,
        # raising NameError whenever kwargs was non-empty.  Scatter kwargs
        # to the devices that actually received an input, along dim 0.
        kwargs = scatter(kwargs, self.device_ids[:len(inputs)], 0) if kwargs else []
        # pad the shorter of inputs/kwargs so parallel_apply sees pairs
        if len(inputs) < len(kwargs):
            inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
        elif len(kwargs) < len(inputs):
            kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
        outputs = self.parallel_apply(replicas, inputs, kwargs)
        return outputs

    def forward(self, image):
        """Mult-size Evaluation"""
        # only single image is supported for evaluation
        batch, _, h, w = image.size()
        assert(batch == 1)
        # overlap ratio between neighbouring sliding windows
        stride_rate = 2.0/3.0
        crop_size = self.crop_size
        stride = int(crop_size * stride_rate)
        with torch.cuda.device_of(image):
            scores = image.new().resize_(batch,self.nclass,h,w).zero_().cuda()

        for scale in self.scales:
            long_size = int(math.ceil(self.base_size * scale))
            # resize so the LONG image side equals long_size,
            # keeping the aspect ratio (+0.5 rounds to nearest)
            if h > w:
                height = long_size
                width = int(1.0 * w * long_size / h + 0.5)
                short_size = width
            else:
                width = long_size
                height = int(1.0 * h * long_size / w + 0.5)
                short_size = height
            # resize image to current size
            cur_img = resize_image(image, height, width, **self.module._up_kwargs)
            if long_size <= crop_size:
                # whole image fits in one crop: pad up and evaluate once
                pad_img = pad_image(cur_img, self.module.mean,
                                    self.module.std, crop_size)
                outputs = module_inference(self.module, pad_img, self.flip)
                outputs = crop_image(outputs, 0, height, 0, width)
            else:
                if short_size < crop_size:
                    # pad if needed
                    pad_img = pad_image(cur_img, self.module.mean,
                                        self.module.std, crop_size)
                else:
                    pad_img = cur_img
                _,_,ph,pw = pad_img.size()
                assert(ph >= height and pw >= width)
                # grid forward and normalize
                h_grids = int(math.ceil(1.0 * (ph-crop_size)/stride)) + 1
                w_grids = int(math.ceil(1.0 * (pw-crop_size)/stride)) + 1
                with torch.cuda.device_of(image):
                    outputs = image.new().resize_(batch,self.nclass,ph,pw).zero_().cuda()
                    count_norm = image.new().resize_(batch,1,ph,pw).zero_().cuda()
                # grid evaluation: sum window scores, count overlaps
                for idh in range(h_grids):
                    for idw in range(w_grids):
                        h0 = idh * stride
                        w0 = idw * stride
                        h1 = min(h0 + crop_size, ph)
                        w1 = min(w0 + crop_size, pw)
                        crop_img = crop_image(pad_img, h0, h1, w0, w1)
                        # pad if needed (edge windows may be short)
                        pad_crop_img = pad_image(crop_img, self.module.mean,
                                                 self.module.std, crop_size)
                        output = module_inference(self.module, pad_crop_img, self.flip)
                        outputs[:,:,h0:h1,w0:w1] += crop_image(output,
                            0, h1-h0, 0, w1-w0)
                        count_norm[:,:,h0:h1,w0:w1] += 1
                # every pixel must be covered by at least one window
                assert((count_norm==0).sum()==0)
                outputs = outputs / count_norm
                outputs = outputs[:,:,:height,:width]
            # accumulate the per-scale scores at the original resolution
            score = resize_image(outputs, h, w, **self.module._up_kwargs)
            scores += score

        return scores


def module_inference(module, image, flip=True):
    """Score ``image`` with ``module`` and return exponentiated outputs.

    When ``flip`` is true, the horizontally mirrored image is also
    scored and its un-mirrored scores are added in before ``exp``.
    """
    scores = module.evaluate(image)
    if flip:
        mirrored = flip_image(image)
        scores += flip_image(module.evaluate(mirrored))
    return scores.exp()

def resize_image(img, h, w, **up_kwargs):
    """Resample ``img`` to spatial size ``(h, w)``.

    ``up_kwargs`` holds the interpolation options (e.g. ``mode`` and
    ``align_corners``) forwarded to ``F.interpolate``.
    """
    target_size = (h, w)
    return F.interpolate(img, target_size, **up_kwargs)

def pad_image(img, mean, std, crop_size):
    """Pad a 3-channel NCHW image on the bottom/right up to ``crop_size``.

    The fill value per channel is what a zero pixel becomes after
    ``(x - mean) / std`` normalization, so padding reads as "black".
    Always returns a new tensor, even when no padding is required.
    """
    batch, channels, height, width = img.size()
    assert channels == 3
    pad_h = max(crop_size - height, 0)
    pad_w = max(crop_size - width, 0)
    fill = -np.array(mean) / np.array(std)
    # note: pytorch's F.pad takes (left, right, top, bottom) for the
    # last two dims, i.e. in reversed dimension order
    padded = torch.stack(
        [F.pad(img[:, ch, :, :], (0, pad_w, 0, pad_h), value=fill[ch])
         for ch in range(channels)],
        dim=1)
    assert padded.size(2) >= crop_size and padded.size(3) >= crop_size
    return padded

def crop_image(img, h0, h1, w0, w1):
    """Return the spatial window rows [h0:h1], cols [w0:w1] of a NCHW tensor."""
    rows = slice(h0, h1)
    cols = slice(w0, w1)
    return img[:, :, rows, cols]

def flip_image(img):
    """Reverse a NCHW tensor along its width (last) dimension."""
    assert img.dim() == 4
    last = img.size(3)
    # build the descending column index on the same device as img
    with torch.cuda.device_of(img):
        reversed_idx = torch.arange(last - 1, -1, -1).type_as(img).long()
    return img.index_select(3, reversed_idx)