import torch.nn as nn


def get_parameters(model, keys=None, mode='include'):
    """Yield model parameters, filtered by substring `keys` matched against parameter names.

    mode='include' keeps parameters whose name contains any key;
    mode='exclude' keeps parameters whose name contains none of the keys.
    """
    if keys is None:
        for name, param in model.named_parameters():
            yield param
    elif mode == 'include':
        for name, param in model.named_parameters():
            flag = False
            for key in keys:
                if key in name:
                    flag = True
                    break
            if flag:
                yield param
    elif mode == 'exclude':
        for name, param in model.named_parameters():
            flag = True
            for key in keys:
                if key in name:
                    flag = False
                    break
            if flag:
                yield param
    else:
        raise ValueError('do not support: %s' % mode)


def get_same_padding(kernel_size):
    """Return the padding that preserves spatial size for a stride-1 conv with an odd kernel size."""
    if isinstance(kernel_size, tuple):
        assert len(kernel_size) == 2, 'invalid kernel size: %s' % kernel_size
        p1 = get_same_padding(kernel_size[0])
        p2 = get_same_padding(kernel_size[1])
        return p1, p2
    assert isinstance(kernel_size, int), 'kernel size should be either `int` or `tuple`'
    assert kernel_size % 2 > 0, 'kernel size should be odd number'
    return kernel_size // 2


def build_activation(act_func, inplace=True):
    """Build an activation module from its name; returns None if `act_func` is None."""
    if act_func == 'relu':
        return nn.ReLU(inplace=inplace)
    elif act_func == 'relu6':
        return nn.ReLU6(inplace=inplace)
    elif act_func == 'tanh':
        return nn.Tanh()
    elif act_func == 'sigmoid':
        return nn.Sigmoid()
    elif act_func is None:
        return None
    else:
        raise ValueError('do not support: %s' % act_func)


def make_divisible(v, divisor, min_val=None):
    """
    This function is taken from the original tf repo.
    It ensures that all layers have a channel number that is divisible by 8.
    It can be seen here:
    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
    """
    if min_val is None:
        min_val = divisor
    new_v = max(min_val, int(v + divisor / 2) // divisor * divisor)
    # Make sure that round down does not go down by more than 10%.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v
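

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): shows how the
# helpers above are typically combined, e.g. excluding BatchNorm parameters
# from weight decay via `get_parameters`. `_TinyNet` and the optimizer
# settings below are hypothetical examples for illustration only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import torch

    class _TinyNet(nn.Module):
        def __init__(self):
            super().__init__()
            out_ch = make_divisible(37, 8)  # rounds 37 up to 40
            self.conv = nn.Conv2d(3, out_ch, kernel_size=3,
                                  padding=get_same_padding(3), bias=False)
            self.bn = nn.BatchNorm2d(out_ch)
            self.act = build_activation('relu6')

        def forward(self, x):
            return self.act(self.bn(self.conv(x)))

    model = _TinyNet()

    # Apply weight decay only to parameters whose names do not contain 'bn'.
    decay_params = list(get_parameters(model, keys=['bn'], mode='exclude'))
    no_decay_params = list(get_parameters(model, keys=['bn'], mode='include'))
    optimizer = torch.optim.SGD([
        {'params': decay_params, 'weight_decay': 4e-5},
        {'params': no_decay_params, 'weight_decay': 0},
    ], lr=0.05, momentum=0.9)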