import torch.nn as nn

norm_cfg = {
    # format: layer_type: (abbreviation, module)
    'BN': ('bn', nn.BatchNorm2d),
    'SyncBN': ('bn', None),
    'GN': ('gn', nn.GroupNorm),
    # and potentially 'SN'
}


def build_norm_layer(cfg, num_features, postfix=''):
    """Build a normalization layer.

    Args:
        cfg (dict): cfg should contain:
            type (str): identify norm layer type.
            layer args: args needed to instantiate a norm layer.
            frozen (bool): [optional] whether to stop gradient updates of
                the norm layer; it is helpful to set frozen mode in the
                backbone's norms.
        num_features (int): number of channels from input.
        postfix (int, str): appended to the norm abbreviation to create a
            named layer.

    Returns:
        name (str): abbreviation + postfix
        layer (nn.Module): created norm layer
    """
    assert isinstance(cfg, dict) and 'type' in cfg
    cfg_ = cfg.copy()

    # eval_mode is supported and popped out for processing only in modules
    # with pretrained weights (e.g. backbone);
    # raise an exception if eval_mode is passed here
    if 'eval_mode' in cfg:
        raise Exception('eval_mode for modules without pretrained weights '
                        'is not supported')

    layer_type = cfg_.pop('type')
    if layer_type not in norm_cfg:
        raise KeyError('Unrecognized norm type {}'.format(layer_type))
    else:
        abbr, norm_layer = norm_cfg[layer_type]
        if norm_layer is None:
            raise NotImplementedError

    assert isinstance(postfix, (int, str))
    name = abbr + str(postfix)

    frozen = cfg_.pop('frozen', False)
    cfg_.setdefault('eps', 1e-5)
    if layer_type != 'GN':
        layer = norm_layer(num_features, **cfg_)
    else:
        assert 'num_groups' in cfg_
        layer = norm_layer(num_channels=num_features, **cfg_)

    if frozen:
        for param in layer.parameters():
            param.requires_grad = False

    return name, layer
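

# Minimal usage sketch (an assumed example, not part of the original module):
# builds a frozen BatchNorm2d layer and a GroupNorm layer using the config
# dict format accepted by build_norm_layer above.
if __name__ == '__main__':
    # 'bn1' = abbreviation 'bn' + postfix 1; affine params get
    # requires_grad=False because frozen=True
    bn_name, bn_layer = build_norm_layer(
        dict(type='BN', frozen=True), num_features=64, postfix=1)
    print(bn_name, bn_layer)  # bn1 BatchNorm2d(64, eps=1e-05, ...)

    # GN requires num_groups; num_features is forwarded as num_channels
    gn_name, gn_layer = build_norm_layer(
        dict(type='GN', num_groups=32), num_features=64)
    print(gn_name, gn_layer)  # gn GroupNorm(32, 64, eps=1e-05, affine=True)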