Unverified commit d367a01a authored by Jirka Borovec, committed by GitHub

Use f-strings almost everywhere, and other cleanups by applying pyupgrade (#4585)


Co-authored-by: Nicolas Hug <nicolashug@fb.com>
parent 50dfe207
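
For context, below is a minimal, hypothetical sketch of the kinds of rewrites pyupgrade performs when targeting Python 3. The class, parameters, and values are made up for illustration and are not taken from this diff; the exact pyupgrade invocation used for this PR is not shown on this page (a typical run would be something along the lines of pyupgrade --py36-plus over the source files).

# Hypothetical illustration of the pyupgrade rewrites applied throughout this commit.
# Old style, as seen in the removed lines of the diff:
#     class Example(object):
#         def __init__(self, arch):
#             super(Example, self).__init__()
#             if arch not in ("a", "b"):
#                 raise ValueError("Unsupported model type {}".format(arch))
#             self.name = "stage{}".format(arch)
# New style produced by pyupgrade:
class Example:
    def __init__(self, arch: str) -> None:
        super().__init__()  # zero-argument super(), no explicit class/self
        if arch not in ("a", "b"):
            raise ValueError(f"Unsupported model type {arch}")  # f-string instead of str.format
        self.name = f"stage{arch}"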
......@@ -278,7 +278,7 @@ def _mobilenet_v3_conf(
]
last_channel = adjust_channels(1024 // reduce_divider) # C5
else:
raise ValueError("Unsupported model type {}".format(arch))
raise ValueError(f"Unsupported model type {arch}")
return inverted_residual_setting, last_channel
......@@ -294,7 +294,7 @@ def _mobilenet_v3(
model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs)
if pretrained:
if model_urls.get(arch, None) is None:
raise ValueError("No checkpoint is available for model type {}".format(arch))
raise ValueError(f"No checkpoint is available for model type {arch}")
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
model.load_state_dict(state_dict)
return model
......
......@@ -49,7 +49,7 @@ def googlenet(
kwargs["aux_logits"] = False
if kwargs["aux_logits"]:
warnings.warn(
"auxiliary heads in the pretrained googlenet model are NOT pretrained, " "so make sure to train them"
"auxiliary heads in the pretrained googlenet model are NOT pretrained, so make sure to train them"
)
original_aux_logits = kwargs["aux_logits"]
kwargs["aux_logits"] = True
......@@ -67,7 +67,7 @@ def googlenet(
if pretrained:
if quantize:
model_url = quant_model_urls["googlenet" + "_" + backend]
model_url = quant_model_urls["googlenet_" + backend]
else:
model_url = model_urls["googlenet"]
......@@ -84,7 +84,7 @@ def googlenet(
class QuantizableBasicConv2d(BasicConv2d):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(QuantizableBasicConv2d, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self.relu = nn.ReLU()
def forward(self, x: Tensor) -> Tensor:
......@@ -99,9 +99,7 @@ class QuantizableBasicConv2d(BasicConv2d):
class QuantizableInception(Inception):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(QuantizableInception, self).__init__( # type: ignore[misc]
conv_block=QuantizableBasicConv2d, *args, **kwargs
)
super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc]
self.cat = nn.quantized.FloatFunctional()
def forward(self, x: Tensor) -> Tensor:
......@@ -112,9 +110,7 @@ class QuantizableInception(Inception):
class QuantizableInceptionAux(InceptionAux):
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(QuantizableInceptionAux, self).__init__( # type: ignore[misc]
conv_block=QuantizableBasicConv2d, *args, **kwargs
)
super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc]
self.relu = nn.ReLU()
def forward(self, x: Tensor) -> Tensor:
......@@ -138,7 +134,7 @@ class QuantizableInceptionAux(InceptionAux):
class QuantizableGoogLeNet(GoogLeNet):
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(QuantizableGoogLeNet, self).__init__( # type: ignore[misc]
super().__init__( # type: ignore[misc]
blocks=[QuantizableBasicConv2d, QuantizableInception, QuantizableInceptionAux], *args, **kwargs
)
self.quant = torch.quantization.QuantStub()
......
......@@ -75,7 +75,7 @@ def inception_v3(
if not original_aux_logits:
model.aux_logits = False
model.AuxLogits = None
model_url = quant_model_urls["inception_v3_google" + "_" + backend]
model_url = quant_model_urls["inception_v3_google_" + backend]
else:
model_url = inception_module.model_urls["inception_v3_google"]
......@@ -92,7 +92,7 @@ def inception_v3(
class QuantizableBasicConv2d(inception_module.BasicConv2d):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(QuantizableBasicConv2d, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self.relu = nn.ReLU()
def forward(self, x: Tensor) -> Tensor:
......@@ -108,9 +108,7 @@ class QuantizableBasicConv2d(inception_module.BasicConv2d):
class QuantizableInceptionA(inception_module.InceptionA):
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(QuantizableInceptionA, self).__init__( # type: ignore[misc]
conv_block=QuantizableBasicConv2d, *args, **kwargs
)
super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc]
self.myop = nn.quantized.FloatFunctional()
def forward(self, x: Tensor) -> Tensor:
......@@ -121,9 +119,7 @@ class QuantizableInceptionA(inception_module.InceptionA):
class QuantizableInceptionB(inception_module.InceptionB):
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(QuantizableInceptionB, self).__init__( # type: ignore[misc]
conv_block=QuantizableBasicConv2d, *args, **kwargs
)
super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc]
self.myop = nn.quantized.FloatFunctional()
def forward(self, x: Tensor) -> Tensor:
......@@ -134,9 +130,7 @@ class QuantizableInceptionB(inception_module.InceptionB):
class QuantizableInceptionC(inception_module.InceptionC):
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(QuantizableInceptionC, self).__init__( # type: ignore[misc]
conv_block=QuantizableBasicConv2d, *args, **kwargs
)
super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc]
self.myop = nn.quantized.FloatFunctional()
def forward(self, x: Tensor) -> Tensor:
......@@ -147,9 +141,7 @@ class QuantizableInceptionC(inception_module.InceptionC):
class QuantizableInceptionD(inception_module.InceptionD):
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(QuantizableInceptionD, self).__init__( # type: ignore[misc]
conv_block=QuantizableBasicConv2d, *args, **kwargs
)
super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc]
self.myop = nn.quantized.FloatFunctional()
def forward(self, x: Tensor) -> Tensor:
......@@ -160,9 +152,7 @@ class QuantizableInceptionD(inception_module.InceptionD):
class QuantizableInceptionE(inception_module.InceptionE):
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(QuantizableInceptionE, self).__init__( # type: ignore[misc]
conv_block=QuantizableBasicConv2d, *args, **kwargs
)
super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc]
self.myop1 = nn.quantized.FloatFunctional()
self.myop2 = nn.quantized.FloatFunctional()
self.myop3 = nn.quantized.FloatFunctional()
......@@ -196,9 +186,7 @@ class QuantizableInceptionE(inception_module.InceptionE):
class QuantizableInceptionAux(inception_module.InceptionAux):
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(QuantizableInceptionAux, self).__init__( # type: ignore[misc]
conv_block=QuantizableBasicConv2d, *args, **kwargs
)
super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc]
class QuantizableInception3(inception_module.Inception3):
......@@ -208,7 +196,7 @@ class QuantizableInception3(inception_module.Inception3):
aux_logits: bool = True,
transform_input: bool = False,
) -> None:
super(QuantizableInception3, self).__init__(
super().__init__(
num_classes=num_classes,
aux_logits=aux_logits,
transform_input=transform_input,
......
......@@ -19,7 +19,7 @@ quant_model_urls = {
class QuantizableInvertedResidual(InvertedResidual):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(QuantizableInvertedResidual, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self.skip_add = nn.quantized.FloatFunctional()
def forward(self, x: Tensor) -> Tensor:
......@@ -42,7 +42,7 @@ class QuantizableMobileNetV2(MobileNetV2):
Args:
Inherits args from floating point MobileNetV2
"""
super(QuantizableMobileNetV2, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self.quant = QuantStub()
self.dequant = DeQuantStub()
......
......@@ -110,7 +110,7 @@ class QuantizableMobileNetV3(MobileNetV3):
def _load_weights(arch: str, model: QuantizableMobileNetV3, model_url: Optional[str], progress: bool) -> None:
if model_url is None:
raise ValueError("No checkpoint is available for {}".format(arch))
raise ValueError(f"No checkpoint is available for {arch}")
state_dict = load_state_dict_from_url(model_url, progress=progress)
model.load_state_dict(state_dict)
......
......@@ -21,7 +21,7 @@ quant_model_urls = {
class QuantizableBasicBlock(BasicBlock):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(QuantizableBasicBlock, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self.add_relu = torch.nn.quantized.FloatFunctional()
def forward(self, x: Tensor) -> Tensor:
......@@ -49,7 +49,7 @@ class QuantizableBasicBlock(BasicBlock):
class QuantizableBottleneck(Bottleneck):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(QuantizableBottleneck, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self.skip_add_relu = nn.quantized.FloatFunctional()
self.relu1 = nn.ReLU(inplace=False)
self.relu2 = nn.ReLU(inplace=False)
......@@ -80,7 +80,7 @@ class QuantizableBottleneck(Bottleneck):
class QuantizableResNet(ResNet):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(QuantizableResNet, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self.quant = torch.quantization.QuantStub()
self.dequant = torch.quantization.DeQuantStub()
......
......@@ -26,7 +26,7 @@ quant_model_urls = {
class QuantizableInvertedResidual(shufflenetv2.InvertedResidual):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(QuantizableInvertedResidual, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self.cat = nn.quantized.FloatFunctional()
def forward(self, x: Tensor) -> Tensor:
......@@ -44,9 +44,7 @@ class QuantizableInvertedResidual(shufflenetv2.InvertedResidual):
class QuantizableShuffleNetV2(shufflenetv2.ShuffleNetV2):
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(QuantizableShuffleNetV2, self).__init__( # type: ignore[misc]
*args, inverted_residual=QuantizableInvertedResidual, **kwargs
)
super().__init__(*args, inverted_residual=QuantizableInvertedResidual, **kwargs) # type: ignore[misc]
self.quant = torch.quantization.QuantStub()
self.dequant = torch.quantization.DeQuantStub()
......
......@@ -68,7 +68,7 @@ class BasicBlock(nn.Module):
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super(BasicBlock, self).__init__()
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
......@@ -123,7 +123,7 @@ class Bottleneck(nn.Module):
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super(Bottleneck, self).__init__()
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.0)) * groups
......@@ -173,7 +173,7 @@ class ResNet(nn.Module):
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super(ResNet, self).__init__()
super().__init__()
_log_api_usage_once(self)
if norm_layer is None:
norm_layer = nn.BatchNorm2d
......@@ -188,7 +188,7 @@ class ResNet(nn.Module):
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation)
f"or a 3-element tuple, got {replace_stride_with_dilation}"
)
self.groups = groups
self.base_width = width_per_group
......
......@@ -11,7 +11,7 @@ class _SimpleSegmentationModel(nn.Module):
__constants__ = ["aux_classifier"]
def __init__(self, backbone: nn.Module, classifier: nn.Module, aux_classifier: Optional[nn.Module] = None) -> None:
super(_SimpleSegmentationModel, self).__init__()
super().__init__()
self.backbone = backbone
self.classifier = classifier
self.aux_classifier = aux_classifier
......@@ -38,6 +38,6 @@ class _SimpleSegmentationModel(nn.Module):
def _load_weights(arch: str, model: nn.Module, model_url: Optional[str], progress: bool) -> None:
if model_url is None:
raise ValueError("No checkpoint is available for {}".format(arch))
raise ValueError(f"No checkpoint is available for {arch}")
state_dict = load_state_dict_from_url(model_url, progress=progress)
model.load_state_dict(state_dict)
......@@ -47,7 +47,7 @@ class DeepLabV3(_SimpleSegmentationModel):
class DeepLabHead(nn.Sequential):
def __init__(self, in_channels: int, num_classes: int) -> None:
super(DeepLabHead, self).__init__(
super().__init__(
ASPP(in_channels, [12, 24, 36]),
nn.Conv2d(256, 256, 3, padding=1, bias=False),
nn.BatchNorm2d(256),
......@@ -63,12 +63,12 @@ class ASPPConv(nn.Sequential):
nn.BatchNorm2d(out_channels),
nn.ReLU(),
]
super(ASPPConv, self).__init__(*modules)
super().__init__(*modules)
class ASPPPooling(nn.Sequential):
def __init__(self, in_channels: int, out_channels: int) -> None:
super(ASPPPooling, self).__init__(
super().__init__(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
......@@ -84,7 +84,7 @@ class ASPPPooling(nn.Sequential):
class ASPP(nn.Module):
def __init__(self, in_channels: int, atrous_rates: List[int], out_channels: int = 256) -> None:
super(ASPP, self).__init__()
super().__init__()
modules = []
modules.append(
nn.Sequential(nn.Conv2d(in_channels, out_channels, 1, bias=False), nn.BatchNorm2d(out_channels), nn.ReLU())
......
......@@ -44,7 +44,7 @@ class FCNHead(nn.Sequential):
nn.Conv2d(inter_channels, channels, 1),
]
super(FCNHead, self).__init__(*layers)
super().__init__(*layers)
def _fcn_resnet(
......
......@@ -35,7 +35,7 @@ def channel_shuffle(x: Tensor, groups: int) -> Tensor:
class InvertedResidual(nn.Module):
def __init__(self, inp: int, oup: int, stride: int) -> None:
super(InvertedResidual, self).__init__()
super().__init__()
if not (1 <= stride <= 3):
raise ValueError("illegal stride value")
......@@ -99,7 +99,7 @@ class ShuffleNetV2(nn.Module):
num_classes: int = 1000,
inverted_residual: Callable[..., nn.Module] = InvertedResidual,
) -> None:
super(ShuffleNetV2, self).__init__()
super().__init__()
_log_api_usage_once(self)
if len(stages_repeats) != 3:
......@@ -123,7 +123,7 @@ class ShuffleNetV2(nn.Module):
self.stage2: nn.Sequential
self.stage3: nn.Sequential
self.stage4: nn.Sequential
stage_names = ["stage{}".format(i) for i in [2, 3, 4]]
stage_names = [f"stage{i}" for i in [2, 3, 4]]
for name, repeats, output_channels in zip(stage_names, stages_repeats, self._stage_out_channels[1:]):
seq = [inverted_residual(input_channels, output_channels, 2)]
for i in range(repeats - 1):
......@@ -162,7 +162,7 @@ def _shufflenetv2(arch: str, pretrained: bool, progress: bool, *args: Any, **kwa
if pretrained:
model_url = model_urls[arch]
if model_url is None:
raise NotImplementedError("pretrained {} is not supported as of now".format(arch))
raise NotImplementedError(f"pretrained {arch} is not supported as of now")
else:
state_dict = load_state_dict_from_url(model_url, progress=progress)
model.load_state_dict(state_dict)
......
......@@ -17,7 +17,7 @@ model_urls = {
class Fire(nn.Module):
def __init__(self, inplanes: int, squeeze_planes: int, expand1x1_planes: int, expand3x3_planes: int) -> None:
super(Fire, self).__init__()
super().__init__()
self.inplanes = inplanes
self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
self.squeeze_activation = nn.ReLU(inplace=True)
......@@ -35,7 +35,7 @@ class Fire(nn.Module):
class SqueezeNet(nn.Module):
def __init__(self, version: str = "1_0", num_classes: int = 1000, dropout: float = 0.5) -> None:
super(SqueezeNet, self).__init__()
super().__init__()
_log_api_usage_once(self)
self.num_classes = num_classes
if version == "1_0":
......@@ -74,7 +74,7 @@ class SqueezeNet(nn.Module):
# FIXME: Is this needed? SqueezeNet should only be called from the
# FIXME: squeezenet1_x() functions
# FIXME: This checking is not done for the other models
raise ValueError("Unsupported SqueezeNet version {version}:" "1_0 or 1_1 expected".format(version=version))
raise ValueError(f"Unsupported SqueezeNet version {version}: 1_0 or 1_1 expected")
# Final convolution is initialized differently from the rest
final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1)
......
......@@ -36,7 +36,7 @@ class VGG(nn.Module):
def __init__(
self, features: nn.Module, num_classes: int = 1000, init_weights: bool = True, dropout: float = 0.5
) -> None:
super(VGG, self).__init__()
super().__init__()
_log_api_usage_once(self)
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
......
......@@ -20,7 +20,7 @@ class Conv3DSimple(nn.Conv3d):
self, in_planes: int, out_planes: int, midplanes: Optional[int] = None, stride: int = 1, padding: int = 1
) -> None:
super(Conv3DSimple, self).__init__(
super().__init__(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=(3, 3, 3),
......@@ -36,7 +36,7 @@ class Conv3DSimple(nn.Conv3d):
class Conv2Plus1D(nn.Sequential):
def __init__(self, in_planes: int, out_planes: int, midplanes: int, stride: int = 1, padding: int = 1) -> None:
super(Conv2Plus1D, self).__init__(
super().__init__(
nn.Conv3d(
in_planes,
midplanes,
......@@ -62,7 +62,7 @@ class Conv3DNoTemporal(nn.Conv3d):
self, in_planes: int, out_planes: int, midplanes: Optional[int] = None, stride: int = 1, padding: int = 1
) -> None:
super(Conv3DNoTemporal, self).__init__(
super().__init__(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=(1, 3, 3),
......@@ -90,7 +90,7 @@ class BasicBlock(nn.Module):
) -> None:
midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)
super(BasicBlock, self).__init__()
super().__init__()
self.conv1 = nn.Sequential(
conv_builder(inplanes, planes, midplanes, stride), nn.BatchNorm3d(planes), nn.ReLU(inplace=True)
)
......@@ -125,7 +125,7 @@ class Bottleneck(nn.Module):
downsample: Optional[nn.Module] = None,
) -> None:
super(Bottleneck, self).__init__()
super().__init__()
midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)
# 1x1x1
......@@ -166,7 +166,7 @@ class BasicStem(nn.Sequential):
"""The default conv-batchnorm-relu stem"""
def __init__(self) -> None:
super(BasicStem, self).__init__(
super().__init__(
nn.Conv3d(3, 64, kernel_size=(3, 7, 7), stride=(1, 2, 2), padding=(1, 3, 3), bias=False),
nn.BatchNorm3d(64),
nn.ReLU(inplace=True),
......@@ -177,7 +177,7 @@ class R2Plus1dStem(nn.Sequential):
"""R(2+1)D stem is different than the default one as it uses separated 3D convolution"""
def __init__(self) -> None:
super(R2Plus1dStem, self).__init__(
super().__init__(
nn.Conv3d(3, 45, kernel_size=(1, 7, 7), stride=(1, 2, 2), padding=(0, 3, 3), bias=False),
nn.BatchNorm3d(45),
nn.ReLU(inplace=True),
......@@ -208,7 +208,7 @@ class VideoResNet(nn.Module):
num_classes (int, optional): Dimension of the final FC layer. Defaults to 400.
zero_init_residual (bool, optional): Zero init bottleneck residual BN. Defaults to False.
"""
super(VideoResNet, self).__init__()
super().__init__()
_log_api_usage_once(self)
self.inplanes = 64
......
......@@ -38,7 +38,7 @@ def _register_custom_op():
# ONNX doesn't support negative sampling_ratio
if sampling_ratio < 0:
warnings.warn(
"ONNX doesn't support negative sampling ratio," "therefore is is set to 0 in order to be exported."
"ONNX doesn't support negative sampling ratio, therefore is is set to 0 in order to be exported."
)
sampling_ratio = 0
return g.op(
......
......@@ -83,9 +83,7 @@ def deform_conv2d(
raise RuntimeError(
"the shape of the offset tensor at dimension 1 is not valid. It should "
"be a multiple of 2 * weight.size[2] * weight.size[3].\n"
"Got offset.shape[1]={}, while 2 * weight.size[2] * weight.size[3]={}".format(
offset.shape[1], 2 * weights_h * weights_w
)
f"Got offset.shape[1]={offset.shape[1]}, while 2 * weight.size[2] * weight.size[3]={2 * weights_h * weights_w}"
)
return torch.ops.torchvision.deform_conv2d(
......@@ -122,7 +120,7 @@ class DeformConv2d(nn.Module):
groups: int = 1,
bias: bool = True,
):
super(DeformConv2d, self).__init__()
super().__init__()
if in_channels % groups != 0:
raise ValueError("in_channels must be divisible by groups")
......
......@@ -74,7 +74,7 @@ class FeaturePyramidNetwork(nn.Module):
out_channels: int,
extra_blocks: Optional[ExtraFPNBlock] = None,
):
super(FeaturePyramidNetwork, self).__init__()
super().__init__()
self.inner_blocks = nn.ModuleList()
self.layer_blocks = nn.ModuleList()
for in_channels in in_channels_list:
......@@ -180,7 +180,7 @@ class LastLevelP6P7(ExtraFPNBlock):
"""
def __init__(self, in_channels: int, out_channels: int):
super(LastLevelP6P7, self).__init__()
super().__init__()
self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
for module in [self.p6, self.p7]:
......
......@@ -65,7 +65,7 @@ class FrozenBatchNorm2d(torch.nn.Module):
if n is not None:
warnings.warn("`n` argument is deprecated and has been renamed `num_features`", DeprecationWarning)
num_features = n
super(FrozenBatchNorm2d, self).__init__()
super().__init__()
self.eps = eps
self.register_buffer("weight", torch.ones(num_features))
self.register_buffer("bias", torch.zeros(num_features))
......@@ -86,7 +86,7 @@ class FrozenBatchNorm2d(torch.nn.Module):
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super(FrozenBatchNorm2d, self)._load_from_state_dict(
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
......
......@@ -42,7 +42,7 @@ def initLevelMapper(
return LevelMapper(k_min, k_max, canonical_scale, canonical_level, eps)
class LevelMapper(object):
class LevelMapper:
"""Determine which FPN level each RoI in a set of RoIs should map to based
on the heuristic in the FPN paper.
......@@ -129,7 +129,7 @@ class MultiScaleRoIAlign(nn.Module):
canonical_scale: int = 224,
canonical_level: int = 4,
):
super(MultiScaleRoIAlign, self).__init__()
super().__init__()
if isinstance(output_size, int):
output_size = (output_size, output_size)
self.featmap_names = featmap_names
......