Unverified Commit d367a01a authored by Jirka Borovec, committed by GitHub

Use f-strings almost everywhere, and other cleanups by applying pyupgrade (#4585)


Co-authored-by: Nicolas Hug <nicolashug@fb.com>
parent 50dfe207
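
The diff below is mechanical: pyupgrade drops the redundant `object` base class, rewrites `super(Class, self).__init__()` as `super().__init__()`, converts `str.format()` calls to f-strings, and merges implicit string-literal concatenations left over from earlier line wrapping. A minimal before/after sketch of those patterns (the class and attribute names here are illustrative, not torchvision code):

# Before: Python 2 compatible idioms (hypothetical example class)
class CoderDemo(object):
    def __init__(self, weights):
        super(CoderDemo, self).__init__()
        self.weights = weights

    def __repr__(self):
        return "CoderDemo(weights={})".format(self.weights)

# After: the same class as pyupgrade rewrites it for Python 3
class CoderDemo:
    def __init__(self, weights):
        super().__init__()  # zero-argument form works inside a class body
        self.weights = weights

    def __repr__(self):
        return f"CoderDemo(weights={self.weights})"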
@@ -7,7 +7,7 @@ from torch import Tensor, nn
from torchvision.ops.misc import FrozenBatchNorm2d
-class BalancedPositiveNegativeSampler(object):
+class BalancedPositiveNegativeSampler:
"""
This class samples batches, ensuring that they contain a fixed proportion of positives
"""
@@ -118,7 +118,7 @@ def encode_boxes(reference_boxes: Tensor, proposals: Tensor, weights: Tensor) ->
return targets
-class BoxCoder(object):
+class BoxCoder:
"""
This class encodes and decodes a set of bounding boxes into
the representation used for training the regressors.
@@ -217,7 +217,7 @@ class BoxCoder(object):
return pred_boxes
-class Matcher(object):
+class Matcher:
"""
This class assigns to each predicted "element" (e.g., a box) a ground-truth
element. Each predicted element will have exactly zero or one matches; each
@@ -275,9 +275,9 @@ class Matcher(object):
if match_quality_matrix.numel() == 0:
# empty targets or proposals not supported during training
if match_quality_matrix.shape[0] == 0:
-raise ValueError("No ground-truth boxes available for one of the images " "during training")
+raise ValueError("No ground-truth boxes available for one of the images during training")
else:
-raise ValueError("No proposal boxes available for one of the images " "during training")
+raise ValueError("No proposal boxes available for one of the images during training")
# match_quality_matrix is M (gt) x N (predicted)
# Max over gt elements (dim 0) to find best gt candidate for each prediction
......
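
Note on the string changes in the `Matcher` hunk above: adjacent Python string literals are concatenated at compile time, so the old two-literal form was already a single string at runtime; the cleanup merely merges the literals so the message reads as one piece. A quick runnable check:

old = "No ground-truth boxes available for one of the images " "during training"
new = "No ground-truth boxes available for one of the images during training"
assert old == new  # implicit concatenation: same string, just harder to read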
@@ -37,7 +37,7 @@ class AnchorGenerator(nn.Module):
sizes=((128, 256, 512),),
aspect_ratios=((0.5, 1.0, 2.0),),
):
-super(AnchorGenerator, self).__init__()
+super().__init__()
if not isinstance(sizes[0], (list, tuple)):
# TODO change this
@@ -216,7 +216,8 @@ class DefaultBoxGenerator(nn.Module):
for k, f_k in enumerate(grid_sizes):
# Now add the default boxes for each width-height pair
if self.steps is not None:
-x_f_k, y_f_k = [img_shape / self.steps[k] for img_shape in image_size]
+x_f_k = image_size[0] / self.steps[k]
+y_f_k = image_size[1] / self.steps[k]
else:
y_f_k, x_f_k = f_k
......
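
The `DefaultBoxGenerator` change above replaces a list comprehension plus tuple unpacking with two explicit divisions. Both forms compute the same values; the explicit version avoids allocating a throwaway list and makes the x/y pairing obvious. A sketch with made-up sizes and steps (the values are hypothetical, not the class defaults):

image_size, steps, k = (300, 300), [8, 16, 32], 0

x_old, y_old = [s / steps[k] for s in image_size]                   # previous form
x_new, y_new = image_size[0] / steps[k], image_size[1] / steps[k]   # new form
assert (x_old, y_old) == (x_new, y_new) == (37.5, 37.5)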
@@ -37,7 +37,7 @@ class BackboneWithFPN(nn.Module):
out_channels: int,
extra_blocks: Optional[ExtraFPNBlock] = None,
) -> None:
-super(BackboneWithFPN, self).__init__()
+super().__init__()
if extra_blocks is None:
extra_blocks = LastLevelMaxPool()
@@ -145,7 +145,7 @@ def _validate_trainable_layers(
warnings.warn(
"Changing trainable_backbone_layers has not effect if "
"neither pretrained nor pretrained_backbone have been set to True, "
-"falling back to trainable_backbone_layers={} so that all layers are trainable".format(max_value)
+f"falling back to trainable_backbone_layers={max_value} so that all layers are trainable"
)
trainable_backbone_layers = max_value
......
@@ -195,7 +195,7 @@ class FasterRCNN(GeneralizedRCNN):
raise ValueError("num_classes should be None when box_predictor is specified")
else:
if box_predictor is None:
-raise ValueError("num_classes should not be None when box_predictor " "is not specified")
+raise ValueError("num_classes should not be None when box_predictor is not specified")
out_channels = backbone.out_channels
@@ -255,7 +255,7 @@ class FasterRCNN(GeneralizedRCNN):
image_std = [0.229, 0.224, 0.225]
transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)
-super(FasterRCNN, self).__init__(backbone, rpn, roi_heads, transform)
+super().__init__(backbone, rpn, roi_heads, transform)
class TwoMLPHead(nn.Module):
@@ -268,7 +268,7 @@ class TwoMLPHead(nn.Module):
"""
def __init__(self, in_channels, representation_size):
-super(TwoMLPHead, self).__init__()
+super().__init__()
self.fc6 = nn.Linear(in_channels, representation_size)
self.fc7 = nn.Linear(representation_size, representation_size)
@@ -293,7 +293,7 @@ class FastRCNNPredictor(nn.Module):
"""
def __init__(self, in_channels, num_classes):
-super(FastRCNNPredictor, self).__init__()
+super().__init__()
self.cls_score = nn.Linear(in_channels, num_classes)
self.bbox_pred = nn.Linear(in_channels, num_classes * 4)
@@ -436,7 +436,7 @@ def _fasterrcnn_mobilenet_v3_large_fpn(
)
if pretrained:
if model_urls.get(weights_name, None) is None:
-raise ValueError("No checkpoint is available for model {}".format(weights_name))
+raise ValueError(f"No checkpoint is available for model {weights_name}")
state_dict = load_state_dict_from_url(model_urls[weights_name], progress=progress)
model.load_state_dict(state_dict)
return model
......
@@ -26,7 +26,7 @@ class GeneralizedRCNN(nn.Module):
"""
def __init__(self, backbone, rpn, roi_heads, transform):
-super(GeneralizedRCNN, self).__init__()
+super().__init__()
_log_api_usage_once(self)
self.transform = transform
self.backbone = backbone
@@ -65,11 +65,9 @@ class GeneralizedRCNN(nn.Module):
boxes = target["boxes"]
if isinstance(boxes, torch.Tensor):
if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
-raise ValueError(
-"Expected target boxes to be a tensor" "of shape [N, 4], got {:}.".format(boxes.shape)
-)
+raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.")
else:
-raise ValueError("Expected target boxes to be of type " "Tensor, got {:}.".format(type(boxes)))
+raise ValueError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
original_image_sizes: List[Tuple[int, int]] = []
for img in images:
@@ -91,7 +89,7 @@ class GeneralizedRCNN(nn.Module):
degen_bb: List[float] = boxes[bb_idx].tolist()
raise ValueError(
"All bounding boxes should have positive height and width."
-" Found invalid box {} for target at index {}.".format(degen_bb, target_idx)
+f" Found invalid box {degen_bb} for target at index {target_idx}."
)
features = self.backbone(images.tensors)
......
@@ -4,7 +4,7 @@ import torch
from torch import Tensor
-class ImageList(object):
+class ImageList:
"""
Structure that holds a list of images (of possibly
varying sizes) as a single tensor.
......
@@ -212,7 +212,7 @@ class KeypointRCNN(FasterRCNN):
keypoint_dim_reduced = 512  # == keypoint_layers[-1]
keypoint_predictor = KeypointRCNNPredictor(keypoint_dim_reduced, num_keypoints)
-super(KeypointRCNN, self).__init__(
+super().__init__(
backbone,
num_classes,
# transform parameters
@@ -260,7 +260,7 @@ class KeypointRCNNHeads(nn.Sequential):
d.append(nn.Conv2d(next_feature, out_channels, 3, stride=1, padding=1))
d.append(nn.ReLU(inplace=True))
next_feature = out_channels
-super(KeypointRCNNHeads, self).__init__(*d)
+super().__init__(*d)
for m in self.children():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
@@ -269,7 +269,7 @@ class KeypointRCNNHeads(nn.Sequential):
class KeypointRCNNPredictor(nn.Module):
def __init__(self, in_channels, num_keypoints):
-super(KeypointRCNNPredictor, self).__init__()
+super().__init__()
input_features = in_channels
deconv_kernel = 4
self.kps_score_lowres = nn.ConvTranspose2d(
......
@@ -212,7 +212,7 @@ class MaskRCNN(FasterRCNN):
mask_dim_reduced = 256
mask_predictor = MaskRCNNPredictor(mask_predictor_in_channels, mask_dim_reduced, num_classes)
-super(MaskRCNN, self).__init__(
+super().__init__(
backbone,
num_classes,
# transform parameters
@@ -263,13 +263,13 @@ class MaskRCNNHeads(nn.Sequential):
d = OrderedDict()
next_feature = in_channels
for layer_idx, layer_features in enumerate(layers, 1):
-d["mask_fcn{}".format(layer_idx)] = nn.Conv2d(
+d[f"mask_fcn{layer_idx}"] = nn.Conv2d(
next_feature, layer_features, kernel_size=3, stride=1, padding=dilation, dilation=dilation
)
-d["relu{}".format(layer_idx)] = nn.ReLU(inplace=True)
+d[f"relu{layer_idx}"] = nn.ReLU(inplace=True)
next_feature = layer_features
-super(MaskRCNNHeads, self).__init__(d)
+super().__init__(d)
for name, param in self.named_parameters():
if "weight" in name:
nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
@@ -279,7 +279,7 @@ class MaskRCNNHeads(nn.Sequential):
class MaskRCNNPredictor(nn.Sequential):
def __init__(self, in_channels, dim_reduced, num_classes):
-super(MaskRCNNPredictor, self).__init__(
+super().__init__(
OrderedDict(
[
("conv5_mask", nn.ConvTranspose2d(in_channels, dim_reduced, 2, 2, 0)),
......
@@ -493,11 +493,9 @@ class RetinaNet(nn.Module):
boxes = target["boxes"]
if isinstance(boxes, torch.Tensor):
if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
-raise ValueError(
-"Expected target boxes to be a tensor" "of shape [N, 4], got {:}.".format(boxes.shape)
-)
+raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.")
else:
-raise ValueError("Expected target boxes to be of type " "Tensor, got {:}.".format(type(boxes)))
+raise ValueError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
# get the original image sizes
original_image_sizes: List[Tuple[int, int]] = []
@@ -521,7 +519,7 @@ class RetinaNet(nn.Module):
degen_bb: List[float] = boxes[bb_idx].tolist()
raise ValueError(
"All bounding boxes should have positive height and width."
-" Found invalid box {} for target at index {}.".format(degen_bb, target_idx)
+f" Found invalid box {degen_bb} for target at index {target_idx}."
)
# get the features from the backbone
......
@@ -517,7 +517,7 @@ class RoIHeads(nn.Module):
keypoint_head=None,
keypoint_predictor=None,
):
-super(RoIHeads, self).__init__()
+super().__init__()
self.box_similarity = box_ops.box_iou
# assign ground-truth boxes for each proposal
......
@@ -34,7 +34,7 @@ class RPNHead(nn.Module):
"""
def __init__(self, in_channels: int, num_anchors: int) -> None:
-super(RPNHead, self).__init__()
+super().__init__()
self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1)
self.bbox_pred = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=1, stride=1)
@@ -132,7 +132,7 @@ class RegionProposalNetwork(torch.nn.Module):
nms_thresh: float,
score_thresh: float = 0.0,
) -> None:
-super(RegionProposalNetwork, self).__init__()
+super().__init__()
self.anchor_generator = anchor_generator
self.head = head
self.box_coder = det_utils.BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
......
@@ -313,11 +313,9 @@ class SSD(nn.Module):
boxes = target["boxes"]
if isinstance(boxes, torch.Tensor):
if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
-raise ValueError(
-"Expected target boxes to be a tensor" "of shape [N, 4], got {:}.".format(boxes.shape)
-)
+raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.")
else:
-raise ValueError("Expected target boxes to be of type " "Tensor, got {:}.".format(type(boxes)))
+raise ValueError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
# get the original image sizes
original_image_sizes: List[Tuple[int, int]] = []
@@ -339,7 +337,7 @@ class SSD(nn.Module):
degen_bb: List[float] = boxes[bb_idx].tolist()
raise ValueError(
"All bounding boxes should have positive height and width."
-" Found invalid box {} for target at index {}.".format(degen_bb, target_idx)
+f" Found invalid box {degen_bb} for target at index {target_idx}."
)
# get the features from the backbone
@@ -625,7 +623,7 @@ def ssd300_vgg16(
if pretrained:
weights_name = "ssd300_vgg16_coco"
if model_urls.get(weights_name, None) is None:
-raise ValueError("No checkpoint is available for model {}".format(weights_name))
+raise ValueError(f"No checkpoint is available for model {weights_name}")
state_dict = load_state_dict_from_url(model_urls[weights_name], progress=progress)
model.load_state_dict(state_dict)
return model
@@ -268,7 +268,7 @@ def ssdlite320_mobilenet_v3_large(
if pretrained:
weights_name = "ssdlite320_mobilenet_v3_large_coco"
if model_urls.get(weights_name, None) is None:
-raise ValueError("No checkpoint is available for model {}".format(weights_name))
+raise ValueError(f"No checkpoint is available for model {weights_name}")
state_dict = load_state_dict_from_url(model_urls[weights_name], progress=progress)
model.load_state_dict(state_dict)
return model
@@ -92,7 +92,7 @@ class GeneralizedRCNNTransform(nn.Module):
size_divisible: int = 32,
fixed_size: Optional[Tuple[int, int]] = None,
):
-super(GeneralizedRCNNTransform, self).__init__()
+super().__init__()
if not isinstance(min_size, (list, tuple)):
min_size = (min_size,)
self.min_size = min_size
@@ -123,9 +123,7 @@ class GeneralizedRCNNTransform(nn.Module):
target_index = targets[i] if targets is not None else None
if image.dim() != 3:
-raise ValueError(
-"images is expected to be a list of 3d tensors " "of shape [C, H, W], got {}".format(image.shape)
-)
+raise ValueError(f"images is expected to be a list of 3d tensors of shape [C, H, W], got {image.shape}")
image = self.normalize(image)
image, target_index = self.resize(image, target_index)
images[i] = image
@@ -264,10 +262,8 @@ class GeneralizedRCNNTransform(nn.Module):
def __repr__(self) -> str:
format_string = self.__class__.__name__ + "("
_indent = "\n "
-format_string += "{0}Normalize(mean={1}, std={2})".format(_indent, self.image_mean, self.image_std)
-format_string += "{0}Resize(min_size={1}, max_size={2}, mode='bilinear')".format(
-_indent, self.min_size, self.max_size
-)
+format_string += f"{_indent}Normalize(mean={self.image_mean}, std={self.image_std})"
+format_string += f"{_indent}Resize(min_size={self.min_size}, max_size={self.max_size}, mode='bilinear')"
format_string += "\n)"
return format_string
......
@@ -197,7 +197,7 @@ class EfficientNet(nn.Module):
)
# building inverted residual blocks
-total_stage_blocks = sum([cnf.num_layers for cnf in inverted_residual_setting])
+total_stage_blocks = sum(cnf.num_layers for cnf in inverted_residual_setting)
stage_block_id = 0
for cnf in inverted_residual_setting:
stage: List[nn.Module] = []
@@ -287,7 +287,7 @@ def _efficientnet(
model = EfficientNet(inverted_residual_setting, dropout, **kwargs)
if pretrained:
if model_urls.get(arch, None) is None:
-raise ValueError("No checkpoint is available for model type {}".format(arch))
+raise ValueError(f"No checkpoint is available for model type {arch}")
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
model.load_state_dict(state_dict)
return model
......
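
The `EfficientNet` change above is another small pyupgrade win: `sum()` accepts any iterable, so a generator expression gives the same total without materializing an intermediate list. A toy check (the `SimpleNamespace` configs are stand-ins for the real `inverted_residual_setting` entries):

from types import SimpleNamespace

inverted_residual_setting = [SimpleNamespace(num_layers=n) for n in (2, 3, 4)]
assert (
    sum([cnf.num_layers for cnf in inverted_residual_setting])   # builds a list, then sums
    == sum(cnf.num_layers for cnf in inverted_residual_setting)  # sums lazily
    == 9
)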
@@ -26,7 +26,7 @@ class LeafModuleAwareTracer(fx.Tracer):
if "leaf_modules" in kwargs:
leaf_modules = kwargs.pop("leaf_modules")
self.leaf_modules = leaf_modules
-super(LeafModuleAwareTracer, self).__init__(*args, **kwargs)
+super().__init__(*args, **kwargs)
def is_leaf_module(self, m: nn.Module, module_qualname: str) -> bool:
if isinstance(m, tuple(self.leaf_modules)):
@@ -54,7 +54,7 @@ class NodePathTracer(LeafModuleAwareTracer):
"""
def __init__(self, *args, **kwargs):
-super(NodePathTracer, self).__init__(*args, **kwargs)
+super().__init__(*args, **kwargs)
# Track the qualified name of the Node being traced
self.current_module_qualname = ""
# A map from FX Node to the qualified name
@@ -168,7 +168,7 @@ def _warn_graph_differences(train_tracer: NodePathTracer, eval_tracer: NodePathT
"are a subsequence of those obtained in eval mode. "
)
else:
-msg = "The nodes obtained by tracing the model in train mode " "are different to those obtained in eval mode. "
+msg = "The nodes obtained by tracing the model in train mode are different to those obtained in eval mode. "
warnings.warn(msg + suggestion_msg)
@@ -399,17 +399,17 @@ def create_feature_extractor(
"""
is_training = model.training
-assert any(arg is not None for arg in [return_nodes, train_return_nodes, eval_return_nodes]), (
-"Either `return_nodes` or `train_return_nodes` and " "`eval_return_nodes` together, should be specified"
-)
+assert any(
+arg is not None for arg in [return_nodes, train_return_nodes, eval_return_nodes]
+), "Either `return_nodes` or `train_return_nodes` and `eval_return_nodes` together, should be specified"
-assert not ((train_return_nodes is None) ^ (eval_return_nodes is None)), (
-"If any of `train_return_nodes` and `eval_return_nodes` are " "specified, then both should be specified"
-)
+assert not (
+(train_return_nodes is None) ^ (eval_return_nodes is None)
+), "If any of `train_return_nodes` and `eval_return_nodes` are specified, then both should be specified"
-assert (return_nodes is None) ^ (train_return_nodes is None), (
-"If `train_return_nodes` and `eval_return_nodes` are specified, " "then both should be specified"
-)
+assert (return_nodes is None) ^ (
+train_return_nodes is None
+), "If `train_return_nodes` and `eval_return_nodes` are specified, then both should be specified"
# Put *_return_nodes into Dict[str, str] format
def to_strdict(n) -> Dict[str, str]:
......
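
The assert reshuffles in `create_feature_extractor` above are pure formatting: with the implicit concatenations merged, black wraps the condition rather than the message, and both layouts still parse as the two-argument form `assert <condition>, <message>`. The trap to avoid when wrapping such asserts by hand is parenthesizing condition and message together, which creates an always-true tuple that never fails. A small demonstration with toy values (not the real call):

return_nodes, train_return_nodes, eval_return_nodes = None, None, None

try:
    assert any(
        arg is not None for arg in [return_nodes, train_return_nodes, eval_return_nodes]
    ), "Either `return_nodes` or `train_return_nodes` and `eval_return_nodes` together, should be specified"
except AssertionError:
    pass  # raises as intended; `assert (cond, "msg")` would silently pass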
@@ -45,7 +45,7 @@ def googlenet(pretrained: bool = False, progress: bool = True, **kwargs: Any) ->
kwargs["aux_logits"] = False
if kwargs["aux_logits"]:
warnings.warn(
-"auxiliary heads in the pretrained googlenet model are NOT pretrained, " "so make sure to train them"
+"auxiliary heads in the pretrained googlenet model are NOT pretrained, so make sure to train them"
)
original_aux_logits = kwargs["aux_logits"]
kwargs["aux_logits"] = True
@@ -75,7 +75,7 @@ class GoogLeNet(nn.Module):
dropout: float = 0.2,
dropout_aux: float = 0.7,
) -> None:
-super(GoogLeNet, self).__init__()
+super().__init__()
_log_api_usage_once(self)
if blocks is None:
blocks = [BasicConv2d, Inception, InceptionAux]
@@ -231,7 +231,7 @@ class Inception(nn.Module):
pool_proj: int,
conv_block: Optional[Callable[..., nn.Module]] = None,
) -> None:
-super(Inception, self).__init__()
+super().__init__()
if conv_block is None:
conv_block = BasicConv2d
self.branch1 = conv_block(in_channels, ch1x1, kernel_size=1)
@@ -274,7 +274,7 @@ class InceptionAux(nn.Module):
conv_block: Optional[Callable[..., nn.Module]] = None,
dropout: float = 0.7,
) -> None:
-super(InceptionAux, self).__init__()
+super().__init__()
if conv_block is None:
conv_block = BasicConv2d
self.conv = conv_block(in_channels, 128, kernel_size=1)
@@ -303,7 +303,7 @@ class InceptionAux(nn.Module):
class BasicConv2d(nn.Module):
def __init__(self, in_channels: int, out_channels: int, **kwargs: Any) -> None:
-super(BasicConv2d, self).__init__()
+super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
......
@@ -73,7 +73,7 @@ class Inception3(nn.Module):
init_weights: Optional[bool] = None,
dropout: float = 0.5,
) -> None:
-super(Inception3, self).__init__()
+super().__init__()
_log_api_usage_once(self)
if inception_blocks is None:
inception_blocks = [BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD, InceptionE, InceptionAux]
@@ -214,7 +214,7 @@ class InceptionA(nn.Module):
def __init__(
self, in_channels: int, pool_features: int, conv_block: Optional[Callable[..., nn.Module]] = None
) -> None:
-super(InceptionA, self).__init__()
+super().__init__()
if conv_block is None:
conv_block = BasicConv2d
self.branch1x1 = conv_block(in_channels, 64, kernel_size=1)
@@ -251,7 +251,7 @@ class InceptionA(nn.Module):
class InceptionB(nn.Module):
def __init__(self, in_channels: int, conv_block: Optional[Callable[..., nn.Module]] = None) -> None:
-super(InceptionB, self).__init__()
+super().__init__()
if conv_block is None:
conv_block = BasicConv2d
self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2)
@@ -281,7 +281,7 @@ class InceptionC(nn.Module):
def __init__(
self, in_channels: int, channels_7x7: int, conv_block: Optional[Callable[..., nn.Module]] = None
) -> None:
-super(InceptionC, self).__init__()
+super().__init__()
if conv_block is None:
conv_block = BasicConv2d
self.branch1x1 = conv_block(in_channels, 192, kernel_size=1)
@@ -325,7 +325,7 @@ class InceptionC(nn.Module):
class InceptionD(nn.Module):
def __init__(self, in_channels: int, conv_block: Optional[Callable[..., nn.Module]] = None) -> None:
-super(InceptionD, self).__init__()
+super().__init__()
if conv_block is None:
conv_block = BasicConv2d
self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1)
@@ -356,7 +356,7 @@ class InceptionD(nn.Module):
class InceptionE(nn.Module):
def __init__(self, in_channels: int, conv_block: Optional[Callable[..., nn.Module]] = None) -> None:
-super(InceptionE, self).__init__()
+super().__init__()
if conv_block is None:
conv_block = BasicConv2d
self.branch1x1 = conv_block(in_channels, 320, kernel_size=1)
@@ -405,7 +405,7 @@ class InceptionAux(nn.Module):
def __init__(
self, in_channels: int, num_classes: int, conv_block: Optional[Callable[..., nn.Module]] = None
) -> None:
-super(InceptionAux, self).__init__()
+super().__init__()
if conv_block is None:
conv_block = BasicConv2d
self.conv0 = conv_block(in_channels, 128, kernel_size=1)
@@ -434,7 +434,7 @@ class InceptionAux(nn.Module):
class BasicConv2d(nn.Module):
def __init__(self, in_channels: int, out_channels: int, **kwargs: Any) -> None:
-super(BasicConv2d, self).__init__()
+super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
......
@@ -26,7 +26,7 @@ class _InvertedResidual(nn.Module):
def __init__(
self, in_ch: int, out_ch: int, kernel_size: int, stride: int, expansion_factor: int, bn_momentum: float = 0.1
) -> None:
-super(_InvertedResidual, self).__init__()
+super().__init__()
assert stride in [1, 2]
assert kernel_size in [3, 5]
mid_ch = in_ch * expansion_factor
@@ -97,7 +97,7 @@ class MNASNet(torch.nn.Module):
_version = 2
def __init__(self, alpha: float, num_classes: int = 1000, dropout: float = 0.2) -> None:
-super(MNASNet, self).__init__()
+super().__init__()
_log_api_usage_once(self)
assert alpha > 0.0
self.alpha = alpha
@@ -193,14 +193,14 @@ class MNASNet(torch.nn.Module):
UserWarning,
)
-super(MNASNet, self)._load_from_state_dict(
+super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def _load_pretrained(model_name: str, model: nn.Module, progress: bool) -> None:
if model_name not in _MODEL_URLS or _MODEL_URLS[model_name] is None:
-raise ValueError("No checkpoint is available for model type {}".format(model_name))
+raise ValueError(f"No checkpoint is available for model type {model_name}")
checkpoint_url = _MODEL_URLS[model_name]
model.load_state_dict(load_state_dict_from_url(checkpoint_url, progress=progress))
......
@@ -42,7 +42,7 @@ class InvertedResidual(nn.Module):
def __init__(
self, inp: int, oup: int, stride: int, expand_ratio: int, norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
-super(InvertedResidual, self).__init__()
+super().__init__()
self.stride = stride
assert stride in [1, 2]
@@ -110,7 +110,7 @@ class MobileNetV2(nn.Module):
dropout (float): The droupout probability
"""
-super(MobileNetV2, self).__init__()
+super().__init__()
_log_api_usage_once(self)
if block is None:
@@ -137,8 +137,7 @@ class MobileNetV2(nn.Module):
# only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError(
-"inverted_residual_setting should be non-empty "
-"or a 4-element list, got {}".format(inverted_residual_setting)
+f"inverted_residual_setting should be non-empty or a 4-element list, got {inverted_residual_setting}"
)
# building first layer
......