Unverified commit f7fae490 authored by Vasilis Vryniotis, committed by GitHub

Static analysis improvements python (#3263)

* Simplify code and remove unused vars.

* Simplify expressions and remove unused parentheses.

* Jit fixes.

* Making check more readable.

* fixing styles
parent bf211dac
@@ -471,8 +471,8 @@ def read_sn3_pascalvincent_tensor(path: Union[str, IO], strict: bool = True) ->
     magic = get_int(data[0:4])
     nd = magic % 256
     ty = magic // 256
-    assert nd >= 1 and nd <= 3
-    assert ty >= 8 and ty <= 14
+    assert 1 <= nd <= 3
+    assert 8 <= ty <= 14
     m = SN3_PASCALVINCENT_TYPEMAP[ty]
     s = [get_int(data[4 * (i + 1): 4 * (i + 2)]) for i in range(nd)]
     parsed = np.frombuffer(data, dtype=m[1], offset=(4 * (nd + 1)))
...
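For context, the chained-comparison form adopted above is built into Python and evaluates exactly like the explicit `and` version (with the shared operand evaluated only once). A minimal illustration, independent of the torchvision code (the helper name is made up):

    def check_header(nd, ty):
        # Two equivalent ways to express a closed range check.
        old_style = nd >= 1 and nd <= 3
        chained = 1 <= nd <= 3
        assert old_style == chained
        return chained and 8 <= ty <= 14

    assert check_header(3, 8) is True
    assert check_header(4, 8) is False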
@@ -192,7 +192,6 @@ def read_info_file(data_dir: str, info_file: str) -> torch.Tensor:
     """Return a Tensor containing the list of labels
     Read the file and keep only the ID of the 3D point.
     """
-    labels = []
     with open(os.path.join(data_dir, info_file), 'r') as f:
         labels = [int(line.split()[0]) for line in f]
     return torch.LongTensor(labels)
...
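The deleted `labels = []` was a dead store: the list comprehension rebinds the name unconditionally, so the initializer never had an effect. A small standalone sketch of the same pattern (the file contents are made up for the example):

    import io

    # Each line starts with the integer ID of a 3D point; only that ID is kept.
    fake_info_file = io.StringIO("12 0.3 0.7\n12 0.1 0.9\n37 0.5 0.5\n")
    labels = [int(line.split()[0]) for line in fake_info_file]  # no prior labels = [] needed
    assert labels == [12, 12, 37]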
@@ -303,7 +303,7 @@ def _read_video_timestamps_from_file(filename):
         1,  # audio_timebase_den
     )
     _vframes, vframe_pts, vtimebase, vfps, vduration, \
-        _aframes, aframe_pts, atimebase, asample_rate, aduration = (result)
+        _aframes, aframe_pts, atimebase, asample_rate, aduration = result
     info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration)
     vframe_pts = vframe_pts.numpy().tolist()
...
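Dropping the parentheses around `result` is purely cosmetic: `(result)` is a parenthesized expression, not a one-element tuple, so the multi-target unpacking behaves identically. A toy example with illustrative values:

    result = ("video", [0, 1, 2], "audio", 44100)
    kind, pts, stream, rate = (result)    # parentheses are redundant grouping
    kind2, pts2, stream2, rate2 = result  # identical behaviour
    assert (kind, pts, stream, rate) == (kind2, pts2, stream2, rate2)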
@@ -87,7 +87,7 @@ def resnet_fpn_backbone(
         norm_layer=norm_layer)
     # select layers that wont be frozen
-    assert trainable_layers <= 5 and trainable_layers >= 0
+    assert 0 <= trainable_layers <= 5
     layers_to_train = ['layer4', 'layer3', 'layer2', 'layer1', 'conv1'][:trainable_layers]
     # freeze layers only if pretrained backbone is used
     for name, parameter in backbone.named_parameters():
...
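The assert and the slice work together: `trainable_layers` must lie between 0 and 5, and the slice keeps the first N entries of the list (ordered from the last ResNet stage backwards), so any parameter whose name does not start with one of those prefixes gets frozen. A rough sketch of that selection logic with hypothetical parameter names; this is not the torchvision implementation itself:

    trainable_layers = 3
    assert 0 <= trainable_layers <= 5
    layers_to_train = ['layer4', 'layer3', 'layer2', 'layer1', 'conv1'][:trainable_layers]
    # -> ['layer4', 'layer3', 'layer2'] when trainable_layers == 3

    params = ['conv1.weight', 'layer1.0.conv1.weight', 'layer3.0.conv2.weight', 'layer4.1.conv1.weight']
    frozen = [p for p in params if not any(p.startswith(prefix) for prefix in layers_to_train)]
    assert frozen == ['conv1.weight', 'layer1.0.conv1.weight']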
@@ -106,6 +106,6 @@ class GeneralizedRCNN(nn.Module):
             if not self._has_warned:
                 warnings.warn("RCNN always returns a (Losses, Detections) tuple in scripting")
                 self._has_warned = True
-            return (losses, detections)
+            return losses, detections
         else:
             return self.eager_outputs(losses, detections)
@@ -190,7 +190,7 @@ def _onnx_heatmaps_to_keypoints(maps, maps_i, roi_map_width, roi_map_height,
     xy_preds_i_0 = x + offset_x_i.to(dtype=torch.float32)
     xy_preds_i_1 = y + offset_y_i.to(dtype=torch.float32)
-    xy_preds_i_2 = torch.ones((xy_preds_i_1.shape), dtype=torch.float32)
+    xy_preds_i_2 = torch.ones(xy_preds_i_1.shape, dtype=torch.float32)
     xy_preds_i = torch.stack([xy_preds_i_0.to(dtype=torch.float32),
                               xy_preds_i_1.to(dtype=torch.float32),
                               xy_preds_i_2.to(dtype=torch.float32)], 0)
@@ -795,7 +795,6 @@ class RoIHeads(nn.Module):
                 mask_features = self.mask_head(mask_features)
                 mask_logits = self.mask_predictor(mask_features)
             else:
-                mask_logits = torch.tensor(0)
                 raise Exception("Expected mask_roi_pool to be not None")
             loss_mask = {}
...
@@ -30,7 +30,7 @@ class Conv3DSimple(nn.Conv3d):
     @staticmethod
     def get_downsample_stride(stride):
-        return (stride, stride, stride)
+        return stride, stride, stride

 class Conv2Plus1D(nn.Sequential):
@@ -53,7 +53,7 @@ class Conv2Plus1D(nn.Sequential):
     @staticmethod
     def get_downsample_stride(stride):
-        return (stride, stride, stride)
+        return stride, stride, stride

 class Conv3DNoTemporal(nn.Conv3d):
@@ -75,7 +75,7 @@ class Conv3DNoTemporal(nn.Conv3d):
     @staticmethod
     def get_downsample_stride(stride):
-        return (1, stride, stride)
+        return 1, stride, stride

 class BasicBlock(nn.Module):
...
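As with the `(losses, detections)` change above, the parentheses in these return statements were redundant: the comma is what creates the tuple, so `return 1, stride, stride` produces exactly the same object. A quick check with an illustrative stand-in for the static method:

    def get_downsample_stride(stride):
        return 1, stride, stride  # same tuple as `return (1, stride, stride)`

    assert get_downsample_stride(2) == (1, 2, 2)
    assert isinstance(get_downsample_stride(2), tuple)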
@@ -99,9 +99,7 @@ class FeaturePyramidNetwork(nn.Module):
         This is equivalent to self.inner_blocks[idx](x),
         but torchscript doesn't support this yet
         """
-        num_blocks = 0
-        for m in self.inner_blocks:
-            num_blocks += 1
+        num_blocks = len(self.inner_blocks)
         if idx < 0:
             idx += num_blocks
         i = 0
@@ -117,9 +115,7 @@ class FeaturePyramidNetwork(nn.Module):
         This is equivalent to self.layer_blocks[idx](x),
         but torchscript doesn't support this yet
         """
-        num_blocks = 0
-        for m in self.layer_blocks:
-            num_blocks += 1
+        num_blocks = len(self.layer_blocks)
         if idx < 0:
             idx += num_blocks
         i = 0
...
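The counting loop was a workaround for TorchScript not handling `len()` on an `nn.ModuleList`; by the time of this change `len(self.inner_blocks)` scripts fine, while direct indexing with a runtime integer (`self.inner_blocks[idx]`, as the docstring notes) still did not, which is why the surrounding iterate-with-counter pattern stays. A rough sketch of that pattern under those assumptions, with a made-up module, not the verbatim torchvision method:

    import torch
    from torch import nn

    class IndexableBlocks(nn.Module):
        def __init__(self):
            super().__init__()
            self.inner_blocks = nn.ModuleList(nn.Linear(4, 4) for _ in range(3))

        def forward(self, x: torch.Tensor, idx: int) -> torch.Tensor:
            # Equivalent to self.inner_blocks[idx](x), written so TorchScript can compile it.
            num_blocks = len(self.inner_blocks)  # len() on a ModuleList is scriptable
            if idx < 0:
                idx += num_blocks
            out = x
            i = 0
            for module in self.inner_blocks:  # apply only the idx-th module
                if i == idx:
                    out = module(x)
                i += 1
            return out

    scripted = torch.jit.script(IndexableBlocks())
    assert scripted(torch.randn(2, 4), -1).shape == (2, 4)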
@@ -45,7 +45,7 @@ def _max_value(dtype: torch.dtype) -> float:
             max_value = next_value
             bits *= 2
         else:
-            return max_value.item()
+            break
     return max_value.item()
...
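Replacing the early `return max_value.item()` with `break` leaves the single return after the loop as the only exit point, which is what the static-analysis finding was about. A deliberately simplified stand-in with the same control-flow shape (not the torchvision `_max_value` itself):

    def largest_power_of_two_at_most(n: int) -> int:
        # Double `value` until the next step would overshoot, then break and
        # fall through to the single return below.
        value = 1
        while True:
            next_value = value * 2
            if next_value <= n:
                value = next_value
            else:
                break  # was an early `return` in the pre-refactor shape
        return value

    assert largest_power_of_two_at_most(100) == 64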