Commit 06bd20da authored by Eric Mintun

Fix lint.

parent ca981bf3
...
@@ -144,8 +144,8 @@ class Block(nn.Module):
             rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
             window_size (int): Window size for window attention blocks. If it equals 0, then
                 use global attention.
-            input_size (tuple(int, int) or None): Input resolution for calculating the relative positional
-                parameter size.
+            input_size (tuple(int, int) or None): Input resolution for calculating the relative
+                positional parameter size.
         """
         super().__init__()
         self.norm1 = norm_layer(dim)
...
@@ -201,8 +201,8 @@ class Attention(nn.Module):
             qkv_bias (bool): If True, add a learnable bias to query, key, value.
             rel_pos (bool): If True, add relative positional embeddings to the attention map.
             rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
-            input_size (tuple(int, int) or None): Input resolution for calculating the relative positional
-                parameter size.
+            input_size (tuple(int, int) or None): Input resolution for calculating the relative
+                positional parameter size.
         """
         super().__init__()
         self.num_heads = num_heads
...
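Both hunks above rewrap the same `input_size` docstring line to satisfy the line-length lint. `input_size` matters because it fixes the shape of the relative positional embedding tables: each spatial axis needs one embedding per possible relative offset. A minimal sketch of that sizing rule, with illustrative names and values that are assumptions rather than code from this commit:

import torch
import torch.nn as nn

input_size = (14, 14)  # feature-map resolution (h, w); illustrative value
head_dim = 64          # per-head channel dim; illustrative value

# Relative offsets along an axis of length n range over [-(n - 1), n - 1],
# i.e. 2 * n - 1 distinct values, so each table has 2 * n - 1 rows.
rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))

print(rel_pos_h.shape, rel_pos_w.shape)  # torch.Size([27, 64]) torch.Size([27, 64])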
...
@@ -82,7 +82,7 @@ class SamOnnxModel(nn.Module):
         )
         prepadded_size = self.resize_longest_image_size(orig_im_size, self.img_size).to(torch.int64)
-        masks = masks[..., : prepadded_size[0], : prepadded_size[1]]
+        masks = masks[..., : prepadded_size[0], : prepadded_size[1]]  # type: ignore
         orig_im_size = orig_im_size.to(torch.int64)
         h, w = orig_im_size[0], orig_im_size[1]
...
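The `SamOnnxModel` hunk only appends `# type: ignore` to the mask-cropping line: the masks, padded to the square model input, are sliced back to their pre-padding size using 0-dim tensor bounds, which PyTorch accepts at runtime but mypy cannot verify as valid slice indices. A minimal, self-contained sketch of that pattern (the helper name and shapes are illustrative, not from the repository):

import torch

def crop_to_prepadded(masks: torch.Tensor, prepadded_size: torch.Tensor) -> torch.Tensor:
    # prepadded_size[0] and prepadded_size[1] are 0-dim integer tensors; they
    # implement __index__, so the slice works at runtime, but mypy flags it.
    return masks[..., : prepadded_size[0], : prepadded_size[1]]  # type: ignore

masks = torch.zeros(1, 1, 1024, 1024)        # padded to the square model input
prepadded_size = torch.tensor([768, 1024])   # (h, w) before padding
print(crop_to_prepadded(masks, prepadded_size).shape)  # torch.Size([1, 1, 768, 1024])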