"git@developer.sourcefind.cn:OpenDAS/ollama.git" did not exist on "64a9cc8f05c44a7267c73eaa8bd61ea077c5280a"
Unverified Commit 9c660c65 authored by vfdev, committed by GitHub

[proto] Few minor code improvements (#6562)



- calling torch.log once in the constructor saves a few ms per call
- all masks should be selected with is_within_crop_area in RandomIoUCrop
Co-authored-by: Vasilis Vryniotis <datumbox@users.noreply.github.com>
parent a4f53308
@@ -43,6 +43,8 @@ class RandomErasing(_RandomApplyTransform):
         self.value = value
         self.inplace = inplace
 
+        self._log_ratio = torch.log(torch.tensor(self.ratio))
+
     def _get_params(self, sample: Any) -> Dict[str, Any]:
         img_c, img_h, img_w = query_chw(sample)
@@ -62,7 +64,7 @@ class RandomErasing(_RandomApplyTransform):
         area = img_h * img_w
-        log_ratio = torch.log(torch.tensor(self.ratio))
+        log_ratio = self._log_ratio
         for _ in range(10):
             erase_area = area * torch.empty(1).uniform_(self.scale[0], self.scale[1]).item()
             aspect_ratio = torch.exp(
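The two RandomErasing hunks above hoist torch.log(torch.tensor(self.ratio)) out of the per-sample hot path into the constructor. A rough, machine-dependent micro-benchmark of the pattern (the 0.3–3.3 ratio and the iteration count are illustrative, not taken from torchvision):

    import timeit
    import torch

    ratio = (0.3, 3.3)

    def per_call():
        # old behavior: rebuild the tensor and take the log on every call
        return torch.log(torch.tensor(ratio))

    cached = torch.log(torch.tensor(ratio))

    def with_cache():
        # new behavior: just read the precomputed tensor
        return cached

    print("per call:", timeit.timeit(per_call, number=10_000))
    print("cached  :", timeit.timeit(with_cache, number=10_000))

Since _get_params runs once per sample in a data pipeline, shaving microseconds off each call is where the "few ms" in the commit message comes from.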
@@ -94,6 +94,8 @@ class RandomResizedCrop(Transform):
         self.interpolation = interpolation
         self.antialias = antialias
 
+        self._log_ratio = torch.log(torch.tensor(self.ratio))
+
     def _get_params(self, sample: Any) -> Dict[str, Any]:
         # vfdev-5: technically, this op can work on bboxes/segm masks only inputs without image in samples
         # What if we have multiple images/bboxes/masks of different sizes ?
@@ -101,7 +103,7 @@ class RandomResizedCrop(Transform):
         _, height, width = query_chw(sample)
         area = height * width
-        log_ratio = torch.log(torch.tensor(self.ratio))
+        log_ratio = self._log_ratio
         for _ in range(10):
             target_area = area * torch.empty(1).uniform_(self.scale[0], self.scale[1]).item()
             aspect_ratio = torch.exp(
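For context, _get_params samples the aspect ratio uniformly in log space, so a ratio and its reciprocal are equally likely; the cached log_ratio feeds straight into that draw. A minimal sketch of the sampling step on its own, with illustrative bounds:

    import torch

    ratio = (3 / 4, 4 / 3)                      # example aspect-ratio bounds
    log_ratio = torch.log(torch.tensor(ratio))  # computed once in __init__

    # Drawing uniformly between log(3/4) and log(4/3), then exponentiating,
    # yields a distribution symmetric around 1.0: wide and tall crops are
    # equally likely, which a plain uniform draw over the ratio would not give.
    aspect_ratio = torch.exp(torch.empty(1).uniform_(log_ratio[0], log_ratio[1])).item()
    print(aspect_ratio)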
@@ -706,7 +708,7 @@ class RandomIoUCrop(Transform):
             bboxes = output[is_within_crop_area]
             bboxes = F.clamp_bounding_box(bboxes, output.format, output.image_size)
             output = features.BoundingBox.new_like(output, bboxes)
-        elif isinstance(output, features.Mask) and output.shape[-3] > 1:
+        elif isinstance(output, features.Mask):
             # apply is_within_crop_area if mask is one-hot encoded
             masks = output[is_within_crop_area]
             output = features.Mask.new_like(output, masks)
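Dropping the output.shape[-3] > 1 guard means a Mask with a single leading channel is now also filtered, keeping masks aligned one-to-one with the bounding boxes that is_within_crop_area has already pruned. A toy illustration of the selection with plain tensors (shapes and values are made up, not from the torchvision test suite):

    import torch

    # Stand-ins: 3 instance masks (one-hot style, shape [N, H, W]) and a
    # boolean vector saying which of the 3 boxes survive the IoU crop.
    masks = torch.zeros(3, 4, 4, dtype=torch.bool)
    masks[0, :2, :2] = True
    masks[1, 2:, 2:] = True
    masks[2, :, 0] = True
    is_within_crop_area = torch.tensor([True, False, True])

    # The old guard skipped this selection when the leading dim was 1, so a
    # single mask could stay behind after its box was dropped; applying it
    # unconditionally keeps masks and boxes in sync.
    kept = masks[is_within_crop_area]
    print(kept.shape)  # torch.Size([2, 4, 4])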