"git@developer.sourcefind.cn:chenpangpang/transformers.git" did not exist on "6e85bccafc8a175c0cc2eed27fd61af087483085"
Unverified Commit a35ea570 authored by Sounak Dey's avatar Sounak Dey Committed by GitHub
Browse files

Update image_processing_deformable_detr.py (#28561)

* Update image_processing_deformable_detr.py

* Changes after running make fix-copies
parent e201864b
...@@ -1414,13 +1414,14 @@ class ConditionalDetrImageProcessor(BaseImageProcessor): ...@@ -1414,13 +1414,14 @@ class ConditionalDetrImageProcessor(BaseImageProcessor):
boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
# and from relative [0, 1] to absolute [0, height] coordinates # and from relative [0, 1] to absolute [0, height] coordinates
if isinstance(target_sizes, List): if target_sizes is not None:
img_h = torch.Tensor([i[0] for i in target_sizes]) if isinstance(target_sizes, List):
img_w = torch.Tensor([i[1] for i in target_sizes]) img_h = torch.Tensor([i[0] for i in target_sizes])
else: img_w = torch.Tensor([i[1] for i in target_sizes])
img_h, img_w = target_sizes.unbind(1) else:
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) img_h, img_w = target_sizes.unbind(1)
boxes = boxes * scale_fct[:, None, :] scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
boxes = boxes * scale_fct[:, None, :]
results = [] results = []
for s, l, b in zip(scores, labels, boxes): for s, l, b in zip(scores, labels, boxes):
......
...@@ -1411,13 +1411,14 @@ class DeformableDetrImageProcessor(BaseImageProcessor): ...@@ -1411,13 +1411,14 @@ class DeformableDetrImageProcessor(BaseImageProcessor):
boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
# and from relative [0, 1] to absolute [0, height] coordinates # and from relative [0, 1] to absolute [0, height] coordinates
if isinstance(target_sizes, List): if target_sizes is not None:
img_h = torch.Tensor([i[0] for i in target_sizes]) if isinstance(target_sizes, List):
img_w = torch.Tensor([i[1] for i in target_sizes]) img_h = torch.Tensor([i[0] for i in target_sizes])
else: img_w = torch.Tensor([i[1] for i in target_sizes])
img_h, img_w = target_sizes.unbind(1) else:
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) img_h, img_w = target_sizes.unbind(1)
boxes = boxes * scale_fct[:, None, :] scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
boxes = boxes * scale_fct[:, None, :]
results = [] results = []
for s, l, b in zip(scores, labels, boxes): for s, l, b in zip(scores, labels, boxes):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment