"examples/git@developer.sourcefind.cn:OpenDAS/nni.git" did not exist on "a6621cef61d7befcab499950c4ed1416cdaa3cf6"
Unverified commit 24124709, authored by Yih-Dar and committed by GitHub

Fix torch device issues (#20584)


Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
parent 699e9043
src/transformers/models/conditional_detr/image_processing_conditional_detr.py

@@ -1290,7 +1290,7 @@ class ConditionalDetrImageProcessor(BaseImageProcessor):
         # and from relative [0, 1] to absolute [0, height] coordinates
         img_h, img_w = target_sizes.unbind(1)
-        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
+        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
         boxes = boxes * scale_fct[:, None, :]
         results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)]

@@ -1340,7 +1340,7 @@ class ConditionalDetrImageProcessor(BaseImageProcessor):
             img_h = torch.Tensor([i[0] for i in target_sizes])
             img_w = torch.Tensor([i[1] for i in target_sizes])
         else:
             img_h, img_w = target_sizes.unbind(1)
-        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
+        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
         boxes = boxes * scale_fct[:, None, :]
         results = []
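The hunks above move the .to(boxes.device) cast out of the deprecated post_process and into post_process_object_detection. The reason is the list branch: when target_sizes is a Python list, torch.Tensor([...]) builds img_h and img_w on the CPU, so scale_fct ends up on the CPU even when boxes sits on a GPU, and the multiply fails. A minimal sketch of that failure mode (shapes and sizes here are illustrative, not taken from the commit); the same change is applied to the Deformable DETR processor below:

import torch

# Device mismatch that .to(boxes.device) guards against: a list-typed
# `target_sizes` always yields CPU scale factors, while `boxes` may be on GPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
boxes = torch.rand(2, 100, 4, device=device)        # (batch, num_queries, 4), illustrative shape
target_sizes = [(480, 640), (480, 640)]             # list input -> CPU tensors below

img_h = torch.Tensor([i[0] for i in target_sizes])  # created on CPU regardless of `device`
img_w = torch.Tensor([i[1] for i in target_sizes])
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)

try:
    boxes * scale_fct[:, None, :]                   # RuntimeError when boxes is on CUDA
except RuntimeError as err:
    print(err)  # "Expected all tensors to be on the same device ..."

# The fix applied here: align devices before scaling.
scaled = boxes * scale_fct.to(boxes.device)[:, None, :]
print(scaled.device)

On a CPU-only machine the try block succeeds silently; the mismatch only surfaces once boxes actually lives on a CUDA device.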
src/transformers/models/deformable_detr/image_processing_deformable_detr.py

@@ -1288,7 +1288,7 @@ class DeformableDetrImageProcessor(BaseImageProcessor):
         # and from relative [0, 1] to absolute [0, height] coordinates
         img_h, img_w = target_sizes.unbind(1)
-        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
+        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
         boxes = boxes * scale_fct[:, None, :]
         results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)]

@@ -1337,7 +1337,7 @@ class DeformableDetrImageProcessor(BaseImageProcessor):
             img_h = torch.Tensor([i[0] for i in target_sizes])
             img_w = torch.Tensor([i[1] for i in target_sizes])
         else:
             img_h, img_w = target_sizes.unbind(1)
-        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
+        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
         boxes = boxes * scale_fct[:, None, :]
         results = []
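Taken together, the patched scaling step in both processors reduces to the pattern below. This is a self-contained sketch: scale_boxes is a hypothetical name for illustration, not part of the transformers API; the real logic lives inside post_process_object_detection.

import torch

# Hypothetical helper mirroring the patched step in both image processors.
def scale_boxes(boxes: torch.Tensor, target_sizes) -> torch.Tensor:
    """Convert relative [0, 1] boxes to absolute pixel coordinates."""
    if isinstance(target_sizes, list):
        img_h = torch.Tensor([i[0] for i in target_sizes])  # CPU tensors
        img_w = torch.Tensor([i[1] for i in target_sizes])
    else:
        img_h, img_w = target_sizes.unbind(1)
    # The one-line fix: keep the multiply on a single device in both branches.
    scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
    return boxes * scale_fct[:, None, :]

device = "cuda" if torch.cuda.is_available() else "cpu"
out = scale_boxes(torch.rand(2, 100, 4, device=device), [(480, 640)] * 2)
print(out.shape, out.device)  # torch.Size([2, 100, 4]) on the boxes' device

Casting scale_fct once, rather than in each branch, covers both the list and tensor forms of target_sizes with a single line.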