Unverified Commit f61bb642 authored by RunningLeon's avatar RunningLeon Committed by GitHub
Browse files

[Enhance]: Better support for pytorch2onnx for detection models (#648)

* [feature]: Support conversion from PyTorch to ONNX for detection models

* [fix] RuntimeError with integer divide when converting to onnx

* Resolve pylint error

* add comment
parent 23ecd83d
...@@ -364,6 +364,22 @@ adaptive_avg_pool3d = _adaptive_pool('adaptive_avg_pool3d', 'AveragePool', ...@@ -364,6 +364,22 @@ adaptive_avg_pool3d = _adaptive_pool('adaptive_avg_pool3d', 'AveragePool',
_triple) _triple)
def new_full(g,
             self,
             size,
             fill_value,
             dtype,
             layout,
             device,
             pin_memory=False):
    """ONNX symbolic for ``Tensor.new_full``.

    Delegates to the opset9 ``full`` symbolic after resolving ``dtype``:
    when no dtype is supplied and the input tensor's type is fully known,
    the tensor's own scalar type is mapped to the corresponding ONNX
    scalar-type index that ``full`` expects.

    NOTE(review): indentation was reconstructed from a mangled diff; the
    two dtype-conversion lines are assumed to live inside the ``if`` (as
    in upstream mmcv) — confirm against the original file.
    """
    from torch.onnx.symbolic_opset9 import full
    if dtype is None and self.isCompleteTensor():
        # Derive the dtype from the input tensor (e.g. 'Float') and
        # translate it into the ONNX scalar-type index.
        dtype = self.type().scalarType()
        dtype = sym_help.scalar_type_to_onnx.index(
            sym_help.cast_pytorch_to_onnx[dtype])
    return full(g, size, fill_value, dtype, layout, device, pin_memory)
def register_extra_symbolics(opset=11): def register_extra_symbolics(opset=11):
register_op('one_hot', one_hot, '', opset) register_op('one_hot', one_hot, '', opset)
register_op('im2col', im2col, '', opset) register_op('im2col', im2col, '', opset)
...@@ -387,3 +403,4 @@ def register_extra_symbolics(opset=11): ...@@ -387,3 +403,4 @@ def register_extra_symbolics(opset=11):
register_op('upsample_bilinear2d', upsample_bilinear2d, '', opset) register_op('upsample_bilinear2d', upsample_bilinear2d, '', opset)
register_op('upsample_trilinear3d', upsample_trilinear3d, '', opset) register_op('upsample_trilinear3d', upsample_trilinear3d, '', opset)
register_op('upsample_bicubic2d', upsample_bicubic2d, '', opset) register_op('upsample_bicubic2d', upsample_bicubic2d, '', opset)
register_op('new_full', new_full, '', opset)
...@@ -263,7 +263,9 @@ class CARAFEPack(nn.Module): ...@@ -263,7 +263,9 @@ class CARAFEPack(nn.Module):
def kernel_normalizer(self, mask): def kernel_normalizer(self, mask):
mask = F.pixel_shuffle(mask, self.scale_factor) mask = F.pixel_shuffle(mask, self.scale_factor)
n, mask_c, h, w = mask.size() n, mask_c, h, w = mask.size()
mask_channel = int(mask_c / (self.up_kernel * self.up_kernel)) # use float division explicitly,
# to avoid inconsistency while exporting to onnx
mask_channel = int(mask_c / float(self.up_kernel**2))
mask = mask.view(n, mask_channel, -1, h, w) mask = mask.view(n, mask_channel, -1, h, w)
mask = F.softmax(mask, dim=2) mask = F.softmax(mask, dim=2)
......
...@@ -21,7 +21,6 @@ class NMSop(torch.autograd.Function): ...@@ -21,7 +21,6 @@ class NMSop(torch.autograd.Function):
@staticmethod @staticmethod
def symbolic(g, bboxes, scores, iou_threshold, offset): def symbolic(g, bboxes, scores, iou_threshold, offset):
from torch.onnx.symbolic_opset9 import select, squeeze, unsqueeze from torch.onnx.symbolic_opset9 import select, squeeze, unsqueeze
boxes = unsqueeze(g, bboxes, 0) boxes = unsqueeze(g, bboxes, 0)
scores = unsqueeze(g, unsqueeze(g, scores, 0), 0) scores = unsqueeze(g, unsqueeze(g, scores, 0), 0)
max_output_per_class = g.op( max_output_per_class = g.op(
...@@ -248,14 +247,14 @@ def batched_nms(boxes, scores, idxs, nms_cfg, class_agnostic=False): ...@@ -248,14 +247,14 @@ def batched_nms(boxes, scores, idxs, nms_cfg, class_agnostic=False):
boxes_for_nms = boxes boxes_for_nms = boxes
else: else:
max_coordinate = boxes.max() max_coordinate = boxes.max()
offsets = idxs.to(boxes) * (max_coordinate + 1) offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes))
boxes_for_nms = boxes + offsets[:, None] boxes_for_nms = boxes + offsets[:, None]
nms_type = nms_cfg_.pop('type', 'nms') nms_type = nms_cfg_.pop('type', 'nms')
nms_op = eval(nms_type) nms_op = eval(nms_type)
split_thr = nms_cfg_.pop('split_thr', 10000) split_thr = nms_cfg_.pop('split_thr', 10000)
if len(boxes_for_nms) < split_thr: if boxes_for_nms.shape[0] < split_thr:
dets, keep = nms_op(boxes_for_nms, scores, **nms_cfg_) dets, keep = nms_op(boxes_for_nms, scores, **nms_cfg_)
boxes = boxes[keep] boxes = boxes[keep]
scores = dets[:, -1] scores = dets[:, -1]
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment