Commit bbaa6f3b authored by Marco Martinelli, committed by Francisco Massa
Browse files

Changes in docstring examples for RCNN-based models. (#1763)

* Type of input featmap_names fixed in example.

* Added missing imports.
parent c5e972a6
......@@ -123,10 +123,10 @@ class FasterRCNN(GeneralizedRCNN):
>>> # use to perform the region of interest cropping, as well as
>>> # the size of the crop after rescaling.
>>> # if your backbone returns a Tensor, featmap_names is expected to
>>> # be [0]. More generally, the backbone should return an
>>> # be ['0']. More generally, the backbone should return an
>>> # OrderedDict[Tensor], and in featmap_names you can choose which
>>> # feature maps to use.
>>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
>>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
>>> output_size=7,
>>> sampling_ratio=2)
>>>
......
......@@ -102,6 +102,7 @@ class KeypointRCNN(FasterRCNN):
Example::
>>> import torch
>>> import torchvision
>>> from torchvision.models.detection import KeypointRCNN
>>> from torchvision.models.detection.rpn import AnchorGenerator
......@@ -126,17 +127,17 @@ class KeypointRCNN(FasterRCNN):
>>> # use to perform the region of interest cropping, as well as
>>> # the size of the crop after rescaling.
>>> # if your backbone returns a Tensor, featmap_names is expected to
>>> # be [0]. More generally, the backbone should return an
>>> # be ['0']. More generally, the backbone should return an
>>> # OrderedDict[Tensor], and in featmap_names you can choose which
>>> # feature maps to use.
>>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
>>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
>>> output_size=7,
>>> sampling_ratio=2)
>>>
>>> keypoint_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
>>> keypoint_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
>>> output_size=14,
>>> sampling_ratio=2)
>>> # put the pieces together inside a FasterRCNN model
>>> # put the pieces together inside a KeypointRCNN model
>>> model = KeypointRCNN(backbone,
>>> num_classes=2,
>>> rpn_anchor_generator=anchor_generator,
......
......@@ -104,6 +104,7 @@ class MaskRCNN(FasterRCNN):
Example::
>>> import torch
>>> import torchvision
>>> from torchvision.models.detection import MaskRCNN
>>> from torchvision.models.detection.rpn import AnchorGenerator
......@@ -128,17 +129,17 @@ class MaskRCNN(FasterRCNN):
>>> # use to perform the region of interest cropping, as well as
>>> # the size of the crop after rescaling.
>>> # if your backbone returns a Tensor, featmap_names is expected to
>>> # be [0]. More generally, the backbone should return an
>>> # be ['0']. More generally, the backbone should return an
>>> # OrderedDict[Tensor], and in featmap_names you can choose which
>>> # feature maps to use.
>>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
>>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
>>> output_size=7,
>>> sampling_ratio=2)
>>>
>>> mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
>>> mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
>>> output_size=14,
>>> sampling_ratio=2)
>>> # put the pieces together inside a FasterRCNN model
>>> # put the pieces together inside a MaskRCNN model
>>> model = MaskRCNN(backbone,
>>> num_classes=2,
>>> rpn_anchor_generator=anchor_generator,
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment