Unverified Commit 3d60f498 authored by Samuel Marks, committed by GitHub

[*.py] Rename "Arguments:" to "Args:" (#3203)


Co-authored-by: Vasilis Vryniotis <datumbox@users.noreply.github.com>
parent ca6fdd6d
......@@ -26,7 +26,7 @@ class GroupedBatchSampler(BatchSampler):
It enforces that the batch only contains elements from the same group.
It also tries to provide mini-batches which follow an ordering as close
as possible to the ordering from the original sampler.
Arguments:
Args:
sampler (Sampler): Base sampler.
group_ids (list[int]): If the sampler produces indices in range [0, N),
`group_ids` must be a list of `N` ints which contains the group id of each sample.
......
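For orientation, a minimal usage sketch. GroupedBatchSampler lives in the detection reference scripts (references/detection/group_by_aspect_ratio.py), not in the torchvision package itself, so the import below is a repo-local one and the toy dataset and group ids are placeholders:

import torch
from torch.utils.data import RandomSampler, TensorDataset
from group_by_aspect_ratio import GroupedBatchSampler  # repo-local import from references/detection

# Toy dataset of 8 samples: pretend 0-3 are landscape (group 0) and 4-7 are portrait (group 1).
dataset = TensorDataset(torch.arange(8))
sampler = RandomSampler(dataset)
group_ids = [0, 0, 0, 0, 1, 1, 1, 1]

batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size=2)
for batch in batch_sampler:
    print(batch)  # each batch holds indices drawn from a single group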
......@@ -111,7 +111,7 @@ class UniformClipSampler(Sampler):
When the number of unique clips in the video is fewer than `num_clips_per_video`,
repeat the clips until `num_clips_per_video` clips are collected
Arguments:
Args:
video_clips (VideoClips): video clips to sample from
num_clips_per_video (int): number of clips to be sampled per video
"""
......@@ -151,7 +151,7 @@ class RandomClipSampler(Sampler):
"""
Samples at most `max_video_clips_per_video` clips for each video randomly
Arguments:
Args:
video_clips (VideoClips): video clips to sample from
max_clips_per_video (int): maximum number of clips to be sampled per video
"""
......
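A hedged sketch of how the clip samplers above are typically used; the video paths are placeholders that must point to real files for VideoClips to index, and the samplers import path reflects torchvision.datasets.samplers as of this commit:

import torch
from torchvision.datasets.video_utils import VideoClips
from torchvision.datasets.samplers import RandomClipSampler, UniformClipSampler

video_paths = ["video_a.mp4", "video_b.mp4"]  # placeholder paths
video_clips = VideoClips(video_paths, clip_length_in_frames=16, frames_between_clips=16)

random_sampler = RandomClipSampler(video_clips, max_clips_per_video=5)    # at most 5 clips per video
uniform_sampler = UniformClipSampler(video_clips, num_clips_per_video=3)  # exactly 3 clips per video
clip_indices = list(random_sampler)  # indices into the flat list of clips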
......@@ -88,7 +88,7 @@ class VideoClips(object):
Recreating the clips for different clip lengths is fast, and can be done
with the `compute_clips` method.
Arguments:
Args:
video_paths (List[str]): paths to the video files
clip_length_in_frames (int): size of a clip in number of frames
frames_between_clips (int): step (in frames) between each clip
......@@ -227,7 +227,7 @@ class VideoClips(object):
Always returns clips of size `num_frames`, meaning that the
last few frames in a video can potentially be dropped.
Arguments:
Args:
num_frames (int): number of frames for the clip
step (int): distance between two clips
"""
......@@ -285,7 +285,7 @@ class VideoClips(object):
"""
Gets a subclip from a list of videos.
Arguments:
Args:
idx (int): index of the subclip. Must be between 0 and num_clips().
Returns:
......
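A companion sketch for the VideoClips methods documented above (compute_clips to re-slice, get_clip to decode), again assuming a real video file behind the placeholder path:

from torchvision.datasets.video_utils import VideoClips

video_clips = VideoClips(["video_a.mp4"], clip_length_in_frames=16, frames_between_clips=8)
print(video_clips.num_clips())

# Re-slice the same videos into 32-frame clips spaced 16 frames apart, without re-scanning the files.
video_clips.compute_clips(num_frames=32, step=16)

video, audio, info, video_idx = video_clips.get_clip(0)  # video is a Tensor[T, H, W, C]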
......@@ -71,7 +71,7 @@ def read_file(path: str) -> torch.Tensor:
Reads and outputs the byte contents of a file as a uint8 Tensor
with one dimension.
Arguments:
Args:
path (str): the path to the file to be read
Returns:
......@@ -86,7 +86,7 @@ def write_file(filename: str, data: torch.Tensor) -> None:
Writes the contents of a uint8 tensor with one dimension to a
file.
Arguments:
Args:
filename (str): the path to the file to be written
data (Tensor): the contents to be written to the output file
"""
......@@ -99,7 +99,7 @@ def decode_png(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGE
Optionally converts the image to the desired format.
The values of the output tensor are uint8 between 0 and 255.
Arguments:
Args:
input (Tensor[1]): a one dimensional uint8 tensor containing
the raw bytes of the PNG image.
mode (ImageReadMode): the read mode used for optionally
......@@ -162,7 +162,7 @@ def decode_jpeg(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANG
Optionally converts the image to the desired format.
The values of the output tensor are uint8 between 0 and 255.
Arguments:
Args:
input (Tensor[1]): a one dimensional uint8 tensor containing
the raw bytes of the JPEG image.
mode (ImageReadMode): the read mode used for optionally
......
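A minimal sketch tying these io helpers together; the imports mirror torchvision/io/image.py as of this commit, and the JPEG path is a placeholder:

from torchvision.io.image import read_file, write_file, decode_jpeg, ImageReadMode

data = read_file("dog.jpg")                      # uint8 Tensor of raw bytes, one dimension
img = decode_jpeg(data, mode=ImageReadMode.RGB)  # uint8 Tensor[3, H, W], values in 0..255
write_file("dog_copy.jpg", data)                 # write the raw bytes back out unchanged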
......@@ -18,7 +18,7 @@ class IntermediateLayerGetter(nn.ModuleDict):
assigned to the model. So if `model` is passed, `model.feature1` can
be returned, but not `model.feature1.layer2`.
Arguments:
Args:
model (nn.Module): model on which we will extract the features
return_layers (Dict[name, new_name]): a dict containing the names
of the modules for which the activations will be returned as
......
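A condensed sketch of IntermediateLayerGetter, adapted from the example in this class's docstring:

import torch
import torchvision
from torchvision.models._utils import IntermediateLayerGetter

m = torchvision.models.resnet18(pretrained=False)
# Return the activations of layer1 and layer3 under new names.
getter = IntermediateLayerGetter(m, return_layers={'layer1': 'feat1', 'layer3': 'feat2'})
out = getter(torch.rand(1, 3, 224, 224))
print([(k, v.shape) for k, v in out.items()])
# [('feat1', torch.Size([1, 64, 56, 56])), ('feat2', torch.Size([1, 256, 14, 14]))]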
......@@ -15,7 +15,7 @@ class BalancedPositiveNegativeSampler(object):
def __init__(self, batch_size_per_image, positive_fraction):
# type: (int, float) -> None
"""
Arguments:
Args:
batch_size_per_image (int): number of elements to be selected per image
positive_fraction (float): percentage of positive elements per batch
"""
......@@ -25,7 +25,7 @@ class BalancedPositiveNegativeSampler(object):
def __call__(self, matched_idxs):
# type: (List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]
"""
Arguments:
Args:
matched_idxs: list of tensors containing -1, 0 or positive values.
Each tensor corresponds to a specific image.
-1 values are ignored, 0 are considered as negatives and > 0 as
......@@ -83,7 +83,7 @@ def encode_boxes(reference_boxes, proposals, weights):
Encode a set of proposals with respect to some
reference boxes
Arguments:
Args:
reference_boxes (Tensor): reference boxes
proposals (Tensor): boxes to be encoded
"""
......@@ -133,7 +133,7 @@ class BoxCoder(object):
def __init__(self, weights, bbox_xform_clip=math.log(1000. / 16)):
# type: (Tuple[float, float, float, float], float) -> None
"""
Arguments:
Args:
weights (4-element tuple)
bbox_xform_clip (float)
"""
......@@ -153,7 +153,7 @@ class BoxCoder(object):
Encode a set of proposals with respect to some
reference boxes
Arguments:
Args:
reference_boxes (Tensor): reference boxes
proposals (Tensor): boxes to be encoded
"""
......@@ -183,7 +183,7 @@ class BoxCoder(object):
From a set of original boxes and encoded relative box offsets,
get the decoded boxes.
Arguments:
Args:
rel_codes (Tensor): encoded boxes
boxes (Tensor): reference boxes.
"""
......@@ -361,7 +361,7 @@ def overwrite_eps(model, eps):
only when the pretrained weights are loaded to maintain compatibility
with previous versions.
Arguments:
Args:
model (nn.Module): The model on which we perform the overwrite.
eps (float): The new value of eps.
"""
......
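BalancedPositiveNegativeSampler, encode_boxes and BoxCoder live in torchvision.models.detection._utils and are internal, so treat the following as an illustrative sketch only; it round-trips one box through BoxCoder's encode_single/decode_single:

import torch
from torchvision.models.detection._utils import BoxCoder

coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
reference_boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
proposals = torch.tensor([[1.0, 1.0, 9.0, 11.0]])

rel_codes = coder.encode_single(reference_boxes, proposals)  # regression targets mapping proposals to references
decoded = coder.decode_single(rel_codes, proposals)          # recovers reference_boxes (up to numerical error)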
......@@ -22,7 +22,7 @@ class AnchorGenerator(nn.Module):
and AnchorGenerator will output a set of sizes[i] * aspect_ratios[i] anchors
per spatial location for feature map i.
Arguments:
Args:
sizes (Tuple[Tuple[int]]):
aspect_ratios (Tuple[Tuple[float]]):
"""
......
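A minimal construction sketch for a single feature map, using the import path that the detection docstring examples use:

from torchvision.models.detection.rpn import AnchorGenerator

# One tuple of sizes and one tuple of aspect ratios per feature map:
# a single map with 3 sizes x 3 aspect ratios = 9 anchors per spatial location.
anchor_generator = AnchorGenerator(
    sizes=((32, 64, 128),),
    aspect_ratios=((0.5, 1.0, 2.0),),
)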
......@@ -14,7 +14,7 @@ class BackboneWithFPN(nn.Module):
Internally, it uses torchvision.models._utils.IntermediateLayerGetter to
extract a submodel that returns the feature maps specified in return_layers.
The same limitations of IntermediateLayerGetter apply here.
Arguments:
Args:
backbone (nn.Module)
return_layers (Dict[name, new_name]): a dict containing the names
of the modules for which the activations will be returned as
......@@ -73,7 +73,7 @@ def resnet_fpn_backbone(
>>> ('3', torch.Size([1, 256, 2, 2])),
>>> ('pool', torch.Size([1, 256, 1, 1]))]
Arguments:
Args:
backbone_name (string): resnet architecture. Possible values are 'ResNet', 'resnet18', 'resnet34', 'resnet50',
'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 'wide_resnet50_2', 'wide_resnet101_2'
norm_layer (torchvision.ops): it is recommended to use the default value. For details visit:
......
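Filling out the docstring example excerpted above, as a hedged sketch (keyword details such as trainable_layers vary across versions):

import torch
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone

backbone = resnet_fpn_backbone('resnet50', pretrained=False)
output = backbone(torch.rand(1, 3, 64, 64))  # OrderedDict of FPN feature maps
print([(k, v.shape) for k, v in output.items()])
# [('0', [1, 256, 16, 16]), ('1', [1, 256, 8, 8]), ('2', [1, 256, 4, 4]),
#  ('3', [1, 256, 2, 2]), ('pool', [1, 256, 1, 1])]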
......@@ -49,7 +49,7 @@ class FasterRCNN(GeneralizedRCNN):
- labels (Int64Tensor[N]): the predicted labels for each image
- scores (Tensor[N]): the scores for each prediction
Arguments:
Args:
backbone (nn.Module): the network used to compute the features for the model.
It should contain an out_channels attribute, which indicates the number of output
channels that each feature map has (and it should be the same for all feature maps).
......@@ -239,7 +239,7 @@ class TwoMLPHead(nn.Module):
"""
Standard heads for FPN-based models
Arguments:
Args:
in_channels (int): number of input channels
representation_size (int): size of the intermediate representation
"""
......@@ -264,7 +264,7 @@ class FastRCNNPredictor(nn.Module):
Standard classification + bounding box regression layers
for Fast R-CNN.
Arguments:
Args:
in_channels (int): number of input channels
num_classes (int): number of output classes (including background)
"""
......@@ -341,7 +341,7 @@ def fasterrcnn_resnet50_fpn(pretrained=False, progress=True,
>>> # optionally, if you want to export the model to ONNX:
>>> torch.onnx.export(model, x, "faster_rcnn.onnx", opset_version = 11)
Arguments:
Args:
pretrained (bool): If True, returns a model pre-trained on COCO train2017
progress (bool): If True, displays a progress bar of the download to stderr
pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet
......
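The inference flow this docstring describes, as a minimal sketch (pretrained=False and pretrained_backbone=False avoid weight downloads; use pretrained=True for the COCO-trained model):

import torch
import torchvision

model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)
model.eval()
x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
predictions = model(x)  # list of dicts with 'boxes', 'labels' and 'scores' per image
print(predictions[0]['boxes'].shape)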
......@@ -14,7 +14,7 @@ class GeneralizedRCNN(nn.Module):
"""
Main class for Generalized R-CNN.
Arguments:
Args:
backbone (nn.Module):
rpn (nn.Module):
roi_heads (nn.Module): takes the features + the proposals from the RPN and computes
......@@ -43,7 +43,7 @@ class GeneralizedRCNN(nn.Module):
def forward(self, images, targets=None):
# type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]
"""
Arguments:
Args:
images (list[Tensor]): images to be processed
targets (list[Dict[Tensor]]): ground-truth boxes present in the image (optional)
......
......@@ -14,7 +14,7 @@ class ImageList(object):
def __init__(self, tensors: Tensor, image_sizes: List[Tuple[int, int]]):
"""
Arguments:
Args:
tensors (tensor)
image_sizes (list[tuple[int, int]])
"""
......
......@@ -44,7 +44,7 @@ class KeypointRCNN(FasterRCNN):
- scores (Tensor[N]): the scores for each prediction
- keypoints (FloatTensor[N, K, 3]): the locations of the predicted keypoints, in [x, y, v] format.
Arguments:
Args:
backbone (nn.Module): the network used to compute the features for the model.
It should contain an out_channels attribute, which indicates the number of output
channels that each feature map has (and it should be the same for all feature maps).
......@@ -309,7 +309,7 @@ def keypointrcnn_resnet50_fpn(pretrained=False, progress=True,
>>> # optionally, if you want to export the model to ONNX:
>>> torch.onnx.export(model, x, "keypoint_rcnn.onnx", opset_version = 11)
Arguments:
Args:
pretrained (bool): If True, returns a model pre-trained on COCO train2017
progress (bool): If True, displays a progress bar of the download to stderr
pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet
......
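The same flow for the keypoint variant, as a short sketch:

import torch
import torchvision

model = torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)
model.eval()
predictions = model([torch.rand(3, 300, 400)])
# each prediction dict also carries 'keypoints' (FloatTensor[N, K, 3]) in [x, y, v] format
print(predictions[0].keys())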
......@@ -48,7 +48,7 @@ class MaskRCNN(FasterRCNN):
obtain the final segmentation masks, the soft masks can be thresholded, generally
with a value of 0.5 (mask >= 0.5)
Arguments:
Args:
backbone (nn.Module): the network used to compute the features for the model.
It should contain an out_channels attribute, which indicates the number of output
channels that each feature map has (and it should be the same for all feature maps).
......@@ -222,7 +222,7 @@ class MaskRCNN(FasterRCNN):
class MaskRCNNHeads(nn.Sequential):
def __init__(self, in_channels, layers, dilation):
"""
Arguments:
Args:
in_channels (int): number of input channels
layers (list): feature dimensions of each FCN layer
dilation (int): dilation rate of kernel
......@@ -308,7 +308,7 @@ def maskrcnn_resnet50_fpn(pretrained=False, progress=True,
>>> # optionally, if you want to export the model to ONNX:
>>> torch.onnx.export(model, x, "mask_rcnn.onnx", opset_version = 11)
Arguments:
Args:
pretrained (bool): If True, returns a model pre-trained on COCO train2017
progress (bool): If True, displays a progress bar of the download to stderr
pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet
......
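A short sketch showing the soft mask output and the 0.5 thresholding mentioned above:

import torch
import torchvision

model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)
model.eval()
predictions = model([torch.rand(3, 300, 400)])
soft_masks = predictions[0]['masks']  # FloatTensor[N, 1, H, W] with values in [0, 1]
binary_masks = soft_masks >= 0.5      # threshold the soft masks to obtain segmentation masks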
......@@ -34,7 +34,7 @@ class RetinaNetHead(nn.Module):
"""
A regression and classification head for use in RetinaNet.
Arguments:
Args:
in_channels (int): number of channels of the input feature
num_anchors (int): number of anchors to be predicted
num_classes (int): number of classes to be predicted
......@@ -64,7 +64,7 @@ class RetinaNetClassificationHead(nn.Module):
"""
A classification head for use in RetinaNet.
Arguments:
Args:
in_channels (int): number of channels of the input feature
num_anchors (int): number of anchors to be predicted
num_classes (int): number of classes to be predicted
......@@ -149,7 +149,7 @@ class RetinaNetRegressionHead(nn.Module):
"""
A regression head for use in RetinaNet.
Arguments:
Args:
in_channels (int): number of channels of the input feature
num_anchors (int): number of anchors to be predicted
"""
......@@ -251,7 +251,7 @@ class RetinaNet(nn.Module):
- labels (Int64Tensor[N]): the predicted labels for each image
- scores (Tensor[N]): the scores for each prediction
Arguments:
Args:
backbone (nn.Module): the network used to compute the features for the model.
It should contain an out_channels attribute, which indicates the number of output
channels that each feature map has (and it should be the same for all feature maps).
......@@ -457,7 +457,7 @@ class RetinaNet(nn.Module):
def forward(self, images, targets=None):
# type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]
"""
Arguments:
Args:
images (list[Tensor]): images to be processed
targets (list[Dict[Tensor]]): ground-truth boxes present in the image (optional)
......@@ -597,7 +597,7 @@ def retinanet_resnet50_fpn(pretrained=False, progress=True,
>>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
>>> predictions = model(x)
Arguments:
Args:
pretrained (bool): If True, returns a model pre-trained on COCO train2017
progress (bool): If True, displays a progress bar of the download to stderr
"""
......
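And the equivalent sketch for RetinaNet:

import torch
import torchvision

model = torchvision.models.detection.retinanet_resnet50_fpn(pretrained=False, pretrained_backbone=False)
model.eval()
x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
predictions = model(x)  # list of dicts with 'boxes', 'labels' and 'scores' per image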
......@@ -18,7 +18,7 @@ def fastrcnn_loss(class_logits, box_regression, labels, regression_targets):
"""
Computes the loss for Faster R-CNN.
Arguments:
Args:
class_logits (Tensor)
box_regression (Tensor)
labels (list[BoxList])
......@@ -61,7 +61,7 @@ def maskrcnn_inference(x, labels):
probability (which are of fixed size and directly output
by the CNN) and return the masks in the mask field of the BoxList.
Arguments:
Args:
x (Tensor): the mask logits
labels (list[BoxList]): bounding boxes that are used as
reference, one for each image
......@@ -101,7 +101,7 @@ def project_masks_on_boxes(gt_masks, boxes, matched_idxs, M):
def maskrcnn_loss(mask_logits, proposals, gt_masks, gt_labels, mask_matched_idxs):
# type: (Tensor, List[Tensor], List[Tensor], List[Tensor], List[Tensor]) -> Tensor
"""
Arguments:
Args:
proposals (list[BoxList])
mask_logits (Tensor)
targets (list[BoxList])
......@@ -727,7 +727,7 @@ class RoIHeads(nn.Module):
):
# type: (...) -> Tuple[List[Dict[str, Tensor]], Dict[str, Tensor]]
"""
Arguments:
Args:
features (List[Tensor])
proposals (List[Tensor[N, 4]])
image_shapes (List[Tuple[H, W]])
......
......@@ -31,7 +31,7 @@ class RPNHead(nn.Module):
"""
Adds a simple RPN Head with classification and regression heads
Arguments:
Args:
in_channels (int): number of channels of the input feature
num_anchors (int): number of anchors to be predicted
"""
......@@ -105,7 +105,7 @@ class RegionProposalNetwork(torch.nn.Module):
"""
Implements Region Proposal Network (RPN).
Arguments:
Args:
anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
maps.
head (nn.Module): module that computes the objectness and regression deltas
......@@ -269,7 +269,7 @@ class RegionProposalNetwork(torch.nn.Module):
def compute_loss(self, objectness, pred_bbox_deltas, labels, regression_targets):
# type: (Tensor, Tensor, List[Tensor], List[Tensor]) -> Tuple[Tensor, Tensor]
"""
Arguments:
Args:
objectness (Tensor)
pred_bbox_deltas (Tensor)
labels (List[Tensor])
......@@ -311,7 +311,7 @@ class RegionProposalNetwork(torch.nn.Module):
):
# type: (...) -> Tuple[List[Tensor], Dict[str, Tensor]]
"""
Arguments:
Args:
images (ImageList): images for which we want to compute the predictions
features (OrderedDict[Tensor]): features computed from the images that are
used for computing the predictions. Each tensor in the list
......
......@@ -14,7 +14,7 @@ class DeepLabV3(_SimpleSegmentationModel):
`"Rethinking Atrous Convolution for Semantic Image Segmentation"
<https://arxiv.org/abs/1706.05587>`_.
Arguments:
Args:
backbone (nn.Module): the network used to compute the features for the model.
The backbone should return an OrderedDict[Tensor], with the key being
"out" for the last feature map used, and "aux" if an auxiliary classifier
......
......@@ -10,7 +10,7 @@ class FCN(_SimpleSegmentationModel):
"""
Implements a Fully-Convolutional Network for semantic segmentation.
Arguments:
Args:
backbone (nn.Module): the network used to compute the features for the model.
The backbone should return an OrderedDict[Tensor], with the key being
"out" for the last feature map used, and "aux" if an auxiliary classifier
......
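A minimal inference sketch for these segmentation wrappers; the same pattern applies to both the FCN and DeepLabV3 builders:

import torch
import torchvision

model = torchvision.models.segmentation.deeplabv3_resnet101(pretrained=False, num_classes=21)
model.eval()
output = model(torch.rand(1, 3, 224, 224))
out = output['out']  # Tensor[1, 21, 224, 224]: per-pixel class scores from the "out" head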
......@@ -7,7 +7,7 @@ def _box_cxcywh_to_xyxy(boxes: Tensor) -> Tensor:
Converts bounding boxes from (cx, cy, w, h) format to (x1, y1, x2, y2) format.
(cx, cy) refers to center of bounding box
(w, h) are width and height of bounding box
Arguments:
Args:
boxes (Tensor[N, 4]): boxes in (cx, cy, w, h) format which will be converted.
Returns:
......@@ -30,7 +30,7 @@ def _box_xyxy_to_cxcywh(boxes: Tensor) -> Tensor:
Converts bounding boxes from (x1, y1, x2, y2) format to (cx, cy, w, h) format.
(x1, y1) refer to top left of bounding box
(x2, y2) refer to bottom right of bounding box
Arguments:
Args:
boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format which will be converted.
Returns:
......@@ -52,7 +52,7 @@ def _box_xywh_to_xyxy(boxes: Tensor) -> Tensor:
Converts bounding boxes from (x, y, w, h) format to (x1, y1, x2, y2) format.
(x, y) refers to top left of bounding box.
(w, h) refers to width and height of box.
Arguments:
Args:
boxes (Tensor[N, 4]): boxes in (x, y, w, h) which will be converted.
Returns:
......@@ -68,7 +68,7 @@ def _box_xyxy_to_xywh(boxes: Tensor) -> Tensor:
Converts bounding boxes from (x1, y1, x2, y2) format to (x, y, w, h) format.
(x1, y1) refer to top left of bounding box
(x2, y2) refer to bottom right of bounding box
Arguments:
Args:
boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) which will be converted.
Returns:
......
......@@ -93,7 +93,7 @@ def remove_small_boxes(boxes: Tensor, min_size: float) -> Tensor:
"""
Remove boxes which contain at least one side smaller than min_size.
Arguments:
Args:
boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format
min_size (float): minimum size
......@@ -111,7 +111,7 @@ def clip_boxes_to_image(boxes: Tensor, size: Tuple[int, int]) -> Tensor:
"""
Clip boxes so that they lie inside an image of size `size`.
Arguments:
Args:
boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format
size (Tuple[height, width]): size of the image
......@@ -148,7 +148,7 @@ def box_convert(boxes: Tensor, in_fmt: str, out_fmt: str) -> Tensor:
'cxcywh' : boxes are represented via centre, width and height, cx, cy being center of box, w, h
being width and height.
Arguments:
Args:
boxes (Tensor[N, 4]): boxes which will be converted.
in_fmt (str): Input format of given boxes. Supported formats are ['xyxy', 'xywh', 'cxcywh'].
out_fmt (str): Output format of given boxes. Supported formats are ['xyxy', 'xywh', 'cxcywh']
......@@ -190,7 +190,7 @@ def box_area(boxes: Tensor) -> Tensor:
Computes the area of a set of bounding boxes, which are specified by their
(x1, y1, x2, y2) coordinates.
Arguments:
Args:
boxes (Tensor[N, 4]): boxes for which the area will be computed. They
are expected to be in (x1, y1, x2, y2) format
......@@ -208,7 +208,7 @@ def box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor:
Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
Arguments:
Args:
boxes1 (Tensor[N, 4])
boxes2 (Tensor[M, 4])
......@@ -235,7 +235,7 @@ def generalized_box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor:
Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
Arguments:
Args:
boxes1 (Tensor[N, 4])
boxes2 (Tensor[M, 4])
......
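A short sketch exercising the public box utilities documented above, all exported from torchvision.ops:

import torch
from torchvision.ops import box_convert, box_area, box_iou, clip_boxes_to_image

boxes_xywh = torch.tensor([[10.0, 10.0, 20.0, 30.0]])                # (x, y, w, h)
boxes_xyxy = box_convert(boxes_xywh, in_fmt='xywh', out_fmt='xyxy')  # [[10., 10., 30., 40.]]

areas = box_area(boxes_xyxy)                              # [600.]
iou = box_iou(boxes_xyxy, boxes_xyxy)                     # [[1.]]
clipped = clip_boxes_to_image(boxes_xyxy, size=(32, 32))  # clip to a 32x32 (height, width) image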