"vscode:/vscode.git/clone" did not exist on "08ad34b19e619c0d2cc69d108e9f98a290e0cfc6"
Unverified Commit 305f41e4 authored by Alara Dirik, committed by GitHub

Fix post_process_object_detection method descriptions (#20977)

fix post_process_object_detection descriptions
parent 367fdf33
@@ -1302,8 +1302,8 @@ class ConditionalDetrImageProcessor(BaseImageProcessor):
         self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None
     ):
         """
-        Converts the output of [`ConditionalDetrForObjectDetection`] into the format expected by the COCO api. Only
-        supports PyTorch.
+        Converts the raw output of [`ConditionalDetrForObjectDetection`] into final bounding boxes in (top_left_x,
+        top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.

         Args:
             outputs ([`DetrObjectDetectionOutput`]):
......
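The reworded docstrings describe what these methods actually do: DETR-style detection heads predict boxes as normalized (center_x, center_y, width, height) values, and post-processing rescales them to absolute (top_left_x, top_left_y, bottom_right_x, bottom_right_y) pixel coordinates. A minimal sketch of that conversion, for illustration only (the function and variable names below are not from this commit or the library):

import torch

def center_to_corners(boxes: torch.Tensor, image_size: tuple) -> torch.Tensor:
    # boxes: (num_queries, 4) in normalized (center_x, center_y, width, height) format
    center_x, center_y, width, height = boxes.unbind(-1)
    corners = torch.stack(
        [center_x - 0.5 * width, center_y - 0.5 * height,
         center_x + 0.5 * width, center_y + 0.5 * height],
        dim=-1,
    )
    img_h, img_w = image_size
    # scale relative [0, 1] coordinates up to absolute pixel coordinates
    return corners * torch.tensor([img_w, img_h, img_w, img_h], dtype=corners.dtype)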
@@ -1251,8 +1251,8 @@ class DeformableDetrImageProcessor(BaseImageProcessor):
     # POSTPROCESSING METHODS - TODO: add support for other frameworks
     def post_process(self, outputs, target_sizes):
         """
-        Converts the output of [`DeformableDetrForObjectDetection`] into the format expected by the COCO api. Only
-        supports PyTorch.
+        Converts the raw output of [`DeformableDetrForObjectDetection`] into final bounding boxes in (top_left_x,
+        top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.

         Args:
             outputs ([`DeformableDetrObjectDetectionOutput`]):
@@ -1299,8 +1299,8 @@ class DeformableDetrImageProcessor(BaseImageProcessor):
         self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None
     ):
         """
-        Converts the output of [`DeformableDetrForObjectDetection`] into the format expected by the COCO api. Only
-        supports PyTorch.
+        Converts the raw output of [`DeformableDetrForObjectDetection`] into final bounding boxes in (top_left_x,
+        top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.

         Args:
             outputs ([`DetrObjectDetectionOutput`]):
......
@@ -1222,8 +1222,8 @@ class DetrImageProcessor(BaseImageProcessor):
     # inspired by https://github.com/facebookresearch/detr/blob/master/models/detr.py#L258
     def post_process(self, outputs, target_sizes):
         """
-        Converts the output of [`DetrForObjectDetection`] into the format expected by the COCO api. Only supports
-        PyTorch.
+        Converts the raw output of [`DetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
+        bottom_right_x, bottom_right_y) format. Only supports PyTorch.

         Args:
             outputs ([`DetrObjectDetectionOutput`]):
@@ -1499,8 +1499,8 @@ class DetrImageProcessor(BaseImageProcessor):
         self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None
     ):
         """
-        Converts the output of [`DetrForObjectDetection`] into the format expected by the COCO api. Only supports
-        PyTorch.
+        Converts the raw output of [`DetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
+        bottom_right_x, bottom_right_y) format. Only supports PyTorch.

         Args:
             outputs ([`DetrObjectDetectionOutput`]):
......
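For context, a typical call site for the `post_process_object_detection` method documented above might look like the sketch below. The checkpoint name `facebook/detr-resnet-50` is a real public checkpoint, but the file name `example.jpg` and the 0.5 threshold are illustrative choices, not part of this commit:

import torch
from PIL import Image
from transformers import DetrForObjectDetection, DetrImageProcessor

image = Image.open("example.jpg").convert("RGB")  # placeholder image path

processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# original (height, width) of each image in the batch; PIL's Image.size is (width, height)
target_sizes = torch.tensor([image.size[::-1]])
results = processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)

for result in results:
    for score, label, box in zip(result["scores"], result["labels"], result["boxes"]):
        # box is (top_left_x, top_left_y, bottom_right_x, bottom_right_y) in absolute pixels
        print(model.config.id2label[label.item()], round(score.item(), 3), [round(c, 1) for c in box.tolist()])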
@@ -329,7 +329,8 @@ class OwlViTImageProcessor(BaseImageProcessor):
     def post_process(self, outputs, target_sizes):
         """
-        Converts the output of [`OwlViTForObjectDetection`] into the format expected by the COCO api.
+        Converts the raw output of [`OwlViTForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
+        bottom_right_x, bottom_right_y) format.

         Args:
             outputs ([`OwlViTObjectDetectionOutput`]):
......
@@ -1116,14 +1116,14 @@ class YolosImageProcessor(BaseImageProcessor):
         return encoded_inputs

     # POSTPROCESSING METHODS - TODO: add support for other frameworks
-    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process
+    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process with Detr->Yolos
     def post_process(self, outputs, target_sizes):
         """
-        Converts the output of [`DetrForObjectDetection`] into the format expected by the COCO api. Only supports
-        PyTorch.
+        Converts the raw output of [`YolosForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
+        bottom_right_x, bottom_right_y) format. Only supports PyTorch.

         Args:
-            outputs ([`DetrObjectDetectionOutput`]):
+            outputs ([`YolosObjectDetectionOutput`]):
                 Raw outputs of the model.
             target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):
                 Tensor containing the size (height, width) of each image of the batch. For evaluation, this must be the
@@ -1164,8 +1164,8 @@ class YolosImageProcessor(BaseImageProcessor):
         self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None
     ):
         """
-        Converts the output of [`YolosForObjectDetection`] into the format expected by the COCO api. Only supports
-        PyTorch.
+        Converts the raw output of [`YolosForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
+        bottom_right_x, bottom_right_y) format. Only supports PyTorch.

         Args:
             outputs ([`YolosObjectDetectionOutput`]):
......
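The `target_sizes` argument referenced throughout these docstrings is a `(batch_size, 2)` tensor of (height, width) pairs for the original images. A small sketch of building it from a batch of PIL images (the helper name is illustrative, not part of the library):

import torch
from PIL import Image

def build_target_sizes(images: list) -> torch.Tensor:
    # PIL's Image.size is (width, height); the postprocessing methods expect (height, width)
    return torch.tensor([img.size[::-1] for img in images])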