Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
textmonkey_pytorch
Commits
b1e6136c
Commit
b1e6136c
authored
Dec 26, 2023
by
yuluoyun
Browse files
data generation
parent
00946203
Changes
404
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
481 additions
and
0 deletions
+481
-0
data_generation/grit/third_party/CenterNet2/detectron2/utils/registry.py
.../grit/third_party/CenterNet2/detectron2/utils/registry.py
+60
-0
data_generation/grit/third_party/CenterNet2/detectron2/utils/serialize.py
...grit/third_party/CenterNet2/detectron2/utils/serialize.py
+32
-0
data_generation/grit/third_party/CenterNet2/detectron2/utils/testing.py
...n/grit/third_party/CenterNet2/detectron2/utils/testing.py
+137
-0
data_generation/grit/third_party/CenterNet2/detectron2/utils/video_visualizer.py
...ird_party/CenterNet2/detectron2/utils/video_visualizer.py
+252
-0
No files found.
Too many changes to show.
To preserve performance only
404 of 404+
files are displayed.
Plain diff
Email patch
data_generation/grit/third_party/CenterNet2/detectron2/utils/registry.py
0 → 100644
View file @
b1e6136c
# Copyright (c) Facebook, Inc. and its affiliates.
from
typing
import
Any
import
pydoc
from
fvcore.common.registry
import
Registry
# for backward compatibility.
"""
``Registry`` and `locate` provide ways to map a string (typically found
in config files) to callable objects.
"""
__all__
=
[
"Registry"
,
"locate"
]
def
_convert_target_to_string
(
t
:
Any
)
->
str
:
"""
Inverse of ``locate()``.
Args:
t: any object with ``__module__`` and ``__qualname__``
"""
module
,
qualname
=
t
.
__module__
,
t
.
__qualname__
# Compress the path to this object, e.g. ``module.submodule._impl.class``
# may become ``module.submodule.class``, if the later also resolves to the same
# object. This simplifies the string, and also is less affected by moving the
# class implementation.
module_parts
=
module
.
split
(
"."
)
for
k
in
range
(
1
,
len
(
module_parts
)):
prefix
=
"."
.
join
(
module_parts
[:
k
])
candidate
=
f
"
{
prefix
}
.
{
qualname
}
"
try
:
if
locate
(
candidate
)
is
t
:
return
candidate
except
ImportError
:
pass
return
f
"
{
module
}
.
{
qualname
}
"
def locate(name: str) -> Any:
    """
    Locate and return an object ``x`` from the dotted-path string
    ``{x.__module__}.{x.__qualname__}``, such as "module.submodule.class_name".

    Raises an exception if the object cannot be found.
    """
    found = pydoc.locate(name)
    if found is not None:
        return found
    # Some cases (e.g. torch.optim.sgd.SGD) are not handled correctly by
    # pydoc.locate; fall back to hydra's private resolver when available.
    try:
        # from hydra.utils import get_method - will print many errors
        from hydra.utils import _locate
    except ImportError as e:
        raise ImportError(f"Cannot dynamically locate object {name}!") from e
    return _locate(name)  # raises on failure
data_generation/grit/third_party/CenterNet2/detectron2/utils/serialize.py
0 → 100644
View file @
b1e6136c
# Copyright (c) Facebook, Inc. and its affiliates.
import
cloudpickle
class PicklableWrapper(object):
    """
    Wrap an object to make it more picklable. Note that it uses
    heavy-weight serialization libraries that are slower than pickle,
    so it is best used only on closures (which are usually not picklable).

    This is a simplified version of
    https://github.com/joblib/joblib/blob/master/joblib/externals/loky/cloudpickle_wrapper.py
    """

    def __init__(self, obj):
        # Unwrap nested wrappers so that wrapping an object twice is a no-op.
        while isinstance(obj, PicklableWrapper):
            obj = obj._obj
        self._obj = obj

    def __reduce__(self):
        # Serialize through cloudpickle so closures/lambdas survive pickling.
        payload = cloudpickle.dumps(self._obj)
        return (cloudpickle.loads, (payload,))

    def __call__(self, *args, **kwargs):
        return self._obj(*args, **kwargs)

    def __getattr__(self, attr):
        # Delegate attribute access so the wrapper can be used seamlessly
        # in place of the wrapped object.
        if attr == "_obj":
            return getattr(self, attr)
        return getattr(self._obj, attr)
data_generation/grit/third_party/CenterNet2/detectron2/utils/testing.py
0 → 100644
View file @
b1e6136c
# Copyright (c) Facebook, Inc. and its affiliates.
import
io
import
numpy
as
np
import
torch
from
detectron2
import
model_zoo
from
detectron2.data
import
DatasetCatalog
from
detectron2.data.detection_utils
import
read_image
from
detectron2.modeling
import
build_model
from
detectron2.structures
import
Boxes
,
Instances
,
ROIMasks
from
detectron2.utils.file_io
import
PathManager
"""
Internal utilities for tests. Don't use except for writing tests.
"""
def get_model_no_weights(config_path):
    """
    Like model_zoo.get, but do not load any weights (even pretrained).
    """
    config = model_zoo.get_config(config_path)
    if not torch.cuda.is_available():
        # Fall back to CPU so the model can be built on GPU-less machines.
        config.MODEL.DEVICE = "cpu"
    return build_model(config)
def random_boxes(num_boxes, max_coord=100, device="cpu"):
    """
    Create a random Nx4 boxes tensor, with coordinates < max_coord.
    """
    half_range = max_coord * 0.5
    out = torch.rand(num_boxes, 4, device=device) * half_range
    # Tiny boxes cause numerical instability in box regression.
    out.clamp_(min=1.0)
    # torchvision instead does ``boxes[:, 2:] += torch.rand(N, 2) * 100``,
    # which does not guarantee x2 >= x1 and y2 >= y1; adding the top-left
    # corner to the bottom-right coordinates does.
    out[:, 2:] += out[:, :2]
    return out
def get_sample_coco_image(tensor=True):
    """
    Load one sample COCO image, in BGR color, for use in tests.

    Args:
        tensor (bool): if True, returns 3xHxW tensor.
            else, returns a HxWx3 numpy array.

    Returns:
        an image, in BGR color.
    """
    try:
        path = DatasetCatalog.get("coco_2017_val_100")[0]["file_name"]
        # FileNotFoundError is an OSError, so the except clause below catches it.
        if not PathManager.exists(path):
            raise FileNotFoundError()
    except IOError:
        # for public CI to run
        path = PathManager.get_local_path(
            "http://images.cocodataset.org/train2017/000000000009.jpg"
        )
    image = read_image(path, format="BGR")
    if not tensor:
        return image
    # HxWx3 -> 3xHxW, made contiguous before wrapping as a tensor.
    return torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1)))
def convert_scripted_instances(instances):
    """
    Convert a scripted Instances object to a regular :class:`Instances` object
    """
    assert hasattr(instances, "image_size"), (
        f"Expect an Instances object, but got {type(instances)}!"
    )
    converted = Instances(instances.image_size)
    # Scripted Instances store each field as a "_<name>" attribute;
    # copy over every field that is actually present.
    for field_name in instances._field_names:
        value = getattr(instances, "_" + field_name, None)
        if value is not None:
            converted.set(field_name, value)
    return converted
def assert_instances_allclose(input, other, *, rtol=1e-5, msg="", size_as_tensor=False):
    """
    Assert that two Instances objects are (approximately) equal.

    Args:
        input, other (Instances):
        rtol (float): relative tolerance for float fields; the absolute
            tolerance is scaled by each field's magnitude (see below).
        msg (str): optional prefix for assertion-failure messages.
        size_as_tensor: compare image_size of the Instances as tensors (instead of tuples).
            Useful for comparing outputs of tracing.
    """
    # Accept scripted/traced objects by converting them to regular Instances.
    if not isinstance(input, Instances):
        input = convert_scripted_instances(input)
    if not isinstance(other, Instances):
        other = convert_scripted_instances(other)

    # Normalize the message prefix so it always ends with a single space.
    if not msg:
        msg = "Two Instances are different! "
    else:
        msg = msg.rstrip() + " "

    size_error_msg = msg + f"image_size is {input.image_size} vs. {other.image_size}!"
    if size_as_tensor:
        # Traced outputs may carry image_size as tensors; compare elementwise.
        assert torch.equal(
            torch.tensor(input.image_size), torch.tensor(other.image_size)
        ), size_error_msg
    else:
        assert input.image_size == other.image_size, size_error_msg
    # Both objects must carry exactly the same set of fields.
    fields = sorted(input.get_fields().keys())
    fields_other = sorted(other.get_fields().keys())
    assert fields == fields_other, msg + f"Fields are {fields} vs {fields_other}!"

    for f in fields:
        val1, val2 = input.get(f), other.get(f)
        if isinstance(val1, (Boxes, ROIMasks)):
            # boxes in the range of O(100) and can have a larger tolerance
            assert torch.allclose(val1.tensor, val2.tensor, atol=100 * rtol), (
                msg + f"Field {f} differs too much!"
            )
        elif isinstance(val1, torch.Tensor):
            if val1.dtype.is_floating_point:
                # Scale the absolute tolerance by the field's own magnitude.
                mag = torch.abs(val1).max().cpu().item()
                assert torch.allclose(val1, val2, atol=mag * rtol), (
                    msg + f"Field {f} differs too much!"
                )
            else:
                # Integer/bool tensors must match exactly.
                assert torch.equal(val1, val2), msg + f"Field {f} is different!"
        else:
            raise ValueError(f"Don't know how to compare type {type(val1)}")
def reload_script_model(module):
    """
    Save a jit module and load it back.
    Similar to the `getExportImportCopy` function in torch/testing/
    """
    # Round-trip through an in-memory buffer instead of a temporary file.
    serialized = io.BytesIO()
    torch.jit.save(module, serialized)
    serialized.seek(0)
    return torch.jit.load(serialized)
data_generation/grit/third_party/CenterNet2/detectron2/utils/video_visualizer.py
0 → 100644
View file @
b1e6136c
# Copyright (c) Facebook, Inc. and its affiliates.
import
numpy
as
np
import
pycocotools.mask
as
mask_util
from
detectron2.utils.visualizer
import
(
ColorMode
,
Visualizer
,
_create_text_labels
,
_PanopticPrediction
,
)
from
.colormap
import
random_color
class
_DetectedInstance
:
"""
Used to store data about detected objects in video frame,
in order to transfer color to objects in the future frames.
Attributes:
label (int):
bbox (tuple[float]):
mask_rle (dict):
color (tuple[float]): RGB colors in range (0, 1)
ttl (int): time-to-live for the instance. For example, if ttl=2,
the instance color can be transferred to objects in the next two frames.
"""
__slots__
=
[
"label"
,
"bbox"
,
"mask_rle"
,
"color"
,
"ttl"
]
def
__init__
(
self
,
label
,
bbox
,
mask_rle
,
color
,
ttl
):
self
.
label
=
label
self
.
bbox
=
bbox
self
.
mask_rle
=
mask_rle
self
.
color
=
color
self
.
ttl
=
ttl
class VideoVisualizer:
    """
    Stateful visualizer for video: draws predictions on successive frames,
    using a naive IoU-based matching heuristic (:meth:`_assign_colors`) so
    that the same object keeps the same color across frames.
    """

    def __init__(self, metadata, instance_mode=ColorMode.IMAGE):
        """
        Args:
            metadata (MetadataCatalog): image metadata.
            instance_mode (ColorMode): only IMAGE and IMAGE_BW are supported.
        """
        self.metadata = metadata
        # _DetectedInstance objects remembered from previous frames; used to
        # transfer colors to matching instances in the current frame.
        self._old_instances = []
        assert instance_mode in [
            ColorMode.IMAGE,
            ColorMode.IMAGE_BW,
        ], "Other mode not supported yet."
        self._instance_mode = instance_mode

    def draw_instance_predictions(self, frame, predictions):
        """
        Draw instance-level prediction results on an image.

        Args:
            frame (ndarray): an RGB image of shape (H, W, C), in the range [0, 255].
            predictions (Instances): the output of an instance detection/segmentation
                model. Following fields will be used to draw:
                "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle").

        Returns:
            output (VisImage): image object with visualizations.
        """
        frame_visualizer = Visualizer(frame, self.metadata)
        num_instances = len(predictions)
        if num_instances == 0:
            return frame_visualizer.output

        boxes = predictions.pred_boxes.tensor.numpy() if predictions.has("pred_boxes") else None
        scores = predictions.scores if predictions.has("scores") else None
        classes = predictions.pred_classes.numpy() if predictions.has("pred_classes") else None
        keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None
        colors = predictions.COLOR if predictions.has("COLOR") else [None] * len(predictions)
        durations = predictions.ID_duration if predictions.has("ID_duration") else None
        duration_threshold = self.metadata.get("duration_threshold", 0)
        # FIX: previously `visibilities` stayed None when "ID_duration" was
        # absent, which made the zip()/filter() calls below raise TypeError.
        # Treat every instance as visible in that case.
        visibilities = (
            [True] * num_instances
            if durations is None
            else [x > duration_threshold for x in durations]
        )

        if predictions.has("pred_masks"):
            masks = predictions.pred_masks
            # mask IOU is not yet enabled
            # masks_rles = mask_util.encode(np.asarray(masks.permute(1, 2, 0), order="F"))
            # assert len(masks_rles) == num_instances
        else:
            masks = None

        # NOTE(review): this assumes "pred_classes" and "pred_boxes" are
        # present (classes/boxes would be None otherwise) — confirm callers.
        detected = [
            _DetectedInstance(classes[i], boxes[i], mask_rle=None, color=colors[i], ttl=8)
            for i in range(num_instances)
        ]
        if not predictions.has("COLOR"):
            # Reuse colors from matching instances of previous frames.
            colors = self._assign_colors(detected)

        labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None))

        if self._instance_mode == ColorMode.IMAGE_BW:
            # any() returns uint8 tensor
            frame_visualizer.output.reset_image(
                frame_visualizer._create_grayscale_image(
                    (masks.any(dim=0) > 0).numpy() if masks is not None else None
                )
            )
            alpha = 0.3
        else:
            alpha = 0.5

        # Keep only the labels/colors of visible instances.
        labels = (
            None
            if labels is None
            else [y[0] for y in filter(lambda x: x[1], zip(labels, visibilities))]
        )  # noqa
        assigned_colors = (
            None
            if colors is None
            else [y[0] for y in filter(lambda x: x[1], zip(colors, visibilities))]
        )  # noqa
        frame_visualizer.overlay_instances(
            boxes=None if masks is not None else boxes[visibilities],  # boxes are a bit distracting
            masks=None if masks is None else masks[visibilities],
            labels=labels,
            keypoints=None if keypoints is None else keypoints[visibilities],
            assigned_colors=assigned_colors,
            alpha=alpha,
        )

        return frame_visualizer.output

    def draw_sem_seg(self, frame, sem_seg, area_threshold=None):
        """
        Draw semantic segmentation on a frame.

        Args:
            frame (ndarray): an RGB image of shape (H, W, C), in the range [0, 255].
            sem_seg (ndarray or Tensor): semantic segmentation of shape (H, W),
                each value is the integer label.
            area_threshold (Optional[int]): only draw segmentations larger than the threshold

        Returns:
            output (VisImage): image object with visualizations.
        """
        # don't need to do anything special; per-frame drawing suffices
        frame_visualizer = Visualizer(frame, self.metadata)
        # FIX: `area_threshold` was previously hard-coded to None here,
        # silently ignoring the caller-provided value.
        frame_visualizer.draw_sem_seg(sem_seg, area_threshold=area_threshold)
        return frame_visualizer.output

    def draw_panoptic_seg_predictions(
        self, frame, panoptic_seg, segments_info, area_threshold=None, alpha=0.5
    ):
        """
        Draw panoptic segmentation ("stuff" regions first, then instances) on a frame.

        Args:
            frame (ndarray): an RGB image of shape (H, W, C), in the range [0, 255].
            panoptic_seg, segments_info: panoptic prediction, as consumed by
                :class:`_PanopticPrediction`.
            area_threshold (Optional[int]): only draw segmentations larger than the threshold
            alpha (float): mask transparency.

        Returns:
            output (VisImage): image object with visualizations.
        """
        frame_visualizer = Visualizer(frame, self.metadata)
        pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata)

        if self._instance_mode == ColorMode.IMAGE_BW:
            frame_visualizer.output.reset_image(
                frame_visualizer._create_grayscale_image(pred.non_empty_mask())
            )

        # draw mask for all semantic segments first i.e. "stuff"
        for mask, sinfo in pred.semantic_masks():
            category_idx = sinfo["category_id"]
            try:
                mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]
            except AttributeError:
                # Metadata has no stuff_colors; let the visualizer pick one.
                mask_color = None

            frame_visualizer.draw_binary_mask(
                mask,
                color=mask_color,
                text=self.metadata.stuff_classes[category_idx],
                alpha=alpha,
                area_threshold=area_threshold,
            )

        all_instances = list(pred.instance_masks())
        if len(all_instances) == 0:
            return frame_visualizer.output
        # draw mask for all instances second
        masks, sinfo = list(zip(*all_instances))
        num_instances = len(masks)
        # Encode masks as RLE (HxWxN, Fortran order, as pycocotools expects).
        masks_rles = mask_util.encode(
            np.asarray(np.asarray(masks).transpose(1, 2, 0), dtype=np.uint8, order="F")
        )
        assert len(masks_rles) == num_instances

        category_ids = [x["category_id"] for x in sinfo]
        detected = [
            _DetectedInstance(category_ids[i], bbox=None, mask_rle=masks_rles[i], color=None, ttl=8)
            for i in range(num_instances)
        ]
        colors = self._assign_colors(detected)
        labels = [self.metadata.thing_classes[k] for k in category_ids]

        frame_visualizer.overlay_instances(
            boxes=None,
            masks=masks,
            labels=labels,
            keypoints=None,
            assigned_colors=colors,
            alpha=alpha,
        )
        return frame_visualizer.output

    def _assign_colors(self, instances):
        """
        Naive tracking heuristics to assign same color to the same instance,
        will update the internal state of tracked instances.

        Args:
            instances (list[_DetectedInstance]): detections in the current frame.

        Returns:
            list[tuple[float]]: list of colors.
        """

        # Compute iou with either boxes or masks:
        # FIX: `np.bool` (an alias of the builtin) was deprecated in NumPy 1.20
        # and removed in 1.24; use the builtin `bool` instead.
        is_crowd = np.zeros((len(instances),), dtype=bool)
        if instances[0].bbox is None:
            assert instances[0].mask_rle is not None
            # use mask iou only when box iou is None
            # because box seems good enough
            rles_old = [x.mask_rle for x in self._old_instances]
            rles_new = [x.mask_rle for x in instances]
            ious = mask_util.iou(rles_old, rles_new, is_crowd)
            threshold = 0.5
        else:
            boxes_old = [x.bbox for x in self._old_instances]
            boxes_new = [x.bbox for x in instances]
            ious = mask_util.iou(boxes_old, boxes_new, is_crowd)
            threshold = 0.6
        if len(ious) == 0:
            ious = np.zeros((len(self._old_instances), len(instances)), dtype="float32")

        # Only allow matching instances of the same label:
        for old_idx, old in enumerate(self._old_instances):
            for new_idx, new in enumerate(instances):
                if old.label != new.label:
                    ious[old_idx, new_idx] = 0

        matched_new_per_old = np.asarray(ious).argmax(axis=1)
        max_iou_per_old = np.asarray(ious).max(axis=1)

        # Try to find match for each old instance:
        extra_instances = []
        for idx, inst in enumerate(self._old_instances):
            if max_iou_per_old[idx] > threshold:
                newidx = matched_new_per_old[idx]
                if instances[newidx].color is None:
                    # Transfer the tracked color to the matched new instance.
                    instances[newidx].color = inst.color
                continue
            # If an old instance does not match any new instances,
            # keep it for the next frame in case it is just missed by the detector
            inst.ttl -= 1
            if inst.ttl > 0:
                extra_instances.append(inst)

        # Assign random color to newly-detected instances:
        for inst in instances:
            if inst.color is None:
                inst.color = random_color(rgb=True, maximum=1)
        self._old_instances = instances[:] + extra_instances
        return [d.color for d in instances]
Prev
1
…
17
18
19
20
21
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment