"test/ref_ops_test.cpp" did not exist on "3eb4f7756ae321cc61af866248f9c1766bccd22a"
Commit a8562a56 authored by luopl

Initial commit
# model settings
model = dict(
type='RPN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=None,
rpn_head=dict(
type='RPNHead',
in_channels=1024,
feat_channels=1024,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0)))
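# Illustrative sketch (not part of this commit): a base model config like the
# RPN definition above is normally not run directly; it is loaded (or inherited
# via `_base_`) and then selectively overridden. The config path below is
# hypothetical.
from mmengine.config import Config

cfg = Config.fromfile('configs/_base_/models/rpn_r50-caffe-c4.py')  # hypothetical path
cfg.merge_from_dict({'model.rpn_head.anchor_generator.scales': [4, 8]})
print(cfg.model.rpn_head.anchor_generator.scales)  # -> [4, 8]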
# model settings
model = dict(
type='RPN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0)))
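# Illustrative note (not part of this commit): with the anchor settings above
# (scales=[8], strides=[4, 8, 16, 32, 64]), the square base anchor at each FPN
# level has an edge length of scale * stride; the ratios 0.5/1.0/2.0 then give
# boxes of roughly the same area but different aspect ratios.
for stride in [4, 8, 16, 32, 64]:
    edge = 8 * stride  # scales[0] * stride
    print(f'stride {stride:>2}: base anchor {edge}x{edge} px')
# -> 32x32, 64x64, 128x128, 256x256, 512x512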
# model settings
input_size = 300
model = dict(
type='SingleStageDetector',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[1, 1, 1],
bgr_to_rgb=True,
pad_size_divisor=1),
backbone=dict(
type='SSDVGG',
depth=16,
with_last_pool=False,
ceil_mode=True,
out_indices=(3, 4),
out_feature_indices=(22, 34),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://vgg16_caffe')),
neck=dict(
type='SSDNeck',
in_channels=(512, 1024),
out_channels=(512, 1024, 512, 256, 256, 256),
level_strides=(2, 2, 1, 1),
level_paddings=(1, 1, 0, 0),
l2_norm_scale=20),
bbox_head=dict(
type='SSDHead',
in_channels=(512, 1024, 512, 256, 256, 256),
num_classes=80,
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2])),
# model training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
sampler=dict(type='PseudoSampler'),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False),
test_cfg=dict(
nms_pre=1000,
nms=dict(type='nms', iou_threshold=0.45),
min_bbox_size=0,
score_thr=0.02,
max_per_img=200))
cudnn_benchmark = True
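# Illustrative sketch (not part of this commit): because MMDetection configs
# are plain Python, the module-level `input_size` variable above is referenced
# by the SSD anchor generator when this file is evaluated, so both fields end
# up with the same value. The config path below is hypothetical.
from mmengine.config import Config

cfg = Config.fromfile('configs/_base_/models/ssd300.py')  # hypothetical path
assert cfg.input_size == 300
assert cfg.model.bbox_head.anchor_generator.input_size == cfg.input_size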
# training schedule for 1x
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means whether to enable automatic LR scaling by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
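# Illustrative sketch (not part of this commit): when `enable=True`, the
# automatic LR scaling above follows the linear scaling rule, i.e. the
# optimizer LR is multiplied by (actual total batch size / base_batch_size).
base_lr, base_batch_size = 0.02, 16
for total_batch_size in (8, 16, 32):
    scaled = base_lr * total_batch_size / base_batch_size
    print(f'total batch size {total_batch_size}: lr = {scaled}')
# -> 0.01, 0.02, 0.04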
# training schedule for 20e
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=20, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=20,
by_epoch=True,
milestones=[16, 19],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means whether to enable automatic LR scaling by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
# training schedule for 2x
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=24, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means whether to enable automatic LR scaling by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
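# Illustrative sketch (not part of this commit): the per-epoch learning rate
# produced by the 2x schedule above, once the 500-iteration linear warmup has
# finished (MultiStepLR milestones at epochs 16 and 22, decay factor 0.1).
base_lr, milestones, gamma = 0.02, (16, 22), 0.1
for epoch in range(24):
    lr = base_lr * gamma ** sum(epoch >= m for m in milestones)
    print(f'epoch {epoch:2d}: lr = {lr:.5f}')
# epochs 0-15 -> 0.02, epochs 16-21 -> 0.002, epochs 22-23 -> 0.0002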
openxlab: true
voc2007:
dataset: OpenDataLab/PASCAL_VOC2007
download_root: data
data_root: data
script: tools/dataset_converters/scripts/preprocess_voc2007.sh
voc2012:
dataset: OpenDataLab/PASCAL_VOC2012
download_root: data
data_root: data
script: tools/dataset_converters/scripts/preprocess_voc2012.sh
coco2017:
dataset: OpenDataLab/COCO_2017
download_root: data
data_root: data/coco
script: tools/dataset_converters/scripts/preprocess_coco2017.sh
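# Illustrative sketch (not part of this commit, and not the downloader shipped
# with MMDetection): one way a YAML config like the one above could be
# consumed. It only prints the plan; the actual download and preprocessing
# commands are omitted because their exact interfaces are not shown here.
import yaml  # requires PyYAML

def print_dataset_plan(cfg_path: str) -> None:
    with open(cfg_path) as f:
        cfg = yaml.safe_load(f)
    cfg.pop('openxlab', None)  # global switch for the OpenDataLab backend
    for name, item in cfg.items():
        print(f'{name}: fetch {item["dataset"]} into {item["download_root"]}, '
              f'place data under {item["data_root"]}, '
              f'then run {item["script"]}')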
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import mmcv
import numpy as np
from mmengine.utils import scandir
try:
import imageio
except ImportError:
imageio = None
# TODO verify after refactoring analyze_results.py
def parse_args():
parser = argparse.ArgumentParser(description='Create GIF for demo')
parser.add_argument(
'image_dir',
help='directory of the result images '
'generated by analyze_results.py')
parser.add_argument(
'--out',
type=str,
default='result.gif',
help='path where the gif will be saved')
args = parser.parse_args()
return args
def _generate_batch_data(sampler, batch_size):
batch = []
for idx in sampler:
batch.append(idx)
if len(batch) == batch_size:
yield batch
batch = []
if len(batch) > 0:
yield batch
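# Illustrative note (not part of this commit): _generate_batch_data chunks any
# iterable of indices into fixed-size batches and also yields the trailing
# partial batch, e.g.
#   list(_generate_batch_data(range(7), 3)) == [[0, 1, 2], [3, 4, 5], [6]]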
def create_gif(frames, gif_name, duration=2):
"""Create gif through imageio.
Args:
frames (list[ndarray]): Image frames
gif_name (str): Saved gif name
duration (int): Display interval in seconds. Default: 2
"""
if imageio is None:
raise RuntimeError('imageio is not installed, '
'please use "pip install imageio" to install it')
imageio.mimsave(gif_name, frames, 'GIF', duration=duration)
def create_frame_by_matplotlib(image_dir,
nrows=1,
fig_size=(300, 300),
font_size=15):
"""Create gif frame image through matplotlib.
Args:
image_dir (str): Root directory of result images
nrows (int): Number of rows displayed, Default: 1
fig_size (tuple): Figure size of the pyplot figure.
Default: (300, 300)
font_size (int): Font size of texts. Default: 15
Returns:
list[ndarray]: image frames
"""
result_dir_names = os.listdir(image_dir)
assert len(result_dir_names) == 2
# Longer length has higher priority
result_dir_names.reverse()
images_list = []
for dir_names in result_dir_names:
images_list.append(scandir(osp.join(image_dir, dir_names)))
frames = []
for paths in _generate_batch_data(zip(*images_list), nrows):
fig, axes = plt.subplots(nrows=nrows, ncols=2)
fig.suptitle('Good/bad case selected according '
'to the COCO mAP of the single image')
det_patch = mpatches.Patch(color='salmon', label='prediction')
gt_patch = mpatches.Patch(color='royalblue', label='ground truth')
# bbox_to_anchor may need to be finetuned
plt.legend(
handles=[det_patch, gt_patch],
bbox_to_anchor=(1, -0.18),
loc='lower right',
borderaxespad=0.)
if nrows == 1:
axes = [axes]
dpi = fig.get_dpi()
# set fig size and margin
fig.set_size_inches(
(fig_size[0] * 2 + fig_size[0] // 20) / dpi,
(fig_size[1] * nrows + fig_size[1] // 3) / dpi,
)
fig.tight_layout()
# set subplot margin
plt.subplots_adjust(
hspace=.05,
wspace=0.05,
left=0.02,
right=0.98,
bottom=0.02,
top=0.98)
for i, (path_tuple, ax_tuple) in enumerate(zip(paths, axes)):
image_path_left = osp.join(
osp.join(image_dir, result_dir_names[0], path_tuple[0]))
image_path_right = osp.join(
osp.join(image_dir, result_dir_names[1], path_tuple[1]))
image_left = mmcv.imread(image_path_left)
image_left = mmcv.rgb2bgr(image_left)
image_right = mmcv.imread(image_path_right)
image_right = mmcv.rgb2bgr(image_right)
if i == 0:
ax_tuple[0].set_title(
result_dir_names[0], fontdict={'size': font_size})
ax_tuple[1].set_title(
result_dir_names[1], fontdict={'size': font_size})
ax_tuple[0].imshow(
image_left, extent=(0, *fig_size, 0), interpolation='bilinear')
ax_tuple[0].axis('off')
ax_tuple[1].imshow(
image_right,
extent=(0, *fig_size, 0),
interpolation='bilinear')
ax_tuple[1].axis('off')
canvas = fig.canvas
s, (width, height) = canvas.print_to_buffer()
buffer = np.frombuffer(s, dtype='uint8')
img_rgba = buffer.reshape(height, width, 4)
rgb, alpha = np.split(img_rgba, [3], axis=2)
img = rgb.astype('uint8')
frames.append(img)
return frames
def main():
args = parse_args()
frames = create_frame_by_matplotlib(args.image_dir)
create_gif(frames, args.out)
if __name__ == '__main__':
main()
# Copyright (c) OpenMMLab. All rights reserved.
"""Support for multi-model fusion; currently only the Weighted Boxes Fusion
(WBF) method is supported.
References: https://github.com/ZFTurbo/Weighted-Boxes-Fusion
Example:
python demo/demo_multi_model.py demo/demo.jpg \
./configs/faster_rcnn/faster-rcnn_r50-caffe_fpn_1x_coco.py \
./configs/retinanet/retinanet_r50-caffe_fpn_1x_coco.py \
--checkpoints \
https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco/faster_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.378_20200504_180032-c5925ee5.pth \ # noqa
https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_caffe_fpn_1x_coco/retinanet_r50_caffe_fpn_1x_coco_20200531-f11027c5.pth \
--weights 1 2
"""
import argparse
import os.path as osp
import mmcv
import mmengine
from mmengine.fileio import isdir, join_path, list_dir_or_file
from mmengine.logging import print_log
from mmengine.structures import InstanceData
from mmdet.apis import DetInferencer
from mmdet.models.utils import weighted_boxes_fusion
from mmdet.registry import VISUALIZERS
from mmdet.structures import DetDataSample
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif',
'.tiff', '.webp')
def parse_args():
parser = argparse.ArgumentParser(
description='MMDetection multi-model inference demo')
parser.add_argument(
'inputs', type=str, help='Input image file or folder path.')
parser.add_argument(
'config',
type=str,
nargs='*',
help='Config file(s); multiple files are supported')
parser.add_argument(
'--checkpoints',
type=str,
nargs='*',
help='Checkpoint file(s); multiple files are supported and '
'must correspond one-to-one with the configs above',
)
parser.add_argument(
'--weights',
type=float,
nargs='*',
default=None,
help='weight for each model; must correspond '
'one-to-one with the configs above')
parser.add_argument(
'--fusion-iou-thr',
type=float,
default=0.55,
help='IoU threshold for two boxes to be considered a match in WBF')
parser.add_argument(
'--skip-box-thr',
type=float,
default=0.0,
help='exclude boxes with a score lower than this threshold in WBF')
parser.add_argument(
'--conf-type',
type=str,
default='avg', # avg, max, box_and_model_avg, absent_model_aware_avg
help='how to compute the confidence of the fused boxes in WBF')
parser.add_argument(
'--out-dir',
type=str,
default='outputs',
help='Output directory of images or prediction results.')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--pred-score-thr',
type=float,
default=0.3,
help='bbox score threshold')
parser.add_argument(
'--batch-size', type=int, default=1, help='Inference batch size.')
parser.add_argument(
'--show',
action='store_true',
help='Display the image in a popup window.')
parser.add_argument(
'--no-save-vis',
action='store_true',
help='Do not save detection vis results')
parser.add_argument(
'--no-save-pred',
action='store_true',
help='Do not save detection json results')
parser.add_argument(
'--palette',
default='none',
choices=['coco', 'voc', 'citys', 'random', 'none'],
help='Color palette used for visualization')
args = parser.parse_args()
if args.no_save_vis and args.no_save_pred:
args.out_dir = ''
return args
def main():
args = parse_args()
results = []
cfg_visualizer = None
dataset_meta = None
inputs = []
filename_list = []
if isdir(args.inputs):
dir = list_dir_or_file(
args.inputs, list_dir=False, suffix=IMG_EXTENSIONS)
for filename in dir:
img = mmcv.imread(join_path(args.inputs, filename))
inputs.append(img)
filename_list.append(filename)
else:
img = mmcv.imread(args.inputs)
inputs.append(img)
img_name = osp.basename(args.inputs)
filename_list.append(img_name)
for i, (config,
checkpoint) in enumerate(zip(args.config, args.checkpoints)):
inferencer = DetInferencer(
config, checkpoint, device=args.device, palette=args.palette)
result_raw = inferencer(
inputs=inputs,
batch_size=args.batch_size,
no_save_vis=True,
pred_score_thr=args.pred_score_thr)
if i == 0:
cfg_visualizer = inferencer.cfg.visualizer
dataset_meta = inferencer.model.dataset_meta
results = [{
'bboxes_list': [],
'scores_list': [],
'labels_list': []
} for _ in range(len(result_raw['predictions']))]
for res, raw in zip(results, result_raw['predictions']):
res['bboxes_list'].append(raw['bboxes'])
res['scores_list'].append(raw['scores'])
res['labels_list'].append(raw['labels'])
visualizer = VISUALIZERS.build(cfg_visualizer)
visualizer.dataset_meta = dataset_meta
for i in range(len(results)):
bboxes, scores, labels = weighted_boxes_fusion(
results[i]['bboxes_list'],
results[i]['scores_list'],
results[i]['labels_list'],
weights=args.weights,
iou_thr=args.fusion_iou_thr,
skip_box_thr=args.skip_box_thr,
conf_type=args.conf_type)
pred_instances = InstanceData()
pred_instances.bboxes = bboxes
pred_instances.scores = scores
pred_instances.labels = labels
fusion_result = DetDataSample(pred_instances=pred_instances)
img_name = filename_list[i]
if not args.no_save_pred:
out_json_path = (
args.out_dir + '/preds/' + img_name.split('.')[0] + '.json')
mmengine.dump(
{
'labels': labels.tolist(),
'scores': scores.tolist(),
'bboxes': bboxes.tolist()
}, out_json_path)
out_file = osp.join(args.out_dir, 'vis',
img_name) if not args.no_save_vis else None
visualizer.add_datasample(
img_name,
inputs[i][..., ::-1],
data_sample=fusion_result,
show=args.show,
draw_gt=False,
wait_time=0,
pred_score_thr=args.pred_score_thr,
out_file=out_file)
if not args.no_save_vis:
print_log(f'results have been saved at {args.out_dir}')
if __name__ == '__main__':
main()
# Copyright (c) OpenMMLab. All rights reserved.
"""Image Demo.
This script adopts the new inference class. It currently supports image path,
np.array and folder input formats; video and webcam input will be supported
in the future.
Example:
Save visualizations and predictions results::
python demo/image_demo.py demo/demo.jpg rtmdet-s
python demo/image_demo.py demo/demo.jpg \
configs/rtmdet/rtmdet_s_8xb32-300e_coco.py \
--weights rtmdet_s_8xb32-300e_coco_20220905_161602-387a891e.pth
python demo/image_demo.py demo/demo.jpg \
glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365 --texts bench
python demo/image_demo.py demo/demo.jpg \
glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365 --texts 'bench . car .'
python demo/image_demo.py demo/demo.jpg \
glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365
--texts 'bench . car .' -c
python demo/image_demo.py demo/demo.jpg \
glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365 \
--texts 'There are a lot of cars here.'
python demo/image_demo.py demo/demo.jpg \
glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365 \
--texts '$: coco'
python demo/image_demo.py demo/demo.jpg \
glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365 \
--texts '$: lvis' --pred-score-thr 0.7 \
--palette random --chunked-size 80
python demo/image_demo.py demo/demo.jpg \
grounding_dino_swin-t_pretrain_obj365_goldg_cap4m \
--texts '$: lvis' --pred-score-thr 0.4 \
--palette random --chunked-size 80
python demo/image_demo.py demo/demo.jpg \
grounding_dino_swin-t_pretrain_obj365_goldg_cap4m \
--texts "a red car in the upper right corner" \
--tokens-positive -1
Visualize prediction results::
python demo/image_demo.py demo/demo.jpg rtmdet-ins-s --show
python demo/image_demo.py demo/demo.jpg rtmdet-ins_s_8xb32-300e_coco \
--show
"""
import ast
from argparse import ArgumentParser
from mmengine.logging import print_log
from mmdet.apis import DetInferencer
from mmdet.evaluation import get_classes
def parse_args():
parser = ArgumentParser()
parser.add_argument(
'inputs', type=str, help='Input image file or folder path.')
parser.add_argument(
'model',
type=str,
help='Config file, checkpoint (.pth) file, or a model name/alias '
'defined in the metafile. If a .pth weights file is given, the '
'model configuration is read from the checkpoint itself.')
parser.add_argument('--weights', default=None, help='Checkpoint file')
parser.add_argument(
'--out-dir',
type=str,
default='outputs',
help='Output directory of images or prediction results.')
# An input of the form `$: xxx` indicates that the prompt is built from
# the class names of a dataset.
# Supported: $: coco, $: voc, $: cityscapes, $: lvis, $: imagenet_det.
# See `mmdet/evaluation/functional/class_names.py` for details.
parser.add_argument(
'--texts', help='text prompt, such as "bench . car .", "$: coco"')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--pred-score-thr',
type=float,
default=0.3,
help='bbox score threshold')
parser.add_argument(
'--batch-size', type=int, default=1, help='Inference batch size.')
parser.add_argument(
'--show',
action='store_true',
help='Display the image in a popup window.')
parser.add_argument(
'--no-save-vis',
action='store_true',
help='Do not save detection vis results')
parser.add_argument(
'--no-save-pred',
action='store_true',
help='Do not save detection json results')
parser.add_argument(
'--print-result',
action='store_true',
help='Whether to print the results.')
parser.add_argument(
'--palette',
default='none',
choices=['coco', 'voc', 'citys', 'random', 'none'],
help='Color palette used for visualization')
# only for GLIP and Grounding DINO
parser.add_argument(
'--custom-entities',
'-c',
action='store_true',
help='Whether to customize entity names. '
'If so, the input text should be in the '
'"cls_name1 . cls_name2 . cls_name3 ." format')
parser.add_argument(
'--chunked-size',
'-s',
type=int,
default=-1,
help='If the number of categories is very large, '
'you can set this parameter to split the text prompt into chunks '
'of this size and run multiple predictions.')
# only for Grounding DINO
parser.add_argument(
'--tokens-positive',
'-p',
type=str,
help='Used to specify which locations in the input text are of '
'interest to the user. -1 indicates that no area is of interest, '
'None indicates ignoring this parameter. '
'The two-dimensional array represents the start and end positions.')
call_args = vars(parser.parse_args())
if call_args['no_save_vis'] and call_args['no_save_pred']:
call_args['out_dir'] = ''
if call_args['model'].endswith('.pth'):
print_log('The model argument is a weights file, automatically '
'assigning it to --weights')
call_args['weights'] = call_args['model']
call_args['model'] = None
if call_args['texts'] is not None:
if call_args['texts'].startswith('$:'):
dataset_name = call_args['texts'][3:].strip()
class_names = get_classes(dataset_name)
call_args['texts'] = [tuple(class_names)]
if call_args['tokens_positive'] is not None:
call_args['tokens_positive'] = ast.literal_eval(
call_args['tokens_positive'])
init_kws = ['model', 'weights', 'device', 'palette']
init_args = {}
for init_kw in init_kws:
init_args[init_kw] = call_args.pop(init_kw)
return init_args, call_args
def main():
init_args, call_args = parse_args()
# TODO: Video and webcam input are currently not supported, and inference
# may consume too much memory if your input folder contains many images.
# This will be optimized later.
inferencer = DetInferencer(**init_args)
chunked_size = call_args.pop('chunked_size')
inferencer.model.test_cfg.chunked_size = chunked_size
inferencer(**call_args)
if call_args['out_dir'] != '' and not (call_args['no_save_vis']
and call_args['no_save_pred']):
print_log(f'results have been saved at {call_args["out_dir"]}')
if __name__ == '__main__':
main()
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "gCMycQ_2U8SA"
},
"source": [
"<div align=\"center\">\n",
" <img src=\"https://github.com/open-mmlab/mmdetection/raw/3.x/resources/mmdet-logo.png\" width=\"600\"/>\n",
" <div>&nbsp;</div>\n",
" <div align=\"center\">\n",
" <b><font size=\"5\">OpenMMLab website</font></b>\n",
" <sup>\n",
" <a href=\"https://openmmlab.com\">\n",
" <i><font size=\"4\">HOT</font></i>\n",
" </a>\n",
" </sup>\n",
" &nbsp;&nbsp;&nbsp;&nbsp;\n",
" <b><font size=\"5\">OpenMMLab platform</font></b>\n",
" <sup>\n",
" <a href=\"https://platform.openmmlab.com\">\n",
" <i><font size=\"4\">TRY IT OUT</font></i>\n",
" </a>\n",
" </sup>\n",
" </div>\n",
" <div>&nbsp;</div>\n",
"\n",
"<a href=\"https://colab.research.google.com/github/open-mmlab/mmdetection/blob/dev-3.x/demo/inference_demo.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
"\n",
"[![PyPI](https://img.shields.io/pypi/v/mmdet)](https://pypi.org/project/mmdet)\n",
"[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdetection.readthedocs.io/en/latest/)\n",
"[![badge](https://github.com/open-mmlab/mmdetection/workflows/build/badge.svg)](https://github.com/open-mmlab/mmdetection/actions)\n",
"[![codecov](https://codecov.io/gh/open-mmlab/mmdetection/branch/master/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdetection)\n",
"[![license](https://img.shields.io/github/license/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/blob/master/LICENSE)\n",
"[![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/issues)\n",
"[![issue resolution](https://isitmaintained.com/badge/resolution/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/issues)\n",
"\n",
"[📘Documentation](https://mmdetection.readthedocs.io/en/3.x/) |\n",
"[🛠️Installation](https://mmdetection.readthedocs.io/en/3.x/get_started.html) |\n",
"[👀Model Zoo](https://mmdetection.readthedocs.io/en/3.x/model_zoo.html) |\n",
"[🆕Update News](https://mmdetection.readthedocs.io/en/3.x/notes/changelog.html) |\n",
"[🚀Ongoing Projects](https://github.com/open-mmlab/mmdetection/projects) |\n",
"[🤔Reporting Issues](https://github.com/open-mmlab/mmdetection/issues/new/choose)\n",
"\n",
"</div>\n",
"\n",
"<div align=\"center\">\n",
" <a href=\"https://openmmlab.medium.com/\" style=\"text-decoration:none;\">\n",
" <img src=\"https://user-images.githubusercontent.com/25839884/219255827-67c1a27f-f8c5-46a9-811d-5e57448c61d1.png\" width=\"3%\" alt=\"\" /></a>\n",
" <img src=\"https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png\" width=\"3%\" alt=\"\" />\n",
" <a href=\"https://discord.com/channels/1037617289144569886/1046608014234370059\" style=\"text-decoration:none;\">\n",
" <img src=\"https://user-images.githubusercontent.com/25839884/218347213-c080267f-cbb6-443e-8532-8e1ed9a58ea9.png\" width=\"3%\" alt=\"\" /></a>\n",
" <img src=\"https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png\" width=\"3%\" alt=\"\" />\n",
" <a href=\"https://twitter.com/OpenMMLab\" style=\"text-decoration:none;\">\n",
" <img src=\"https://user-images.githubusercontent.com/25839884/218346637-d30c8a0f-3eba-4699-8131-512fb06d46db.png\" width=\"3%\" alt=\"\" /></a>\n",
" <img src=\"https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png\" width=\"3%\" alt=\"\" />\n",
" <a href=\"https://www.youtube.com/openmmlab\" style=\"text-decoration:none;\">\n",
" <img src=\"https://user-images.githubusercontent.com/25839884/218346691-ceb2116a-465a-40af-8424-9f30d2348ca9.png\" width=\"3%\" alt=\"\" /></a>\n",
" <img src=\"https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png\" width=\"3%\" alt=\"\" />\n",
" <a href=\"https://space.bilibili.com/1293512903\" style=\"text-decoration:none;\">\n",
" <img src=\"https://user-images.githubusercontent.com/25839884/219026751-d7d14cce-a7c9-4e82-9942-8375fca65b99.png\" width=\"3%\" alt=\"\" /></a>\n",
" <img src=\"https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png\" width=\"3%\" alt=\"\" />\n",
" <a href=\"https://www.zhihu.com/people/openmmlab\" style=\"text-decoration:none;\">\n",
" <img src=\"https://user-images.githubusercontent.com/25839884/219026120-ba71e48b-6e94-4bd4-b4e9-b7d175b5e362.png\" width=\"3%\" alt=\"\" /></a>\n",
"</div>"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "aGYwt_UjIrqp"
},
"source": [
"# Inferencer\n",
"\n",
"In this tutorial, you will learn how to perform inference with a MMDetection `DetInferencer`.\n",
"\n",
"Let's start!\n",
"\n",
"```{note}\n",
"The commands in this tutorial are mainly for Colab.\n",
"You can click the button above, `Open in Colab`, to run this notebook in Colab.\n",
"```"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "tJxJHruNLb7Y"
},
"source": [
"## Install MMDetection"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "Wi4LPmsR66sy",
"outputId": "0077a9a3-0183-4002-fe7a-2a12f020cf69"
},
"outputs": [],
"source": [
"# Check nvcc version\n",
"!nvcc -V\n",
"# Check GCC version\n",
"!gcc --version"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "gkGnB9WyHSXB",
"outputId": "5945fe0b-13a5-4f1b-dff9-8beb1df67ab0"
},
"outputs": [],
"source": [
"# install dependencies\n",
"%pip install -U openmim\n",
"!mim install \"mmengine>=0.7.0\"\n",
"!mim install \"mmcv>=2.0.0rc4\"\n",
"\n",
"# Install mmdetection\n",
"!rm -rf mmdetection\n",
"!git clone https://github.com/open-mmlab/mmdetection.git -b dev-3.x\n",
"%cd mmdetection\n",
"\n",
"%pip install -e ."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "_YeUiqAoCaoV",
"outputId": "06e4c803-ac46-49e6-b8fa-1a85c23fa482"
},
"outputs": [],
"source": [
"from mmengine.utils import get_git_hash\n",
"from mmengine.utils.dl_utils import collect_env as collect_base_env\n",
"\n",
"import mmdet\n",
"\n",
"\n",
"def collect_env():\n",
" \"\"\"Collect the information of the running environments.\"\"\"\n",
" env_info = collect_base_env()\n",
" env_info['MMDetection'] = f'{mmdet.__version__}+{get_git_hash()[:7]}'\n",
" return env_info\n",
"\n",
"\n",
"if __name__ == '__main__':\n",
" for name, val in collect_env().items():\n",
" print(f'{name}: {val}')"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "fLgFRMtP91ue"
},
"source": [
"## `DetInferencer`\n",
"\n",
"### Basic Usage\n",
"\n",
"We use the high-level API `DetInferencer` implemented in the MMDetection. This API is created to ease the inference process. The details of the codes can be found [here](https://github.com/open-mmlab/mmdetection/blob/dev-3.x/mmdet/apis/det_inferencer.py)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 1000,
"referenced_widgets": [
"6fa2cda48fda43f9bf53a0f533392eba",
"0226fedc26044ab2abdccc4fcbe226f8"
]
},
"id": "WJHpC402p2w9",
"outputId": "c2326326-d198-4fce-ec0e-a9cc2e35ba09"
},
"outputs": [],
"source": [
"from mmdet.apis import DetInferencer\n",
"\n",
"# Initialize the DetInferencer\n",
"inferencer = DetInferencer('rtmdet_tiny_8xb32-300e_coco')\n",
"\n",
"# Perform inference\n",
"inferencer('demo/demo.jpg', out_dir='./output')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 444
},
"id": "34JfPWRRSlNh",
"outputId": "8eec8bc4-4824-47ac-b10f-41538422fb28"
},
"outputs": [],
"source": [
"# Show the output image\n",
"from PIL import Image\n",
"Image.open('./output/vis/demo.jpg')"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "-53WPeyBqRHe"
},
"source": [
"### Initialization\n",
"\n",
"Each Inferencer must be initialized with a model. You can also choose the inference device during initialization.\n",
"\n",
"#### Model Initialization\n",
"\n",
"- To infer with MMDetection's pre-trained model, passing its name to the argument `model` can work. The weights will be automatically downloaded and loaded from OpenMMLab's model zoo."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "bbMu3IPtv-cX",
"outputId": "2bceb594-06c8-4c18-e8c6-b1816b0acb23"
},
"outputs": [],
"source": [
"inferencer = DetInferencer(model='rtmdet_tiny_8xb32-300e_coco')"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "AwKtnol3TQlM"
},
"source": [
"There is a very easy to list all model names in MMDetection."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "3kYfK3ssTIQE",
"outputId": "88c4fbb9-bb92-42af-baaa-14cee5b5bdc1"
},
"outputs": [],
"source": [
"# models is a list of model names, and them will print automatically\n",
"models = DetInferencer.list_models('mmdet')"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "G-25HR9HTZvr"
},
"source": [
"You can load another weight by passing its path/url to `weights`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "j4doHX4exvS1",
"outputId": "54ac0be2-835f-4390-aa0e-3be5236d8cc9"
},
"outputs": [],
"source": [
"!mkdir ./checkpoints\n",
"!mim download mmdet --config rtmdet_tiny_8xb32-300e_coco --dest ./checkpoints"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "8LQB4EC-Tako",
"outputId": "2cc0960e-d5b5-4c3a-8a0c-cec23989f6a0"
},
"outputs": [],
"source": [
"# Setup a checkpoint file to load\n",
"checkpoint = './checkpoints/rtmdet_tiny_8xb32-300e_coco_20220902_112414-78e30dcc.pth'\n",
"\n",
"# Initialize the DetInferencer\n",
"inferencer = DetInferencer(model='rtmdet_tiny_8xb32-300e_coco', weights=checkpoint)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Atft9tjcwgeD"
},
"source": [
"- To load custom config and weight, you can pass the path to the config file to `model` and the path to the weight to `weights`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "eukDD4Rzwp9P",
"outputId": "0a34392c-0544-4a90-c844-7628d184efc0"
},
"outputs": [],
"source": [
"# Choose to use a config\n",
"config_path = './configs/rtmdet/rtmdet_tiny_8xb32-300e_coco.py'\n",
"\n",
"# Setup a checkpoint file to load\n",
"checkpoint = './checkpoints/rtmdet_tiny_8xb32-300e_coco_20220902_112414-78e30dcc.pth'\n",
"\n",
"# Initialize the DetInferencer\n",
"inferencer = DetInferencer(model=config_path, weights=checkpoint)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "FC1je9iiTuMS"
},
"source": [
"- By default, [MMEngine](https://github.com/open-mmlab/mmengine/) dumps config to the weight. If you have a weight trained on MMEngine, you can also pass the path to the weight file to `weights` without specifying `model`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "Kenyo80RTx63",
"outputId": "46fe8219-d1a7-4e45-b5a1-b6d21c30be42"
},
"outputs": [],
"source": [
"# It will raise an error if the config file cannot be found in the weight. Currently, within the MMDetection model repository, only the weights of ddq-detr-4scale_r50 can be loaded in this manner.\n",
"inferencer = DetInferencer(weights='https://download.openmmlab.com/mmdetection/v3.0/ddq/ddq-detr-4scale_r50_8xb2-12e_coco/ddq-detr-4scale_r50_8xb2-12e_coco_20230809_170711-42528127.pth')"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "b-AlYOw4T3AO"
},
"source": [
"- Passing config file to `model` without specifying `weight` will result in a randomly initialized model."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "quFQ8abYT6As"
},
"source": [
"### Device\n",
"\n",
"Each Inferencer instance is bound to a device.\n",
"By default, the best device is automatically decided by [MMEngine](https://github.com/open-mmlab/mmengine/). You can also alter the device by specifying the `device` argument. For example, you can use the following code to create an Inferencer on GPU 0."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "Wi6DRpsQPEmV",
"outputId": "9eac2017-cce6-491a-ef51-3e7e2560f107"
},
"outputs": [],
"source": [
"inferencer = DetInferencer(model='rtmdet_tiny_8xb32-300e_coco', device='cuda:0')"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "h3pgIACHUXEv"
},
"source": [
"To create an Inferencer on CPU:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "JsAotaiRUXWH",
"outputId": "531b1cb0-e986-4e0a-91c9-6d3ad65544e7"
},
"outputs": [],
"source": [
"inferencer = DetInferencer(model='rtmdet_tiny_8xb32-300e_coco', device='cpu')"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "0a4Zw5plUisX"
},
"source": [
"### Inference\n",
"\n",
"Once the Inferencer is initialized, you can directly pass in the raw data to be inferred and get the inference results from return values.\n",
"\n",
"#### Input\n",
"\n",
"Input can be either of these types:\n",
"\n",
"- str: Path/URL to the image."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 1000,
"referenced_widgets": [
"2abd7eef6f1f4b9c865a466b3dd5ef24",
"8951ec1ee7164f7ca7239a37e80e98ea"
]
},
"id": "C4McAmYdUnCL",
"outputId": "50bea3e2-a912-497e-cee9-26109dccdc12"
},
"outputs": [],
"source": [
"inferencer = DetInferencer(model='rtmdet_tiny_8xb32-300e_coco', device='cuda:0')\n",
"inferencer('demo/demo.jpg')"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "3G_TPKrMUp2T"
},
"source": [
"- array: Image in numpy array. It should be in BGR order."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 1000,
"referenced_widgets": [
"59bfd22c751f4ed4baefa466e7653315",
"0164804ae2f842fe8d2a4c5414c4a0c2"
]
},
"id": "-M1qGlfaUpha",
"outputId": "5a06cfe8-e056-4d56-c8e9-489e8f6633a0"
},
"outputs": [],
"source": [
"import mmcv\n",
"array = mmcv.imread('demo/demo.jpg')\n",
"inferencer(array)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "I45B_CtzUuh2"
},
"source": [
"- list: A list of basic types above. Each element in the list will be processed separately."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 1000,
"referenced_widgets": [
"a64a6eb038c44236b80579b2bfc4b8e3",
"eef25a0509854f98883395a2c0fc2134",
"f87f0b153b0342ad99dcd320a1302c92",
"f6634888109048069b6844e9f9b4ec13"
]
},
"id": "k1IXIWXHUwKP",
"outputId": "0af73b0b-d703-4cbc-91ad-052f0b521d50"
},
"outputs": [],
"source": [
"inferencer(['tests/data/color.jpg', 'tests/data/gray.jpg'])\n",
"# You can even mix the types\n",
"inferencer(['tests/data/color.jpg', array])"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "hUGrTtxrVBAS"
},
"source": [
"- str: Path to the directory. All images in the directory will be processed."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 1000,
"referenced_widgets": [
"07ed8efcd87a40059af36f0c43ef5147",
"69ce7e58e27f4e1186ab0afcb99d37c3"
]
},
"id": "JWK10ZD6VDDE",
"outputId": "91418597-d9ea-4613-b141-16bc8bcc8caf"
},
"outputs": [],
"source": [
"inferencer('tests/data/')"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "BQxEVr2pVGen"
},
"source": [
"### Output\n",
"\n",
"By default, each `Inferencer` returns the prediction results in a dictionary format.\n",
"\n",
"- `visualization` contains the visualized predictions.\n",
"\n",
"- `predictions` contains the predictions results in a json-serializable format. But it's an empty list by default unless `return_vis=True`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 306,
"referenced_widgets": [
"95674a6baa1842d2981fe60b31ab6cad",
"7e62816d1f6c441fb98c1f8e942fff1d"
]
},
"id": "m6a8T4goU8Sq",
"outputId": "6f74098f-a3d3-4897-c58f-68fae88889af"
},
"outputs": [],
"source": [
"# Show the structure of result dict\n",
"from rich.pretty import pprint\n",
"\n",
"result = inferencer('demo/demo.jpg')\n",
"pprint(result, max_length=4)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "a93hFT0jVkrR"
},
"source": [
"If you wish to get the raw outputs from the model, you can set `return_datasamples` to `True` to get the original `DataSample`, which will be stored in `predictions`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 1000,
"referenced_widgets": [
"060f510b5bda498583d7212060bb528c",
"1bb724cb12c240a18f651dd99842e5b0"
]
},
"id": "U5DFI7QAVbnP",
"outputId": "effaf3ec-2476-4b64-dcbd-802a18a26479"
},
"outputs": [],
"source": [
"result = inferencer('demo/demo.jpg', return_datasamples=True)\n",
"pprint(result, max_length=4)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "JHdcUnGzVsk1"
},
"source": [
"#### Dumping Results\n",
"\n",
"Apart from obtaining predictions from the return value, you can also export the predictions/visualizations to files by setting `out_dir` and `no_save_pred`/`no_save_vis` arguments."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 1000,
"referenced_widgets": [
"38083c2f29604d1d9a7dcf9845dfbf33",
"54cdfe55e0f04df9ab844961a089fe2f"
]
},
"id": "0dr-ixmfVtng",
"outputId": "af22d458-9aed-41e2-f675-e017a0cb588b"
},
"outputs": [],
"source": [
"inferencer('demo/demo.jpg', out_dir='outputs/', no_save_pred=False)"
]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.13"
},
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"0164804ae2f842fe8d2a4c5414c4a0c2": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"0226fedc26044ab2abdccc4fcbe226f8": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"060f510b5bda498583d7212060bb528c": {
"model_module": "@jupyter-widgets/output",
"model_module_version": "1.0.0",
"model_name": "OutputModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/output",
"_model_module_version": "1.0.0",
"_model_name": "OutputModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/output",
"_view_module_version": "1.0.0",
"_view_name": "OutputView",
"layout": "IPY_MODEL_1bb724cb12c240a18f651dd99842e5b0",
"msg_id": "",
"outputs": [
{
"data": {
"text/html": "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">Inference <span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #3a3a3a; text-decoration-color: #3a3a3a\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #f92672; text-decoration-color: #f92672\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #3a3a3a; text-decoration-color: #3a3a3a\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #f92672; text-decoration-color: #f92672\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span> <span style=\"color: #008080; text-decoration-color: #008080\"> </span>\n</pre>\n",
"text/plain": "Inference \u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;58;58;58m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;249;38;114m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;58;58;58m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;249;38;114m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m \u001b[36m \u001b[0m\n"
},
"metadata": {},
"output_type": "display_data"
}
]
}
},
"07ed8efcd87a40059af36f0c43ef5147": {
"model_module": "@jupyter-widgets/output",
"model_module_version": "1.0.0",
"model_name": "OutputModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/output",
"_model_module_version": "1.0.0",
"_model_name": "OutputModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/output",
"_view_module_version": "1.0.0",
"_view_name": "OutputView",
"layout": "IPY_MODEL_69ce7e58e27f4e1186ab0afcb99d37c3",
"msg_id": "",
"outputs": [
{
"data": {
"text/html": "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">Inference <span style=\"color: #f92672; text-decoration-color: #f92672\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #3a3a3a; text-decoration-color: #3a3a3a\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #f92672; text-decoration-color: #f92672\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #3a3a3a; text-decoration-color: #3a3a3a\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span> <span style=\"color: #800080; text-decoration-color: #800080\">13.5 it/s</span> <span style=\"color: #008080; text-decoration-color: #008080\"> </span>\n</pre>\n",
"text/plain": "Inference \u001b[38;2;249;38;114m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;58;58;58m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;249;38;114m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;58;58;58m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m \u001b[35m13.5 it/s\u001b[0m \u001b[36m \u001b[0m\n"
},
"metadata": {},
"output_type": "display_data"
}
]
}
},
"1bb724cb12c240a18f651dd99842e5b0": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"2abd7eef6f1f4b9c865a466b3dd5ef24": {
"model_module": "@jupyter-widgets/output",
"model_module_version": "1.0.0",
"model_name": "OutputModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/output",
"_model_module_version": "1.0.0",
"_model_name": "OutputModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/output",
"_view_module_version": "1.0.0",
"_view_name": "OutputView",
"layout": "IPY_MODEL_8951ec1ee7164f7ca7239a37e80e98ea",
"msg_id": "",
"outputs": [
{
"data": {
"text/html": "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">Inference <span style=\"color: #3a3a3a; text-decoration-color: #3a3a3a\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #f92672; text-decoration-color: #f92672\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #3a3a3a; text-decoration-color: #3a3a3a\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #f92672; text-decoration-color: #f92672\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span> <span style=\"color: #008080; text-decoration-color: #008080\"> </span>\n</pre>\n",
"text/plain": "Inference \u001b[38;2;58;58;58m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;249;38;114m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;58;58;58m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;249;38;114m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m \u001b[36m \u001b[0m\n"
},
"metadata": {},
"output_type": "display_data"
}
]
}
},
"38083c2f29604d1d9a7dcf9845dfbf33": {
"model_module": "@jupyter-widgets/output",
"model_module_version": "1.0.0",
"model_name": "OutputModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/output",
"_model_module_version": "1.0.0",
"_model_name": "OutputModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/output",
"_view_module_version": "1.0.0",
"_view_name": "OutputView",
"layout": "IPY_MODEL_54cdfe55e0f04df9ab844961a089fe2f",
"msg_id": "",
"outputs": [
{
"data": {
"text/html": "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">Inference <span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #3a3a3a; text-decoration-color: #3a3a3a\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #f92672; text-decoration-color: #f92672\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #3a3a3a; text-decoration-color: #3a3a3a\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #f92672; text-decoration-color: #f92672\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span> <span style=\"color: #008080; text-decoration-color: #008080\"> </span>\n</pre>\n",
"text/plain": "Inference \u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;58;58;58m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;249;38;114m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;58;58;58m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;249;38;114m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m \u001b[36m \u001b[0m\n"
},
"metadata": {},
"output_type": "display_data"
}
]
}
},
"54cdfe55e0f04df9ab844961a089fe2f": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"59bfd22c751f4ed4baefa466e7653315": {
"model_module": "@jupyter-widgets/output",
"model_module_version": "1.0.0",
"model_name": "OutputModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/output",
"_model_module_version": "1.0.0",
"_model_name": "OutputModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/output",
"_view_module_version": "1.0.0",
"_view_name": "OutputView",
"layout": "IPY_MODEL_0164804ae2f842fe8d2a4c5414c4a0c2",
"msg_id": "",
"outputs": [
{
"data": {
"text/html": "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">Inference <span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #3a3a3a; text-decoration-color: #3a3a3a\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #f92672; text-decoration-color: #f92672\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #3a3a3a; text-decoration-color: #3a3a3a\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #f92672; text-decoration-color: #f92672\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span> <span style=\"color: #008080; text-decoration-color: #008080\"> </span>\n</pre>\n",
"text/plain": "Inference \u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;58;58;58m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;249;38;114m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;58;58;58m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;249;38;114m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m \u001b[36m \u001b[0m\n"
},
"metadata": {},
"output_type": "display_data"
}
]
}
},
"69ce7e58e27f4e1186ab0afcb99d37c3": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"6fa2cda48fda43f9bf53a0f533392eba": {
"model_module": "@jupyter-widgets/output",
"model_module_version": "1.0.0",
"model_name": "OutputModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/output",
"_model_module_version": "1.0.0",
"_model_name": "OutputModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/output",
"_view_module_version": "1.0.0",
"_view_name": "OutputView",
"layout": "IPY_MODEL_0226fedc26044ab2abdccc4fcbe226f8",
"msg_id": "",
"outputs": [
{
"data": {
"text/html": "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">Inference <span style=\"color: #f92672; text-decoration-color: #f92672\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #3a3a3a; text-decoration-color: #3a3a3a\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #f92672; text-decoration-color: #f92672\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #3a3a3a; text-decoration-color: #3a3a3a\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span> <span style=\"color: #008080; text-decoration-color: #008080\"> </span>\n</pre>\n",
"text/plain": "Inference \u001b[38;2;249;38;114m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;58;58;58m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;249;38;114m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;58;58;58m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m \u001b[36m \u001b[0m\n"
},
"metadata": {},
"output_type": "display_data"
}
]
}
},
"7e62816d1f6c441fb98c1f8e942fff1d": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"8951ec1ee7164f7ca7239a37e80e98ea": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"95674a6baa1842d2981fe60b31ab6cad": {
"model_module": "@jupyter-widgets/output",
"model_module_version": "1.0.0",
"model_name": "OutputModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/output",
"_model_module_version": "1.0.0",
"_model_name": "OutputModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/output",
"_view_module_version": "1.0.0",
"_view_name": "OutputView",
"layout": "IPY_MODEL_7e62816d1f6c441fb98c1f8e942fff1d",
"msg_id": "",
"outputs": [
{
"data": {
"text/html": "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">Inference <span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #3a3a3a; text-decoration-color: #3a3a3a\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #f92672; text-decoration-color: #f92672\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #3a3a3a; text-decoration-color: #3a3a3a\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #f92672; text-decoration-color: #f92672\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span> <span style=\"color: #008080; text-decoration-color: #008080\"> </span>\n</pre>\n",
"text/plain": "Inference \u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;58;58;58m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;249;38;114m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;58;58;58m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;249;38;114m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m \u001b[36m \u001b[0m\n"
},
"metadata": {},
"output_type": "display_data"
}
]
}
},
"a64a6eb038c44236b80579b2bfc4b8e3": {
"model_module": "@jupyter-widgets/output",
"model_module_version": "1.0.0",
"model_name": "OutputModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/output",
"_model_module_version": "1.0.0",
"_model_name": "OutputModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/output",
"_view_module_version": "1.0.0",
"_view_name": "OutputView",
"layout": "IPY_MODEL_eef25a0509854f98883395a2c0fc2134",
"msg_id": "",
"outputs": [
{
"data": {
"text/html": "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">Inference <span style=\"color: #f92672; text-decoration-color: #f92672\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #3a3a3a; text-decoration-color: #3a3a3a\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #f92672; text-decoration-color: #f92672\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #3a3a3a; text-decoration-color: #3a3a3a\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span> <span style=\"color: #800080; text-decoration-color: #800080\">9.7 it/s</span> <span style=\"color: #008080; text-decoration-color: #008080\"> </span>\n</pre>\n",
"text/plain": "Inference \u001b[38;2;249;38;114m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;58;58;58m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;249;38;114m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;58;58;58m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m \u001b[35m9.7 it/s\u001b[0m \u001b[36m \u001b[0m\n"
},
"metadata": {},
"output_type": "display_data"
}
]
}
},
"eef25a0509854f98883395a2c0fc2134": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"f6634888109048069b6844e9f9b4ec13": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"f87f0b153b0342ad99dcd320a1302c92": {
"model_module": "@jupyter-widgets/output",
"model_module_version": "1.0.0",
"model_name": "OutputModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/output",
"_model_module_version": "1.0.0",
"_model_name": "OutputModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/output",
"_view_module_version": "1.0.0",
"_view_name": "OutputView",
"layout": "IPY_MODEL_f6634888109048069b6844e9f9b4ec13",
"msg_id": "",
"outputs": [
{
"data": {
"text/html": "<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">Inference <span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #f92672; text-decoration-color: #f92672\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #3a3a3a; text-decoration-color: #3a3a3a\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #f92672; text-decoration-color: #f92672\">━</span><span style=\"color: #f42670; text-decoration-color: #f42670\">━</span><span style=\"color: #e6276c; text-decoration-color: #e6276c\">━</span><span style=\"color: #d12a66; text-decoration-color: #d12a66\">━</span><span style=\"color: #b72c5e; text-decoration-color: #b72c5e\">━</span><span style=\"color: #993056; text-decoration-color: #993056\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #3a3a3a; text-decoration-color: #3a3a3a\">━</span><span style=\"color: #3e393b; text-decoration-color: #3e393b\">━</span><span style=\"color: #4c383f; text-decoration-color: #4c383f\">━</span><span style=\"color: #613545; text-decoration-color: #613545\">━</span><span style=\"color: #7b334d; text-decoration-color: #7b334d\">━</span> <span style=\"color: #800080; text-decoration-color: #800080\">9.0 it/s</span> <span style=\"color: #008080; text-decoration-color: #008080\"> </span>\n</pre>\n",
"text/plain": "Inference \u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;249;38;114m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;58;58;58m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;249;38;114m━\u001b[0m\u001b[38;2;244;38;112m━\u001b[0m\u001b[38;2;230;39;108m━\u001b[0m\u001b[38;2;209;42;102m━\u001b[0m\u001b[38;2;183;44;94m━\u001b[0m\u001b[38;2;153;48;86m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;58;58;58m━\u001b[0m\u001b[38;2;62;57;59m━\u001b[0m\u001b[38;2;76;56;63m━\u001b[0m\u001b[38;2;97;53;69m━\u001b[0m\u001b[38;2;123;51;77m━\u001b[0m \u001b[35m9.0 it/s\u001b[0m \u001b[36m \u001b[0m\n"
},
"metadata": {},
"output_type": "display_data"
}
]
}
}
}
}
},
"nbformat": 4,
"nbformat_minor": 0
}
# Copyright (c) OpenMMLab. All rights reserved.
"""Perform MMDET inference on large images (as satellite imagery) as:
```shell
wget -P checkpoint https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_2x_coco/faster_rcnn_r101_fpn_2x_coco_bbox_mAP-0.398_20200504_210455-1d2dac9c.pth # noqa: E501, E261.
python demo/large_image_demo.py \
demo/large_image.jpg \
configs/faster_rcnn/faster-rcnn_r101_fpn_2x_coco.py \
checkpoint/faster_rcnn_r101_fpn_2x_coco_bbox_mAP-0.398_20200504_210455-1d2dac9c.pth
```
"""
import os
import random
from argparse import ArgumentParser
from pathlib import Path
import mmcv
import numpy as np
from mmengine.config import Config, ConfigDict
from mmengine.logging import print_log
from mmengine.utils import ProgressBar
from mmdet.apis import inference_detector, init_detector
try:
from sahi.slicing import slice_image
except ImportError:
raise ImportError('Please run "pip install -U sahi" '
'to install sahi first for large image inference.')
from mmdet.registry import VISUALIZERS
from mmdet.utils.large_image import merge_results_by_nms, shift_predictions
from mmdet.utils.misc import get_file_list
def parse_args():
parser = ArgumentParser(
description='Perform MMDET inference on large images.')
parser.add_argument(
        'img', help='Image path, including image file, dir and URL.')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--out-dir', default='./output', help='Path to output file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--show', action='store_true', help='Show the detection results')
parser.add_argument(
'--tta',
action='store_true',
help='Whether to use test time augmentation')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument(
'--patch-size', type=int, default=640, help='The size of patches')
parser.add_argument(
'--patch-overlap-ratio',
type=float,
default=0.25,
help='Ratio of overlap between two patches')
parser.add_argument(
'--merge-iou-thr',
type=float,
default=0.25,
        help='IoU threshold for merging results')
parser.add_argument(
'--merge-nms-type',
type=str,
default='nms',
help='NMS type for merging results')
parser.add_argument(
'--batch-size',
type=int,
default=1,
        help='Batch size, must be greater than or equal to 1')
parser.add_argument(
'--debug',
action='store_true',
help='Export debug results before merging')
parser.add_argument(
'--save-patch',
action='store_true',
help='Save the results of each patch. '
        'Requires `--debug` to be enabled.')
args = parser.parse_args()
return args
def main():
args = parse_args()
config = args.config
if isinstance(config, (str, Path)):
config = Config.fromfile(config)
elif not isinstance(config, Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
if 'init_cfg' in config.model.backbone:
config.model.backbone.init_cfg = None
if args.tta:
assert 'tta_model' in config, 'Cannot find ``tta_model`` in config.' \
" Can't use tta !"
assert 'tta_pipeline' in config, 'Cannot find ``tta_pipeline`` ' \
"in config. Can't use tta !"
config.model = ConfigDict(**config.tta_model, module=config.model)
test_data_cfg = config.test_dataloader.dataset
while 'dataset' in test_data_cfg:
test_data_cfg = test_data_cfg['dataset']
test_data_cfg.pipeline = config.tta_pipeline
# TODO: TTA mode will error if cfg_options is not set.
# This is an mmdet issue and needs to be fixed later.
# build the model from a config file and a checkpoint file
model = init_detector(
config, args.checkpoint, device=args.device, cfg_options={})
if not os.path.exists(args.out_dir) and not args.show:
os.mkdir(args.out_dir)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
visualizer.dataset_meta = model.dataset_meta
# get file list
files, source_type = get_file_list(args.img)
# start detector inference
    print(f'Performing inference on {len(files)} images... '
'This may take a while.')
progress_bar = ProgressBar(len(files))
for file in files:
# read image
img = mmcv.imread(file)
# arrange slices
height, width = img.shape[:2]
sliced_image_object = slice_image(
img,
slice_height=args.patch_size,
slice_width=args.patch_size,
auto_slice_resolution=False,
overlap_height_ratio=args.patch_overlap_ratio,
overlap_width_ratio=args.patch_overlap_ratio,
)
# perform sliced inference
slice_results = []
start = 0
while True:
# prepare batch slices
end = min(start + args.batch_size, len(sliced_image_object))
images = []
for sliced_image in sliced_image_object.images[start:end]:
images.append(sliced_image)
# forward the model
slice_results.extend(inference_detector(model, images))
if end >= len(sliced_image_object):
break
start += args.batch_size
if source_type['is_dir']:
filename = os.path.relpath(file, args.img).replace('/', '_')
else:
filename = os.path.basename(file)
img = mmcv.imconvert(img, 'bgr', 'rgb')
out_file = None if args.show else os.path.join(args.out_dir, filename)
# export debug images
if args.debug:
# export sliced image results
name, suffix = os.path.splitext(filename)
shifted_instances = shift_predictions(
slice_results,
sliced_image_object.starting_pixels,
src_image_shape=(height, width))
merged_result = slice_results[0].clone()
merged_result.pred_instances = shifted_instances
debug_file_name = name + '_debug' + suffix
debug_out_file = None if args.show else os.path.join(
args.out_dir, debug_file_name)
visualizer.set_image(img.copy())
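            # overlay every patch boundary on the image with a random color
            # and line style so the slicing grid is visible in the debug image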
debug_grids = []
for starting_point in sliced_image_object.starting_pixels:
start_point_x = starting_point[0]
start_point_y = starting_point[1]
end_point_x = start_point_x + args.patch_size
end_point_y = start_point_y + args.patch_size
debug_grids.append(
[start_point_x, start_point_y, end_point_x, end_point_y])
debug_grids = np.array(debug_grids)
debug_grids[:, 0::2] = np.clip(debug_grids[:, 0::2], 1,
img.shape[1] - 1)
debug_grids[:, 1::2] = np.clip(debug_grids[:, 1::2], 1,
img.shape[0] - 1)
palette = np.random.randint(0, 256, size=(len(debug_grids), 3))
palette = [tuple(c) for c in palette]
line_styles = random.choices(['-', '-.', ':'], k=len(debug_grids))
visualizer.draw_bboxes(
debug_grids,
edge_colors=palette,
alpha=1,
line_styles=line_styles)
visualizer.draw_bboxes(
debug_grids, face_colors=palette, alpha=0.15)
visualizer.draw_texts(
list(range(len(debug_grids))),
debug_grids[:, :2] + 5,
colors='w')
visualizer.add_datasample(
debug_file_name,
visualizer.get_image(),
data_sample=merged_result,
draw_gt=False,
show=args.show,
wait_time=0,
out_file=debug_out_file,
pred_score_thr=args.score_thr,
)
if args.save_patch:
debug_patch_out_dir = os.path.join(args.out_dir,
f'{name}_patch')
for i, slice_result in enumerate(slice_results):
patch_out_file = os.path.join(
debug_patch_out_dir,
f'{filename}_slice_{i}_result.jpg')
image = mmcv.imconvert(sliced_image_object.images[i],
'bgr', 'rgb')
visualizer.add_datasample(
'patch_result',
image,
data_sample=slice_result,
draw_gt=False,
show=False,
wait_time=0,
out_file=patch_out_file,
pred_score_thr=args.score_thr,
)
image_result = merge_results_by_nms(
slice_results,
sliced_image_object.starting_pixels,
src_image_shape=(height, width),
nms_cfg={
'type': args.merge_nms_type,
'iou_threshold': args.merge_iou_thr
})
visualizer.add_datasample(
filename,
img,
data_sample=image_result,
draw_gt=False,
show=args.show,
wait_time=0,
out_file=out_file,
pred_score_thr=args.score_thr,
)
progress_bar.update()
if not args.show or (args.debug and args.save_patch):
print_log(
f'\nResults have been saved at {os.path.abspath(args.out_dir)}')
if __name__ == '__main__':
main()
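For reference, the slice-then-merge flow that the script above wraps in a CLI can also be driven directly from Python. The sketch below is a minimal, hypothetical example under the same assumptions as the docstring (sahi and mmdet installed, a Faster R-CNN config and checkpoint on disk); the patch size, overlap ratio and IoU threshold simply mirror the script's defaults.

```python
# Minimal, hypothetical sketch of the slice -> per-patch inference -> NMS merge
# flow used by large_image_demo.py. Paths below are placeholders.
import mmcv
from sahi.slicing import slice_image

from mmdet.apis import inference_detector, init_detector
from mmdet.utils.large_image import merge_results_by_nms

CONFIG = 'configs/faster_rcnn/faster-rcnn_r101_fpn_2x_coco.py'  # placeholder
CHECKPOINT = 'checkpoint/faster_rcnn_r101_fpn_2x_coco.pth'      # placeholder

model = init_detector(CONFIG, CHECKPOINT, device='cuda:0')

img = mmcv.imread('demo/large_image.jpg')
height, width = img.shape[:2]

# cut the large image into overlapping 640x640 patches
sliced = slice_image(
    img,
    slice_height=640,
    slice_width=640,
    auto_slice_resolution=False,
    overlap_height_ratio=0.25,
    overlap_width_ratio=0.25)

# run the detector on all patches (the script above batches this step)
patch_results = inference_detector(model, sliced.images)

# shift patch detections back to full-image coordinates and merge with NMS
merged = merge_results_by_nms(
    patch_results,
    sliced.starting_pixels,
    src_image_shape=(height, width),
    nms_cfg=dict(type='nms', iou_threshold=0.25))
print(merged.pred_instances)
```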
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import tempfile
from argparse import ArgumentParser
import mmcv
import mmengine
from mmengine.registry import init_default_scope
from mmdet.apis import inference_mot, init_track_model
from mmdet.registry import VISUALIZERS
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png')
def parse_args():
parser = ArgumentParser()
parser.add_argument(
'inputs', type=str, help='Input image file or folder path.')
parser.add_argument('config', help='config file')
parser.add_argument('--checkpoint', help='checkpoint file')
parser.add_argument('--detector', help='det checkpoint file')
parser.add_argument('--reid', help='reid checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='device used for inference')
parser.add_argument(
'--score-thr',
type=float,
default=0.0,
        help='Score threshold for filtering bboxes.')
parser.add_argument(
'--out', help='output video file (mp4 format) or folder')
parser.add_argument(
'--show',
action='store_true',
        help='whether to show the results on the fly')
parser.add_argument('--fps', help='FPS of the output video')
args = parser.parse_args()
return args
def main(args):
assert args.out or args.show
# load images
if osp.isdir(args.inputs):
imgs = sorted(
filter(lambda x: x.endswith(IMG_EXTENSIONS),
os.listdir(args.inputs)),
key=lambda x: int(x.split('.')[0]))
in_video = False
else:
imgs = mmcv.VideoReader(args.inputs)
in_video = True
# define output
out_video = False
if args.out is not None:
if args.out.endswith('.mp4'):
out_video = True
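            # frames are written to a temporary folder first and only
            # encoded into the final mp4 (mmcv.frames2video) at the end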
out_dir = tempfile.TemporaryDirectory()
out_path = out_dir.name
_out = args.out.rsplit(os.sep, 1)
if len(_out) > 1:
os.makedirs(_out[0], exist_ok=True)
else:
out_path = args.out
os.makedirs(out_path, exist_ok=True)
fps = args.fps
if args.show or out_video:
if fps is None and in_video:
fps = imgs.fps
if not fps:
raise ValueError('Please set the FPS for the output video.')
fps = int(fps)
init_default_scope('mmdet')
# build the model from a config file and a checkpoint file
model = init_track_model(
args.config,
args.checkpoint,
args.detector,
args.reid,
device=args.device)
# build the visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
visualizer.dataset_meta = model.dataset_meta
prog_bar = mmengine.ProgressBar(len(imgs))
# test and show/save the images
for i, img in enumerate(imgs):
if isinstance(img, str):
img_path = osp.join(args.inputs, img)
img = mmcv.imread(img_path)
# result [TrackDataSample]
result = inference_mot(model, img, frame_id=i, video_len=len(imgs))
if args.out is not None:
if in_video or out_video:
out_file = osp.join(out_path, f'{i:06d}.jpg')
else:
out_file = osp.join(out_path, img.rsplit(os.sep, 1)[-1])
else:
out_file = None
# show the results
visualizer.add_datasample(
'mot',
img[..., ::-1],
data_sample=result[0],
show=args.show,
draw_gt=False,
out_file=out_file,
wait_time=float(1 / int(fps)) if fps else 0,
pred_score_thr=args.score_thr,
step=i)
prog_bar.update()
if args.out and out_video:
        print(f'making the output video at {args.out} with an FPS of {fps}')
mmcv.frames2video(out_path, args.out, fps=fps, fourcc='mp4v')
out_dir.cleanup()
if __name__ == '__main__':
args = parse_args()
main(args)
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection video demo')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--wait-time',
type=float,
default=1,
        help='The interval of show (s); 0 means blocking')
args = parser.parse_args()
return args
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# build test pipeline
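    # video frames are numpy arrays, so the first transform must load the
    # image from an ndarray instead of reading a file from disk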
model.cfg.test_dataloader.dataset.pipeline[
0].type = 'mmdet.LoadImageFromNDArray'
test_pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
    # then passed to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
video_reader = mmcv.VideoReader(args.video)
video_writer = None
if args.out:
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
video_writer = cv2.VideoWriter(
args.out, fourcc, video_reader.fps,
(video_reader.width, video_reader.height))
for frame in track_iter_progress((video_reader, len(video_reader))):
result = inference_detector(model, frame, test_pipeline=test_pipeline)
visualizer.add_datasample(
name='video',
image=frame,
data_sample=result,
draw_gt=False,
show=False,
pred_score_thr=args.score_thr)
frame = visualizer.get_image()
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame, 'video', args.wait_time)
if args.out:
video_writer.write(frame)
if video_writer:
video_writer.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()