Commit f8772570 authored by bailuo's avatar bailuo
Browse files

init

parents
import os
from glob import glob
from collections import defaultdict
import numpy as np
from PIL import Image
class DAVIS(object):
    """Filesystem reader for the DAVIS 2017 dataset directory layout.

    Indexes image/annotation paths per sequence; actual pixel data is only
    loaded lazily by the ``get_*`` accessors.
    """

    SUBSET_OPTIONS = ['train', 'val', 'test-dev', 'test-challenge']
    TASKS = ['semi-supervised', 'unsupervised']
    DATASET_WEB = 'https://davischallenge.org/davis2017/code.html'
    VOID_LABEL = 255  # pixel value marking "void" (unlabeled) regions in the annotations

    def __init__(self, root, task='unsupervised', subset='val', sequences='all', resolution='480p', codalab=False):
        """
        Class to read the DAVIS dataset
        :param root: Path to the DAVIS folder that contains JPEGImages, Annotations, etc. folders.
        :param task: Task to load the annotations, choose between semi-supervised or unsupervised.
        :param subset: Set to load the annotations
        :param sequences: Sequences to consider, 'all' to use all the sequences in a set.
        :param resolution: Specify the resolution to use the dataset, choose between '480p' and 'Full-Resolution'
        :param codalab: If True, do not fail when a sequence has no images (evaluation-server mode).
        :raises ValueError: if subset or task is not one of the supported options.
        :raises FileNotFoundError: if the expected dataset folders/files are missing.
        """
        if subset not in self.SUBSET_OPTIONS:
            raise ValueError(f'Subset should be in {self.SUBSET_OPTIONS}')
        if task not in self.TASKS:
            raise ValueError(f'The only tasks that are supported are {self.TASKS}')

        self.task = task
        self.subset = subset
        self.root = root
        self.img_path = os.path.join(self.root, 'JPEGImages', resolution)
        annotations_folder = 'Annotations' if task == 'semi-supervised' else 'Annotations_unsupervised'
        self.mask_path = os.path.join(self.root, annotations_folder, resolution)
        # The unsupervised test-dev/test-challenge splits live under the 2019 image sets.
        year = '2019' if task == 'unsupervised' and (subset == 'test-dev' or subset == 'test-challenge') else '2017'
        self.imagesets_path = os.path.join(self.root, 'ImageSets', year)

        self._check_directories()

        if sequences == 'all':
            with open(os.path.join(self.imagesets_path, f'{self.subset}.txt'), 'r') as f:
                tmp = f.readlines()
            sequences_names = [x.strip() for x in tmp]
        else:
            sequences_names = sequences if isinstance(sequences, list) else [sequences]

        self.sequences = defaultdict(dict)
        for seq in sequences_names:
            images = np.sort(glob(os.path.join(self.img_path, seq, '*.jpg'))).tolist()
            if len(images) == 0 and not codalab:
                raise FileNotFoundError(f'Images for sequence {seq} not found.')
            self.sequences[seq]['images'] = images
            masks = np.sort(glob(os.path.join(self.mask_path, seq, '*.png'))).tolist()
            # BUGFIX: pad missing annotations with None, not -1 — get_frames()
            # tests `msk is None`, so a -1 placeholder would be passed to
            # Image.open() and crash on test sequences without full masks.
            masks.extend([None] * (len(images) - len(masks)))
            self.sequences[seq]['masks'] = masks

    def _check_directories(self):
        """Verify the dataset folders required for this task/subset exist."""
        if not os.path.exists(self.root):
            raise FileNotFoundError(f'DAVIS not found in the specified directory, download it from {self.DATASET_WEB}')
        if not os.path.exists(os.path.join(self.imagesets_path, f'{self.subset}.txt')):
            raise FileNotFoundError(f'Subset sequences list for {self.subset} not found, download the missing subset '
                                    f'for the {self.task} task from {self.DATASET_WEB}')
        # Annotations are only guaranteed for train/val; test masks are withheld.
        if self.subset in ['train', 'val'] and not os.path.exists(self.mask_path):
            raise FileNotFoundError(f'Annotations folder for the {self.task} task not found, download it from {self.DATASET_WEB}')

    def get_frames(self, sequence):
        """Yield (image, mask) numpy arrays frame-by-frame; mask is None when unavailable."""
        for img, msk in zip(self.sequences[sequence]['images'], self.sequences[sequence]['masks']):
            image = np.array(Image.open(img))
            mask = None if msk is None else np.array(Image.open(msk))
            yield image, mask

    def _get_all_elements(self, sequence, obj_type):
        """Load every file of `obj_type` ('images' or 'masks') for a sequence.

        :return: (stacked float array of all frames, list of frame-name ids).
        """
        obj = np.array(Image.open(self.sequences[sequence][obj_type][0]))
        all_objs = np.zeros((len(self.sequences[sequence][obj_type]), *obj.shape))
        obj_id = []
        for i, obj in enumerate(self.sequences[sequence][obj_type]):
            all_objs[i, ...] = np.array(Image.open(obj))
            # BUGFIX: use os.path instead of splitting on '/', which breaks on
            # Windows path separators.
            obj_id.append(os.path.splitext(os.path.basename(obj))[0])
        return all_objs, obj_id

    def get_all_images(self, sequence):
        """Return all frames of a sequence as one array plus their frame ids."""
        return self._get_all_elements(sequence, 'images')

    def get_all_masks(self, sequence, separate_objects_masks=False):
        """Return the masks of a sequence, split into object and void parts.

        :param sequence: Sequence name.
        :param separate_objects_masks: If True, expand the label masks into one
            binary mask per object id (leading axis = object).
        :return: (masks, void masks, frame ids).
        """
        masks, masks_id = self._get_all_elements(sequence, 'masks')
        masks_void = np.zeros_like(masks)
        # Separate the void regions from the object labels so metrics can
        # ignore void pixels.
        for i in range(masks.shape[0]):
            masks_void[i, ...] = masks[i, ...] == self.VOID_LABEL
            masks[i, masks[i, ...] == self.VOID_LABEL] = 0
        if separate_objects_masks:
            # Object ids are assumed to be 1..num_objects in the first frame.
            num_objects = int(np.max(masks[0, ...]))
            tmp = np.ones((num_objects, *masks.shape))
            tmp = tmp * np.arange(1, num_objects + 1)[:, None, None, None]
            masks = (tmp == masks[None, ...])
            masks = masks > 0
        return masks, masks_void, masks_id

    def get_sequences(self):
        """Yield the names of the indexed sequences."""
        for seq in self.sequences:
            yield seq
if __name__ == '__main__':
    # Quick visual sanity check: display the first frame and mask of every
    # sequence in the train and val splits.
    from matplotlib import pyplot as plt

    only_first_frame = True
    for subset_name in ['train', 'val']:
        davis = DAVIS(root='/home/csergi/scratch2/Databases/DAVIS2017_private', subset=subset_name)
        for sequence in davis.get_sequences():
            frame_iter = davis.get_frames(sequence)
            image, annotation = next(frame_iter)
            plt.subplot(2, 1, 1)
            plt.title(sequence)
            plt.imshow(image)
            plt.subplot(2, 1, 2)
            plt.imshow(annotation)
            plt.show(block=True)
import sys
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
import numpy as np
from davis2017.davis import DAVIS
from davis2017.metrics import db_eval_boundary, db_eval_iou
from davis2017 import utils
from davis2017.results import Results
from scipy.optimize import linear_sum_assignment
class DAVISEvaluation(object):
    """Evaluate J (region IoU) and F (boundary) metrics over a DAVIS subset."""

    def __init__(self, davis_root, task, gt_set, sequences='all', codalab=False):
        """
        Class to evaluate DAVIS sequences from a certain set and for a certain task
        :param davis_root: Path to the DAVIS folder that contains JPEGImages, Annotations, etc. folders.
        :param task: Task to compute the evaluation, chose between semi-supervised or unsupervised.
        :param gt_set: Set to compute the evaluation
        :param sequences: Sequences to consider for the evaluation, 'all' to use all the sequences in a set.
        :param codalab: Forwarded to DAVIS; tolerate missing sequences when True.
        """
        self.davis_root = davis_root
        self.task = task
        self.dataset = DAVIS(root=davis_root, task=task, subset=gt_set, sequences=sequences, codalab=codalab)

    @staticmethod
    def _evaluate_semisupervised(all_gt_masks, all_res_masks, all_void_masks, metric):
        """Score result masks against ground truth with a fixed 1:1 object pairing.

        Arrays are (num_objects, num_frames, H, W); returns per-object,
        per-frame J and F matrices of shape (num_objects, num_frames).
        """
        # More predicted object ids than GT objects is a submission error.
        if all_res_masks.shape[0] > all_gt_masks.shape[0]:
            sys.stdout.write("\nIn your PNG files there is an index higher than the number of objects in the sequence!")
            sys.exit()
        elif all_res_masks.shape[0] < all_gt_masks.shape[0]:
            # Pad missing objects with empty (all-zero) masks so shapes align.
            zero_padding = np.zeros((all_gt_masks.shape[0] - all_res_masks.shape[0], *all_res_masks.shape[1:]))
            all_res_masks = np.concatenate([all_res_masks, zero_padding], axis=0)
        j_metrics_res, f_metrics_res = np.zeros(all_gt_masks.shape[:2]), np.zeros(all_gt_masks.shape[:2])
        for ii in range(all_gt_masks.shape[0]):
            if 'J' in metric:
                j_metrics_res[ii, :] = db_eval_iou(all_gt_masks[ii, ...], all_res_masks[ii, ...], all_void_masks)
            if 'F' in metric:
                f_metrics_res[ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[ii, ...], all_void_masks)
        return j_metrics_res, f_metrics_res

    @staticmethod
    def _evaluate_unsupervised(all_gt_masks, all_res_masks, all_void_masks, metric, max_n_proposals=20):
        """Score proposals against GT objects using an optimal bipartite matching.

        Computes the full (proposal x GT object) metric table, then picks the
        assignment maximizing mean J/F via the Hungarian algorithm.
        """
        if all_res_masks.shape[0] > max_n_proposals:
            sys.stdout.write(f"\nIn your PNG files there is an index higher than the maximum number ({max_n_proposals}) of proposals allowed!")
            sys.exit()
        elif all_res_masks.shape[0] < all_gt_masks.shape[0]:
            # Pad with empty proposals so every GT object can be assigned one.
            zero_padding = np.zeros((all_gt_masks.shape[0] - all_res_masks.shape[0], *all_res_masks.shape[1:]))
            all_res_masks = np.concatenate([all_res_masks, zero_padding], axis=0)
        # Shape: (num_proposals, num_gt_objects, num_frames)
        j_metrics_res = np.zeros((all_res_masks.shape[0], all_gt_masks.shape[0], all_gt_masks.shape[1]))
        f_metrics_res = np.zeros((all_res_masks.shape[0], all_gt_masks.shape[0], all_gt_masks.shape[1]))
        for ii in range(all_gt_masks.shape[0]):
            for jj in range(all_res_masks.shape[0]):
                if 'J' in metric:
                    j_metrics_res[jj, ii, :] = db_eval_iou(all_gt_masks[ii, ...], all_res_masks[jj, ...], all_void_masks)
                if 'F' in metric:
                    f_metrics_res[jj, ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[jj, ...], all_void_masks)
        # Collapse frames to a single per-pair score to drive the assignment.
        if 'J' in metric and 'F' in metric:
            all_metrics = (np.mean(j_metrics_res, axis=2) + np.mean(f_metrics_res, axis=2)) / 2
        else:
            all_metrics = np.mean(j_metrics_res, axis=2) if 'J' in metric else np.mean(f_metrics_res, axis=2)
        # Maximize total score (linear_sum_assignment minimizes, hence the negation).
        row_ind, col_ind = linear_sum_assignment(-all_metrics)
        return j_metrics_res[row_ind, col_ind, :], f_metrics_res[row_ind, col_ind, :]

    def evaluate(self, res_path, metric=('J', 'F'), debug=False):
        """Evaluate the results stored under `res_path` over the whole subset.

        :param res_path: Root directory of the result PNG masks.
        :param metric: Metrics to compute: 'J', 'F', or both.
        :param debug: If True, print each sequence name as it is processed.
        :return: dict with per-metric Mean/Recall/Decay lists and per-object means.
        :raises ValueError: for the unsupported 'T' metric or an empty selection.
        """
        metric = metric if isinstance(metric, tuple) or isinstance(metric, list) else [metric]
        if 'T' in metric:
            raise ValueError('Temporal metric not supported!')
        if 'J' not in metric and 'F' not in metric:
            raise ValueError('Metric possible values are J for IoU or F for Boundary')

        # Containers
        metrics_res = {}
        if 'J' in metric:
            metrics_res['J'] = {"M": [], "R": [], "D": [], "M_per_object": {}}
        if 'F' in metric:
            metrics_res['F'] = {"M": [], "R": [], "D": [], "M_per_object": {}}

        # Sweep all sequences
        results = Results(root_dir=res_path)
        for seq in tqdm(list(self.dataset.get_sequences())):
            all_gt_masks, all_void_masks, all_masks_id = self.dataset.get_all_masks(seq, True)
            if self.task == 'semi-supervised':
                # First and last frames are excluded from semi-supervised scoring.
                all_gt_masks, all_masks_id = all_gt_masks[:, 1:-1, :, :], all_masks_id[1:-1]
            all_res_masks = results.read_masks(seq, all_masks_id)
            if self.task == 'unsupervised':
                j_metrics_res, f_metrics_res = self._evaluate_unsupervised(all_gt_masks, all_res_masks, all_void_masks, metric)
            elif self.task == 'semi-supervised':
                j_metrics_res, f_metrics_res = self._evaluate_semisupervised(all_gt_masks, all_res_masks, None, metric)
            # Accumulate per-object statistics (objects are 1-indexed in the names).
            for ii in range(all_gt_masks.shape[0]):
                seq_name = f'{seq}_{ii+1}'
                if 'J' in metric:
                    [JM, JR, JD] = utils.db_statistics(j_metrics_res[ii])
                    metrics_res['J']["M"].append(JM)
                    metrics_res['J']["R"].append(JR)
                    metrics_res['J']["D"].append(JD)
                    metrics_res['J']["M_per_object"][seq_name] = JM
                if 'F' in metric:
                    [FM, FR, FD] = utils.db_statistics(f_metrics_res[ii])
                    metrics_res['F']["M"].append(FM)
                    metrics_res['F']["R"].append(FR)
                    metrics_res['F']["D"].append(FD)
                    metrics_res['F']["M_per_object"][seq_name] = FM

            # Show progress
            if debug:
                sys.stdout.write(seq + '\n')
                sys.stdout.flush()
        return metrics_res
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment