Commit b634945d authored by limm's avatar limm
Browse files

support v0.6

parent 5b3792fc
_BASE_: "Base-DensePose-RCNN-FPN.yaml"
MODEL:
  WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
  RESNETS:
    DEPTH: 50
  ROI_DENSEPOSE_HEAD:
    # legacy head: 15-channel coarse segmentation, no decoder
    NUM_COARSE_SEGM_CHANNELS: 15
    POOLER_RESOLUTION: 14
    HEATMAP_SIZE: 56
    INDEX_WEIGHTS: 2.0
    PART_WEIGHTS: 0.3
    POINT_REGRESSION_WEIGHTS: 0.1
    DECODER_ON: False
SOLVER:
  BASE_LR: 0.002
  MAX_ITER: 130000
  STEPS: (100000, 120000)
MODEL:
  META_ARCHITECTURE: "GeneralizedRCNN"
  BACKBONE:
    NAME: "build_resnet_fpn_backbone"
  RESNETS:
    OUT_FEATURES: ["res2", "res3", "res4", "res5"]
  FPN:
    IN_FEATURES: ["res2", "res3", "res4", "res5"]
  ANCHOR_GENERATOR:
    SIZES: [[32], [64], [128], [256], [512]]  # One size for each in feature map
    ASPECT_RATIOS: [[0.5, 1.0, 2.0]]  # Three aspect ratios (same for all in feature maps)
  RPN:
    IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"]
    PRE_NMS_TOPK_TRAIN: 2000  # Per FPN level
    PRE_NMS_TOPK_TEST: 1000  # Per FPN level
    # Detectron1 uses 2000 proposals per-batch,
    # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue)
    # which is approximately 1000 proposals per-image since the default batch size for FPN is 2.
    POST_NMS_TOPK_TRAIN: 1000
    POST_NMS_TOPK_TEST: 1000
  ROI_HEADS:
    NAME: "StandardROIHeads"
    IN_FEATURES: ["p2", "p3", "p4", "p5"]
    NUM_CLASSES: 1
  ROI_BOX_HEAD:
    NAME: "FastRCNNConvFCHead"
    NUM_FC: 2
    POOLER_RESOLUTION: 7
  ROI_MASK_HEAD:
    NAME: "MaskRCNNConvUpsampleHead"
    NUM_CONV: 4
    POOLER_RESOLUTION: 14
DATASETS:
  TRAIN: ("base_coco_2017_train", "densepose_coco_2014_train")
  TEST: ("densepose_chimps",)
  # Map selected animal categories onto the single "person" class
  CATEGORY_MAPS:
    "base_coco_2017_train":
      "16": 1  # bird -> person
      "17": 1  # cat -> person
      "18": 1  # dog -> person
      "19": 1  # horse -> person
      "20": 1  # sheep -> person
      "21": 1  # cow -> person
      "22": 1  # elephant -> person
      "23": 1  # bear -> person
      "24": 1  # zebra -> person
      "25": 1  # giraffe -> person
    "base_coco_2017_val":
      "16": 1  # bird -> person
      "17": 1  # cat -> person
      "18": 1  # dog -> person
      "19": 1  # horse -> person
      "20": 1  # sheep -> person
      "21": 1  # cow -> person
      "22": 1  # elephant -> person
      "23": 1  # bear -> person
      "24": 1  # zebra -> person
      "25": 1  # giraffe -> person
  WHITELISTED_CATEGORIES:
    "base_coco_2017_train":
      - 1   # person
      - 16  # bird
      - 17  # cat
      - 18  # dog
      - 19  # horse
      - 20  # sheep
      - 21  # cow
      - 22  # elephant
      - 23  # bear
      - 24  # zebra
      - 25  # giraffe
    "base_coco_2017_val":
      - 1   # person
      - 16  # bird
      - 17  # cat
      - 18  # dog
      - 19  # horse
      - 20  # sheep
      - 21  # cow
      - 22  # elephant
      - 23  # bear
      - 24  # zebra
      - 25  # giraffe
SOLVER:
  IMS_PER_BATCH: 16
  BASE_LR: 0.02
  STEPS: (60000, 80000)
  MAX_ITER: 90000
INPUT:
  MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
VERSION: 2
_BASE_: "Base-RCNN-FPN-Atop10P_CA.yaml"
MODEL:
  WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
  RESNETS:
    DEPTH: 50
  DENSEPOSE_ON: True
  ROI_HEADS:
    NAME: "DensePoseROIHeads"
    IN_FEATURES: ["p2", "p3", "p4", "p5"]
    NUM_CLASSES: 1
  ROI_DENSEPOSE_HEAD:
    NAME: "DensePoseDeepLabHead"
    UV_CONFIDENCE:
      ENABLED: True
      TYPE: "iid_iso"
    SEGM_CONFIDENCE:
      ENABLED: True
    POINT_REGRESSION_WEIGHTS: 0.0005
    POOLER_TYPE: "ROIAlign"
    NUM_COARSE_SEGM_CHANNELS: 2
    COARSE_SEGM_TRAINED_BY_MASKS: True
    INDEX_WEIGHTS: 1.0
SOLVER:
  CLIP_GRADIENTS:
    ENABLED: True
  WARMUP_FACTOR: 0.025
  MAX_ITER: 270000
  STEPS: (210000, 250000)
_BASE_: "Base-RCNN-FPN-Atop10P_CA.yaml"
MODEL:
  WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl"
  RESNETS:
    DEPTH: 50
  DENSEPOSE_ON: True
  ROI_HEADS:
    NAME: "DensePoseROIHeads"
    IN_FEATURES: ["p2", "p3", "p4", "p5"]
    NUM_CLASSES: 1
  ROI_DENSEPOSE_HEAD:
    NAME: "DensePoseDeepLabHead"
    UV_CONFIDENCE:
      ENABLED: True
      TYPE: "iid_iso"
    SEGM_CONFIDENCE:
      ENABLED: True
    POINT_REGRESSION_WEIGHTS: 0.0005
    POOLER_TYPE: "ROIAlign"
    NUM_COARSE_SEGM_CHANNELS: 2
    COARSE_SEGM_TRAINED_BY_MASKS: True
BOOTSTRAP_DATASETS:
  - DATASET: "chimpnsee"
    RATIO: 1.0
    IMAGE_LOADER:
      TYPE: "video_keyframe"
      SELECT:
        STRATEGY: "random_k"
        NUM_IMAGES: 4
      TRANSFORM:
        TYPE: "resize"
        MIN_SIZE: 800
        MAX_SIZE: 1333
      BATCH_SIZE: 8
      NUM_WORKERS: 1
    INFERENCE:
      INPUT_BATCH_SIZE: 1
      OUTPUT_BATCH_SIZE: 1
    DATA_SAMPLER:
      # supported types:
      #   densepose_uniform
      #   densepose_UV_confidence
      #   densepose_fine_segm_confidence
      #   densepose_coarse_segm_confidence
      TYPE: "densepose_coarse_segm_confidence"
      COUNT_PER_CLASS: 8
    FILTER:
      TYPE: "detection_score"
      MIN_VALUE: 0.8
BOOTSTRAP_MODEL:
  WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl"
SOLVER:
  CLIP_GRADIENTS:
    ENABLED: True
  MAX_ITER: 270000
  STEPS: (210000, 250000)
_BASE_: "Base-RCNN-FPN-Atop10P_CA.yaml"
MODEL:
  WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl"
  RESNETS:
    DEPTH: 50
  DENSEPOSE_ON: True
  ROI_HEADS:
    NAME: "DensePoseROIHeads"
    IN_FEATURES: ["p2", "p3", "p4", "p5"]
    NUM_CLASSES: 1
  ROI_DENSEPOSE_HEAD:
    NAME: "DensePoseDeepLabHead"
    UV_CONFIDENCE:
      ENABLED: True
      TYPE: "iid_iso"
    SEGM_CONFIDENCE:
      ENABLED: True
    POINT_REGRESSION_WEIGHTS: 0.0005
    POOLER_TYPE: "ROIAlign"
    NUM_COARSE_SEGM_CHANNELS: 2
    COARSE_SEGM_TRAINED_BY_MASKS: True
BOOTSTRAP_DATASETS:
  - DATASET: "chimpnsee"
    RATIO: 1.0
    IMAGE_LOADER:
      TYPE: "video_keyframe"
      SELECT:
        STRATEGY: "random_k"
        NUM_IMAGES: 4
      TRANSFORM:
        TYPE: "resize"
        MIN_SIZE: 800
        MAX_SIZE: 1333
      BATCH_SIZE: 8
      NUM_WORKERS: 1
    INFERENCE:
      INPUT_BATCH_SIZE: 1
      OUTPUT_BATCH_SIZE: 1
    DATA_SAMPLER:
      # supported types:
      #   densepose_uniform
      #   densepose_UV_confidence
      #   densepose_fine_segm_confidence
      #   densepose_coarse_segm_confidence
      TYPE: "densepose_fine_segm_confidence"
      COUNT_PER_CLASS: 8
    FILTER:
      TYPE: "detection_score"
      MIN_VALUE: 0.8
BOOTSTRAP_MODEL:
  WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl"
SOLVER:
  CLIP_GRADIENTS:
    ENABLED: True
  MAX_ITER: 270000
  STEPS: (210000, 250000)
_BASE_: "Base-RCNN-FPN-Atop10P_CA.yaml"
MODEL:
  WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl"
  RESNETS:
    DEPTH: 50
  DENSEPOSE_ON: True
  ROI_HEADS:
    NAME: "DensePoseROIHeads"
    IN_FEATURES: ["p2", "p3", "p4", "p5"]
    NUM_CLASSES: 1
  ROI_DENSEPOSE_HEAD:
    NAME: "DensePoseDeepLabHead"
    UV_CONFIDENCE:
      ENABLED: True
      TYPE: "iid_iso"
    SEGM_CONFIDENCE:
      ENABLED: True
    POINT_REGRESSION_WEIGHTS: 0.0005
    POOLER_TYPE: "ROIAlign"
    NUM_COARSE_SEGM_CHANNELS: 2
    COARSE_SEGM_TRAINED_BY_MASKS: True
BOOTSTRAP_DATASETS:
  - DATASET: "chimpnsee"
    RATIO: 1.0
    IMAGE_LOADER:
      TYPE: "video_keyframe"
      SELECT:
        STRATEGY: "random_k"
        NUM_IMAGES: 4
      TRANSFORM:
        TYPE: "resize"
        MIN_SIZE: 800
        MAX_SIZE: 1333
      BATCH_SIZE: 8
      NUM_WORKERS: 1
    INFERENCE:
      INPUT_BATCH_SIZE: 1
      OUTPUT_BATCH_SIZE: 1
    DATA_SAMPLER:
      # supported types:
      #   densepose_uniform
      #   densepose_UV_confidence
      #   densepose_fine_segm_confidence
      #   densepose_coarse_segm_confidence
      TYPE: "densepose_uniform"
      COUNT_PER_CLASS: 8
    FILTER:
      TYPE: "detection_score"
      MIN_VALUE: 0.8
BOOTSTRAP_MODEL:
  WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl"
SOLVER:
  CLIP_GRADIENTS:
    ENABLED: True
  MAX_ITER: 270000
  STEPS: (210000, 250000)
_BASE_: "Base-RCNN-FPN-Atop10P_CA.yaml"
MODEL:
  WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl"
  RESNETS:
    DEPTH: 50
  DENSEPOSE_ON: True
  ROI_HEADS:
    NAME: "DensePoseROIHeads"
    IN_FEATURES: ["p2", "p3", "p4", "p5"]
    NUM_CLASSES: 1
  ROI_DENSEPOSE_HEAD:
    NAME: "DensePoseDeepLabHead"
    UV_CONFIDENCE:
      ENABLED: True
      TYPE: "iid_iso"
    SEGM_CONFIDENCE:
      ENABLED: True
    POINT_REGRESSION_WEIGHTS: 0.0005
    POOLER_TYPE: "ROIAlign"
    NUM_COARSE_SEGM_CHANNELS: 2
    COARSE_SEGM_TRAINED_BY_MASKS: True
BOOTSTRAP_DATASETS:
  - DATASET: "chimpnsee"
    RATIO: 1.0
    IMAGE_LOADER:
      TYPE: "video_keyframe"
      SELECT:
        STRATEGY: "random_k"
        NUM_IMAGES: 4
      TRANSFORM:
        TYPE: "resize"
        MIN_SIZE: 800
        MAX_SIZE: 1333
      BATCH_SIZE: 8
      NUM_WORKERS: 1
    INFERENCE:
      INPUT_BATCH_SIZE: 1
      OUTPUT_BATCH_SIZE: 1
    DATA_SAMPLER:
      # supported types:
      #   densepose_uniform
      #   densepose_UV_confidence
      #   densepose_fine_segm_confidence
      #   densepose_coarse_segm_confidence
      TYPE: "densepose_UV_confidence"
      COUNT_PER_CLASS: 8
    FILTER:
      TYPE: "detection_score"
      MIN_VALUE: 0.8
BOOTSTRAP_MODEL:
  WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl"
SOLVER:
  CLIP_GRADIENTS:
    ENABLED: True
  MAX_ITER: 270000
  STEPS: (210000, 250000)
_BASE_: "../../cse/Base-DensePose-RCNN-FPN.yaml"
MODEL:
  WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
  ROI_DENSEPOSE_HEAD:
    NAME: "DensePoseDeepLabHead"
DATASETS:
  TRAIN: ("densepose_coco_2014_minival_100_cse",)
  TEST: ("densepose_coco_2014_minival_100_cse",)
SOLVER:
  # very short schedule: smoke-test config, not meant to converge
  MAX_ITER: 40
  STEPS: (30,)
_BASE_: "../../cse/Base-DensePose-RCNN-FPN.yaml"
MODEL:
  WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
  RESNETS:
    DEPTH: 50
  ROI_HEADS:
    NUM_CLASSES: 9
  ROI_DENSEPOSE_HEAD:
    NAME: "DensePoseV1ConvXHead"
    CSE:
      EMBED_LOSS_NAME: "SoftEmbeddingLoss"
      EMBEDDING_DIST_GAUSS_SIGMA: 0.1
      # one vertex-feature embedder per animal mesh
      EMBEDDERS:
        "cat_5001":
          TYPE: vertex_feature
          NUM_VERTICES: 5001
          FEATURE_DIM: 256
          FEATURES_TRAINABLE: False
          IS_TRAINABLE: True
          INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cat_5001_256.pkl"
        "dog_5002":
          TYPE: vertex_feature
          NUM_VERTICES: 5002
          FEATURE_DIM: 256
          FEATURES_TRAINABLE: False
          IS_TRAINABLE: True
          INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_dog_5002_256.pkl"
        "sheep_5004":
          TYPE: vertex_feature
          NUM_VERTICES: 5004
          FEATURE_DIM: 256
          FEATURES_TRAINABLE: False
          IS_TRAINABLE: True
          INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_sheep_5004_256.pkl"
        "horse_5004":
          TYPE: vertex_feature
          NUM_VERTICES: 5004
          FEATURE_DIM: 256
          FEATURES_TRAINABLE: False
          IS_TRAINABLE: True
          INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_horse_5004_256.pkl"
        "zebra_5002":
          TYPE: vertex_feature
          NUM_VERTICES: 5002
          FEATURE_DIM: 256
          FEATURES_TRAINABLE: False
          IS_TRAINABLE: True
          INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_zebra_5002_256.pkl"
        "giraffe_5002":
          TYPE: vertex_feature
          NUM_VERTICES: 5002
          FEATURE_DIM: 256
          FEATURES_TRAINABLE: False
          IS_TRAINABLE: True
          INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_giraffe_5002_256.pkl"
        "elephant_5002":
          TYPE: vertex_feature
          NUM_VERTICES: 5002
          FEATURE_DIM: 256
          FEATURES_TRAINABLE: False
          IS_TRAINABLE: True
          INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_elephant_5002_256.pkl"
        "cow_5002":
          TYPE: vertex_feature
          NUM_VERTICES: 5002
          FEATURE_DIM: 256
          FEATURES_TRAINABLE: False
          IS_TRAINABLE: True
          INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cow_5002_256.pkl"
        "bear_4936":
          TYPE: vertex_feature
          NUM_VERTICES: 4936
          FEATURE_DIM: 256
          FEATURES_TRAINABLE: False
          IS_TRAINABLE: True
          INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_bear_4936_256.pkl"
DATASETS:
  TRAIN:
    - "densepose_lvis_v1_train1"
    - "densepose_lvis_v1_train2"
  TEST:
    - "densepose_lvis_v1_val_animals_100"
  WHITELISTED_CATEGORIES:
    "densepose_lvis_v1_train1":
      - 943   # sheep
      - 1202  # zebra
      - 569   # horse
      - 496   # giraffe
      - 422   # elephant
      - 80    # cow
      - 76    # bear
      - 225   # cat
      - 378   # dog
    "densepose_lvis_v1_train2":
      - 943   # sheep
      - 1202  # zebra
      - 569   # horse
      - 496   # giraffe
      - 422   # elephant
      - 80    # cow
      - 76    # bear
      - 225   # cat
      - 378   # dog
    "densepose_lvis_v1_val_animals_100":
      - 943   # sheep
      - 1202  # zebra
      - 569   # horse
      - 496   # giraffe
      - 422   # elephant
      - 80    # cow
      - 76    # bear
      - 225   # cat
      - 378   # dog
  CLASS_TO_MESH_NAME_MAPPING:
    "0": "bear_4936"
    "1": "cow_5002"
    "2": "cat_5001"
    "3": "dog_5002"
    "4": "elephant_5002"
    "5": "giraffe_5002"
    "6": "horse_5004"
    "7": "sheep_5004"
    "8": "zebra_5002"
SOLVER:
  # very short schedule: smoke-test config, not meant to converge
  MAX_ITER: 40
  STEPS: (30,)
_BASE_: "../HRNet/densepose_rcnn_HRFPN_HRNet_w32_s1x.yaml"
DATASETS:
  TRAIN: ("densepose_coco_2014_minival_100",)
  TEST: ("densepose_coco_2014_minival_100",)
SOLVER:
  # very short schedule: smoke-test config, not meant to converge
  MAX_ITER: 40
  STEPS: (30,)
  IMS_PER_BATCH: 2
_BASE_: "../Base-DensePose-RCNN-FPN.yaml"
MODEL:
  WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
  ROI_DENSEPOSE_HEAD:
    NAME: "DensePoseDeepLabHead"
DATASETS:
  TRAIN: ("densepose_coco_2014_minival_100",)
  TEST: ("densepose_coco_2014_minival_100",)
SOLVER:
  # very short schedule: smoke-test config, not meant to converge
  MAX_ITER: 40
  STEPS: (30,)
_BASE_: "../densepose_rcnn_R_50_FPN_s1x.yaml"
MODEL:
  WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl"
DATASETS:
  TRAIN: ()
  TEST: ("densepose_coco_2014_minival_100",)
TEST:
  AUG:
    ENABLED: True
    MIN_SIZES: (400, 500, 600, 700, 800, 900, 1000, 1100, 1200)
    MAX_SIZE: 4000
    FLIP: True
  EXPECTED_RESULTS: [["bbox_TTA", "AP", 61.74, 0.03], ["densepose_gps_TTA", "AP", 60.22, 0.03], ["densepose_gpsm_TTA", "AP", 63.59, 0.03]]
_BASE_: "../Base-DensePose-RCNN-FPN.yaml"
MODEL:
  WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
  RESNETS:
    DEPTH: 50
  ROI_DENSEPOSE_HEAD:
    UV_CONFIDENCE:
      ENABLED: True
      TYPE: "iid_iso"
    POINT_REGRESSION_WEIGHTS: 0.0005
DATASETS:
  TRAIN: ("densepose_coco_2014_minival_100",)
  TEST: ("densepose_coco_2014_minival_100",)
SOLVER:
  CLIP_GRADIENTS:
    ENABLED: True
  # very short schedule: smoke-test config, not meant to converge
  MAX_ITER: 40
  STEPS: (30,)
  WARMUP_FACTOR: 0.025
_BASE_: "../Base-DensePose-RCNN-FPN.yaml"
MODEL:
  WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
  RESNETS:
    DEPTH: 50
  ROI_DENSEPOSE_HEAD:
    UV_CONFIDENCE:
      ENABLED: True
      TYPE: "indep_aniso"
    POINT_REGRESSION_WEIGHTS: 0.0005
DATASETS:
  TRAIN: ("densepose_coco_2014_minival_100",)
  TEST: ("densepose_coco_2014_minival_100",)
SOLVER:
  CLIP_GRADIENTS:
    ENABLED: True
  # very short schedule: smoke-test config, not meant to converge
  MAX_ITER: 40
  STEPS: (30,)
  WARMUP_FACTOR: 0.025
_BASE_: "../densepose_rcnn_R_50_FPN_s1x.yaml"
MODEL:
  WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl"
DATASETS:
  TRAIN: ()
  TEST: ("densepose_coco_2014_minival_100",)
TEST:
  EXPECTED_RESULTS: [["bbox", "AP", 59.27, 0.025], ["densepose_gps", "AP", 60.11, 0.02], ["densepose_gpsm", "AP", 64.09, 0.02]]
_BASE_: "../Base-DensePose-RCNN-FPN.yaml"
MODEL:
  WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
DATASETS:
  TRAIN: ("densepose_coco_2014_minival_100",)
  TEST: ("densepose_coco_2014_minival_100",)
SOLVER:
  # very short schedule: smoke-test config, not meant to converge
  MAX_ITER: 40
  STEPS: (30,)
_BASE_: "../Base-DensePose-RCNN-FPN.yaml"
MODEL:
  WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
  ROI_HEADS:
    NUM_CLASSES: 1
DATASETS:
  TRAIN: ("densepose_coco_2014_minival",)
  TEST: ("densepose_coco_2014_minival",)
SOLVER:
  CLIP_GRADIENTS:
    ENABLED: True
    CLIP_TYPE: norm
    CLIP_VALUE: 1.0
  MAX_ITER: 6000
  STEPS: (5500, 5800)
TEST:
  EXPECTED_RESULTS: [["bbox", "AP", 76.2477, 1.0], ["densepose_gps", "AP", 79.6090, 1.5], ["densepose_gpsm", "AP", 80.0061, 1.5]]
# Copyright (c) Facebook, Inc. and its affiliates.
from .data.datasets import builtin # just to register data
from .converters import builtin as builtin_converters # register converters
from .config import (
add_densepose_config,
add_densepose_head_config,
add_hrnet_config,
add_dataset_category_config,
add_bootstrap_config,
load_bootstrap_config,
)
from .structures import DensePoseDataRelative, DensePoseList, DensePoseTransformData
from .evaluation import DensePoseCOCOEvaluator
from .modeling.roi_heads import DensePoseROIHeads
from .modeling.test_time_augmentation import (
DensePoseGeneralizedRCNNWithTTA,
DensePoseDatasetMapperTTA,
)
from .utils.transform import load_from_cfg
from .modeling.hrfpn import build_hrfpn_backbone
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
# pyre-ignore-all-errors
from detectron2.config import CfgNode as CN
def add_dataset_category_config(cfg: CN) -> None:
    """
    Add config for additional category-related dataset options:
    - category whitelisting
    - category mapping
    - class to mesh name mapping
    """
    _C = cfg
    # new_allowed=True: these nodes are filled with arbitrary per-dataset keys
    _C.DATASETS.CATEGORY_MAPS = CN(new_allowed=True)
    _C.DATASETS.WHITELISTED_CATEGORIES = CN(new_allowed=True)
    # class to mesh mapping
    _C.DATASETS.CLASS_TO_MESH_NAME_MAPPING = CN(new_allowed=True)
def add_evaluation_config(cfg: CN) -> None:
    """
    Add config options controlling DensePose evaluation (evaluator type,
    results storage, IOU threshold, distributed inference and mesh alignment).
    """
    _C = cfg
    _C.DENSEPOSE_EVALUATION = CN()
    # evaluator type, possible values:
    #  - "iou": evaluator for models that produce iou data
    #  - "cse": evaluator for models that produce cse data
    _C.DENSEPOSE_EVALUATION.TYPE = "iou"
    # storage for DensePose results, possible values:
    #  - "none": no explicit storage, all the results are stored in the
    #            dictionary with predictions, memory intensive;
    #            historically the default storage type
    #  - "ram": RAM storage, uses per-process RAM storage, which is
    #           reduced to a single process storage on later stages,
    #           less memory intensive
    #  - "file": file storage, uses per-process file-based storage,
    #            the least memory intensive, but may create bottlenecks
    #            on file system accesses
    _C.DENSEPOSE_EVALUATION.STORAGE = "none"
    # minimum threshold for IOU values: the lower its value is,
    # the more matches are produced (and the higher the AP score)
    _C.DENSEPOSE_EVALUATION.MIN_IOU_THRESHOLD = 0.5
    # Non-distributed inference is slower (at inference time) but can avoid RAM OOM
    _C.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE = True
    # evaluate mesh alignment based on vertex embeddings, only makes sense in CSE context
    _C.DENSEPOSE_EVALUATION.EVALUATE_MESH_ALIGNMENT = False
    # meshes to compute mesh alignment for
    _C.DENSEPOSE_EVALUATION.MESH_ALIGNMENT_MESH_NAMES = []
def add_bootstrap_config(cfg: CN) -> None:
    """
    Add config options for bootstrapping: the list of bootstrap datasets and
    the model used to produce pseudo-labels on them.
    """
    _C = cfg
    # list of dicts, one per bootstrap dataset; converted to CfgNode later
    # by load_bootstrap_config
    _C.BOOTSTRAP_DATASETS = []
    _C.BOOTSTRAP_MODEL = CN()
    _C.BOOTSTRAP_MODEL.WEIGHTS = ""
    _C.BOOTSTRAP_MODEL.DEVICE = "cuda"
def get_bootstrap_dataset_config() -> CN:
    """
    Build and return the default config node for a single bootstrap dataset
    entry (dataset name, mixing ratio, image loader, inference batching,
    data sampler and filter options).
    """
    _C = CN()
    _C.DATASET = ""
    # ratio used to mix data loaders
    _C.RATIO = 0.1
    # image loader
    _C.IMAGE_LOADER = CN(new_allowed=True)
    _C.IMAGE_LOADER.TYPE = ""
    _C.IMAGE_LOADER.BATCH_SIZE = 4
    _C.IMAGE_LOADER.NUM_WORKERS = 4
    _C.IMAGE_LOADER.CATEGORIES = []
    _C.IMAGE_LOADER.MAX_COUNT_PER_CATEGORY = 1_000_000
    _C.IMAGE_LOADER.CATEGORY_TO_CLASS_MAPPING = CN(new_allowed=True)
    # inference
    _C.INFERENCE = CN()
    # batch size for model inputs
    _C.INFERENCE.INPUT_BATCH_SIZE = 4
    # batch size to group model outputs
    _C.INFERENCE.OUTPUT_BATCH_SIZE = 2
    # sampled data
    _C.DATA_SAMPLER = CN(new_allowed=True)
    _C.DATA_SAMPLER.TYPE = ""
    _C.DATA_SAMPLER.USE_GROUND_TRUTH_CATEGORIES = False
    # filter
    _C.FILTER = CN(new_allowed=True)
    _C.FILTER.TYPE = ""
    return _C
def load_bootstrap_config(cfg: CN) -> None:
    """
    Bootstrap datasets are given as a list of `dict` that are not automatically
    converted into CfgNode. This method processes all bootstrap dataset entries
    and ensures that they are in CfgNode format and comply with the specification.

    Mutates ``cfg.BOOTSTRAP_DATASETS`` in place.
    """
    if not cfg.BOOTSTRAP_DATASETS:
        return
    bootstrap_datasets_cfgnodes = []
    for dataset_cfg in cfg.BOOTSTRAP_DATASETS:
        # start from the defaults and overlay the user-provided entry
        _C = get_bootstrap_dataset_config().clone()
        _C.merge_from_other_cfg(CN(dataset_cfg))
        bootstrap_datasets_cfgnodes.append(_C)
    cfg.BOOTSTRAP_DATASETS = bootstrap_datasets_cfgnodes
def add_densepose_head_cse_config(cfg: CN) -> None:
    """
    Add configuration options for Continuous Surface Embeddings (CSE).
    """
    _C = cfg
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE = CN()
    # Dimensionality D of the embedding space
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE = 16
    # Embedder specifications for various mesh IDs
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDERS = CN(new_allowed=True)
    # normalization coefficient for embedding distances
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDING_DIST_GAUSS_SIGMA = 0.01
    # normalization coefficient for geodesic distances
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.GEODESIC_DIST_GAUSS_SIGMA = 0.01
    # embedding loss weight
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_LOSS_WEIGHT = 0.6
    # embedding loss name, currently the following options are supported:
    #  - EmbeddingLoss: cross-entropy on vertex labels
    #  - SoftEmbeddingLoss: cross-entropy on vertex label combined with
    #    Gaussian penalty on distance between vertices
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_LOSS_NAME = "EmbeddingLoss"
    # optimizer hyperparameters
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.FEATURES_LR_FACTOR = 1.0
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDING_LR_FACTOR = 1.0
    # Shape to shape cycle consistency loss parameters:
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS = CN({"ENABLED": False})
    # shape to shape cycle consistency loss weight
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.WEIGHT = 0.025
    # norm type used for loss computation
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.NORM_P = 2
    # normalization term for embedding similarity matrices
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.TEMPERATURE = 0.05
    # maximum number of vertices to include into shape to shape cycle loss
    # if negative or zero, all vertices are considered
    # if positive, random subset of vertices of given size is considered
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.MAX_NUM_VERTICES = 4936
    # Pixel to shape cycle consistency loss parameters:
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS = CN({"ENABLED": False})
    # pixel to shape cycle consistency loss weight
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.WEIGHT = 0.0001
    # norm type used for loss computation
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.NORM_P = 2
    # map images to all meshes and back (if false, use only gt meshes from the batch)
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.USE_ALL_MESHES_NOT_GT_ONLY = False
    # Randomly select at most this number of pixels from every instance
    # if negative or zero, all vertices are considered
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.NUM_PIXELS_TO_SAMPLE = 100
    # normalization factor for pixel to pixel distances (higher value = smoother distribution)
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.PIXEL_SIGMA = 5.0
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.TEMPERATURE_PIXEL_TO_VERTEX = 0.05
    _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.TEMPERATURE_VERTEX_TO_PIXEL = 0.05
def add_densepose_head_config(cfg: CN) -> None:
    """
    Add config for the DensePose head (architecture, loss weights, predictor,
    confidence modeling and rotation augmentation); also registers the CSE
    sub-config via add_densepose_head_cse_config.
    """
    _C = cfg
    _C.MODEL.DENSEPOSE_ON = True
    _C.MODEL.ROI_DENSEPOSE_HEAD = CN()
    _C.MODEL.ROI_DENSEPOSE_HEAD.NAME = ""
    _C.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS = 8
    # Number of parts used for point labels
    _C.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES = 24
    _C.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL = 4
    _C.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM = 512
    _C.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL = 3
    _C.MODEL.ROI_DENSEPOSE_HEAD.UP_SCALE = 2
    _C.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE = 112
    _C.MODEL.ROI_DENSEPOSE_HEAD.POOLER_TYPE = "ROIAlignV2"
    _C.MODEL.ROI_DENSEPOSE_HEAD.POOLER_RESOLUTION = 28
    _C.MODEL.ROI_DENSEPOSE_HEAD.POOLER_SAMPLING_RATIO = 2
    _C.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS = 2  # 15 or 2
    # Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD)
    _C.MODEL.ROI_DENSEPOSE_HEAD.FG_IOU_THRESHOLD = 0.7
    # Loss weights for annotation masks (14 parts)
    _C.MODEL.ROI_DENSEPOSE_HEAD.INDEX_WEIGHTS = 5.0
    # Loss weights for surface parts (24 parts)
    _C.MODEL.ROI_DENSEPOSE_HEAD.PART_WEIGHTS = 1.0
    # Loss weights for UV regression
    _C.MODEL.ROI_DENSEPOSE_HEAD.POINT_REGRESSION_WEIGHTS = 0.01
    # Coarse segmentation is trained using instance segmentation task data
    _C.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS = False
    # For Decoder
    _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_ON = True
    _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NUM_CLASSES = 256
    _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_CONV_DIMS = 256
    _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NORM = ""
    _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_COMMON_STRIDE = 4
    # For DeepLab head
    _C.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB = CN()
    _C.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NORM = "GN"
    _C.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NONLOCAL_ON = 0
    # Predictor class name, must be registered in DENSEPOSE_PREDICTOR_REGISTRY
    # Some registered predictors:
    #   "DensePoseChartPredictor": predicts segmentation and UV coordinates for predefined charts
    #   "DensePoseChartWithConfidencePredictor": predicts segmentation, UV coordinates
    #       and associated confidences for predefined charts (default)
    #   "DensePoseEmbeddingWithConfidencePredictor": predicts segmentation, embeddings
    #       and associated confidences for CSE
    _C.MODEL.ROI_DENSEPOSE_HEAD.PREDICTOR_NAME = "DensePoseChartWithConfidencePredictor"
    # Loss class name, must be registered in DENSEPOSE_LOSS_REGISTRY
    # Some registered losses:
    #   "DensePoseChartLoss": loss for chart-based models that estimate
    #       segmentation and UV coordinates
    #   "DensePoseChartWithConfidenceLoss": loss for chart-based models that estimate
    #       segmentation, UV coordinates and the corresponding confidences (default)
    _C.MODEL.ROI_DENSEPOSE_HEAD.LOSS_NAME = "DensePoseChartWithConfidenceLoss"
    # Confidences
    # Enable learning UV confidences (variances) along with the actual values
    _C.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE = CN({"ENABLED": False})
    # UV confidence lower bound
    _C.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.EPSILON = 0.01
    # Enable learning segmentation confidences (variances) along with the actual values
    _C.MODEL.ROI_DENSEPOSE_HEAD.SEGM_CONFIDENCE = CN({"ENABLED": False})
    # Segmentation confidence lower bound
    _C.MODEL.ROI_DENSEPOSE_HEAD.SEGM_CONFIDENCE.EPSILON = 0.01
    # Statistical model type for confidence learning, possible values:
    #  - "iid_iso": statistically independent identically distributed residuals
    #    with isotropic covariance
    #  - "indep_aniso": statistically independent residuals with anisotropic
    #    covariances
    _C.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.TYPE = "iid_iso"
    # List of angles for rotation in data augmentation during training
    _C.INPUT.ROTATION_ANGLES = [0]
    _C.TEST.AUG.ROTATION_ANGLES = ()  # Rotation TTA
    add_densepose_head_cse_config(cfg)
def add_hrnet_config(cfg: CN) -> None:
    """
    Add config for HRNet backbone.
    """
    _C = cfg
    # For HigherHRNet w32
    _C.MODEL.HRNET = CN()
    _C.MODEL.HRNET.STEM_INPLANES = 64
    _C.MODEL.HRNET.STAGE2 = CN()
    _C.MODEL.HRNET.STAGE2.NUM_MODULES = 1
    _C.MODEL.HRNET.STAGE2.NUM_BRANCHES = 2
    _C.MODEL.HRNET.STAGE2.BLOCK = "BASIC"
    _C.MODEL.HRNET.STAGE2.NUM_BLOCKS = [4, 4]
    _C.MODEL.HRNET.STAGE2.NUM_CHANNELS = [32, 64]
    _C.MODEL.HRNET.STAGE2.FUSE_METHOD = "SUM"
    _C.MODEL.HRNET.STAGE3 = CN()
    _C.MODEL.HRNET.STAGE3.NUM_MODULES = 4
    _C.MODEL.HRNET.STAGE3.NUM_BRANCHES = 3
    _C.MODEL.HRNET.STAGE3.BLOCK = "BASIC"
    _C.MODEL.HRNET.STAGE3.NUM_BLOCKS = [4, 4, 4]
    _C.MODEL.HRNET.STAGE3.NUM_CHANNELS = [32, 64, 128]
    _C.MODEL.HRNET.STAGE3.FUSE_METHOD = "SUM"
    _C.MODEL.HRNET.STAGE4 = CN()
    _C.MODEL.HRNET.STAGE4.NUM_MODULES = 3
    _C.MODEL.HRNET.STAGE4.NUM_BRANCHES = 4
    _C.MODEL.HRNET.STAGE4.BLOCK = "BASIC"
    _C.MODEL.HRNET.STAGE4.NUM_BLOCKS = [4, 4, 4, 4]
    _C.MODEL.HRNET.STAGE4.NUM_CHANNELS = [32, 64, 128, 256]
    _C.MODEL.HRNET.STAGE4.FUSE_METHOD = "SUM"
    _C.MODEL.HRNET.HRFPN = CN()
    _C.MODEL.HRNET.HRFPN.OUT_CHANNELS = 256
def add_densepose_config(cfg: CN) -> None:
    """
    Add all DensePose-specific config options to a detectron2 config:
    head, HRNet backbone, bootstrapping, dataset categories and evaluation.
    """
    add_densepose_head_config(cfg)
    add_hrnet_config(cfg)
    add_bootstrap_config(cfg)
    add_dataset_category_config(cfg)
    add_evaluation_config(cfg)
# Copyright (c) Facebook, Inc. and its affiliates.
from .hflip import HFlipConverter
from .to_mask import ToMaskConverter
from .to_chart_result import ToChartResultConverter, ToChartResultConverterWithConfidences
from .segm_to_mask import (
predictor_output_with_fine_and_coarse_segm_to_mask,
predictor_output_with_coarse_segm_to_mask,
resample_fine_and_coarse_segm_to_bbox,
)
from .chart_output_to_chart_result import (
densepose_chart_predictor_output_to_result,
densepose_chart_predictor_output_to_result_with_confidences,
)
from .chart_output_hflip import densepose_chart_predictor_output_hflip
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment