Unverified Commit a6d39f6a authored by Yuliang Liu, committed by GitHub

Merge pull request #39 from Yuliang-Liu/dev

Data generation
parents c7341cda 2189c3c4
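For orientation, the files in this merge form a data-generation pipeline: whole-image BLIP-2 captions, GRiT dense region captions, PP-OCR text, and SAM-region captions are each written to JSON under ./outputs/, merged into ann_all.json, and finally summarized into a detailed caption with GPT-3.5. Below is a minimal driver sketch; it is not part of this commit, the script filenames are hypothetical, and only the default JSON paths come from the scripts themselves.

# Hypothetical driver sketch -- the script names are placeholders, not files in this commit.
import subprocess

stages = [
    ["python", "blip2_caption.py", "--output_path", "./outputs/blip2_cap.json"],
    ["python", "grit_dense_caption.py", "--output_path", "./outputs/grit.json"],
    # OCR (ppocr.json) and SAM+BLIP-2 region scoring (sam_blip2_score.json)
    # come from stages not included in this diff.
    ["python", "merge_annotations.py", "--output", "./outputs/ann_all.json"],
    ["python", "gpt_detailed_caption.py", "--ann_path", "./outputs/ann_all.json",
     "--output_path", "./outputs/detailed_caption.json"],
]
for cmd in stages:
    subprocess.run(cmd, check=True)  # stop at the first failing stage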
import json
from argparse import ArgumentParser


def open_json(path):
    with open(path, "r") as f:
        data = json.load(f)
    return data


def save_json(json_list, save_path):
    with open(save_path, 'w') as file:
        json.dump(json_list, file, indent=4)


def calculate_IOU(box1, box2):
    # Boxes are [x1, y1, x2, y2]; returns intersection-over-union.
    Ax1, Ay1, Ax2, Ay2 = box1
    Bx1, By1, Bx2, By2 = box2
    Ix1 = max(Ax1, Bx1)
    Iy1 = max(Ay1, By1)
    Ix2 = min(Ax2, Bx2)
    Iy2 = min(Ay2, By2)
    IntersectionArea = max(0, Ix2 - Ix1 + 1) * max(0, Iy2 - Iy1 + 1)
    BoxAArea = (Ax2 - Ax1 + 1) * (Ay2 - Ay1 + 1)
    BoxBArea = (Bx2 - Bx1 + 1) * (By2 - By1 + 1)
    UnionArea = BoxAArea + BoxBArea - IntersectionArea
    IOU = IntersectionArea / UnionArea
    return IOU


def _get_args():
    parser = ArgumentParser()
    parser.add_argument("--blip2_caption", type=str, default="./outputs/blip2_cap.json")
    parser.add_argument("--ori_caption", type=str, default=None)
    parser.add_argument("--grit", type=str, default="./outputs/grit_score.json")
    parser.add_argument("--ppocr", type=str, default="./outputs/ppocr.json")
    parser.add_argument("--sam_blip2", type=str, default="./outputs/sam_blip2_score.json")
    parser.add_argument("--output", type=str, default="./outputs/ann_all.json")
    args = parser.parse_args()
    return args


if __name__ == "__main__":
    json_save = []
    args = _get_args()
    if args.ori_caption is not None:
        ori_cap = open_json(args.ori_caption)
    blip2_caption = open_json(args.blip2_caption)
    grit = open_json(args.grit)
    ppocr = open_json(args.ppocr)
    sam_blip2 = open_json(args.sam_blip2)

    blip2_caption_dict = {}
    grit_dict = {}
    ppocr_dict = {}
    sam_blip2_dict = {}

    # Whole-image BLIP-2 captions, keyed by image id.
    for i in range(len(blip2_caption)):
        img_id = blip2_caption[i]['img_id']
        caption = blip2_caption[i]['blip2_caption']
        blip2_caption_dict[img_id] = caption

    # GRiT dense captions: keep regions with score > 0.4.
    for i in range(len(grit)):
        img_id = grit[i]['img_id']
        objects = grit[i]['objects']
        caption = ""
        for j in range(len(objects)):
            if objects[j]['score'] > 0.4:
                caption = caption + f"{objects[j]['caption']}: {objects[j]['box']}; "
        grit_dict[img_id] = caption

    # OCR results: keep detections with score > 0.85.
    for i in range(len(ppocr)):
        img_id = ppocr[i]['img_id']
        objects = ppocr[i]['objects']
        caption = ""
        for j in range(len(objects)):
            if objects[j]['score'] > 0.85:
                caption = caption + f"{objects[j]['caption']}: {objects[j]['box']}; "
        ppocr_dict[img_id] = caption

    # SAM + BLIP-2 region captions: keep score > 0.5 and drop near-duplicate
    # boxes (IoU >= 0.6) that share the same caption.
    for i in range(len(sam_blip2)):
        img_id = sam_blip2[i]['img_id']
        objects = sam_blip2[i]['objects']
        caption = ""
        iou_filter = {}
        for j in range(len(objects)):
            if objects[j]['score'] > 0.5:
                if iou_filter.get(objects[j]['caption'], 0) == 0:
                    iou_filter[objects[j]['caption']] = objects[j]['box']
                    caption = caption + f"{objects[j]['caption']}: {objects[j]['box']}; "
                else:
                    if calculate_IOU(iou_filter[objects[j]['caption']], objects[j]['box']) < 0.6:
                        caption = caption + f"{objects[j]['caption']}: {objects[j]['box']}; "
        sam_blip2_dict[img_id] = caption

    # Merge all sources per image; images without OCR get an empty string.
    for key in blip2_caption_dict.keys():
        ocr_result = ppocr_dict.get(key, "")
        json_save.append({"img_id": key, "blip2cap": blip2_caption_dict[key],
                          "grit": grit_dict[key], "ocr": ocr_result,
                          "sam_blip": sam_blip2_dict[key]})
    save_json(json_save, args.output)
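To make the merge step above concrete, the record shapes it consumes and produces can be read off the loops; the example below is illustrative only (field names follow the script, values are invented).

# Illustrative records only -- values are made up; field names follow the merge script above.
blip2_entry = {"img_id": "000001.jpg", "blip2_caption": "a dog lying on a sofa"}
grit_entry = {"img_id": "000001.jpg",
              "objects": [{"caption": "brown dog", "box": [0.12, 0.3, 0.55, 0.8], "score": 0.72}]}
merged_entry = {"img_id": "000001.jpg",
                "blip2cap": "a dog lying on a sofa",
                "grit": "brown dog: [0.12, 0.3, 0.55, 0.8]; ",
                "ocr": "",
                "sam_blip": "brown dog: [0.12, 0.3, 0.55, 0.8]; "}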
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import cv2 # type: ignore
from segment_anything import SamAutomaticMaskGenerator, sam_model_registry
import argparse
import json
import os
from typing import Any, Dict, List
from tqdm import tqdm
from PIL import Image
parser = argparse.ArgumentParser(
    description=(
        "Runs automatic mask generation on an input image or directory of images, "
        "and outputs masks as either PNGs or COCO-style RLEs. Requires open-cv, "
        "as well as pycocotools if saving in RLE format."
    )
)

parser.add_argument(
    "--input",
    type=str,
    required=True,
    help="Path to either a single input image or folder of images.",
)

parser.add_argument(
    "--output",
    type=str,
    required=True,
    help=(
        "Path to the directory where masks will be output. Output will be either a folder "
        "of PNGs per image or a single json with COCO-style masks."
    ),
)

parser.add_argument(
    "--model-type",
    type=str,
    required=True,
    help="The type of model to load, in ['default', 'vit_h', 'vit_l', 'vit_b']",
)

parser.add_argument(
    "--checkpoint",
    type=str,
    required=True,
    help="The path to the SAM checkpoint to use for mask generation.",
)

parser.add_argument("--device", type=str, default="cuda", help="The device to run generation on.")

parser.add_argument(
    "--convert-to-rle",
    action="store_true",
    help=(
        "Save masks as COCO RLEs in a single json instead of as a folder of PNGs. "
        "Requires pycocotools."
    ),
)

amg_settings = parser.add_argument_group("AMG Settings")

amg_settings.add_argument(
    "--points-per-side",
    type=int,
    default=None,
    help="Generate masks by sampling a grid over the image with this many points to a side.",
)

amg_settings.add_argument(
    "--points-per-batch",
    type=int,
    default=None,
    help="How many input points to process simultaneously in one batch.",
)

amg_settings.add_argument(
    "--pred-iou-thresh",
    type=float,
    default=None,
    help="Exclude masks with a predicted score from the model that is lower than this threshold.",
)

amg_settings.add_argument(
    "--stability-score-thresh",
    type=float,
    default=None,
    help="Exclude masks with a stability score lower than this threshold.",
)

amg_settings.add_argument(
    "--stability-score-offset",
    type=float,
    default=None,
    help="Larger values perturb the mask more when measuring stability score.",
)

amg_settings.add_argument(
    "--box-nms-thresh",
    type=float,
    default=None,
    help="The overlap threshold for excluding a duplicate mask.",
)

amg_settings.add_argument(
    "--crop-n-layers",
    type=int,
    default=None,
    help=(
        "If >0, mask generation is run on smaller crops of the image to generate more masks. "
        "The value sets how many different scales to crop at."
    ),
)

amg_settings.add_argument(
    "--crop-nms-thresh",
    type=float,
    default=None,
    help="The overlap threshold for excluding duplicate masks across different crops.",
)

amg_settings.add_argument(
    "--crop-overlap-ratio",
    type=int,
    default=None,
    help="Larger numbers mean image crops will overlap more.",
)

amg_settings.add_argument(
    "--crop-n-points-downscale-factor",
    type=int,
    default=None,
    help="The number of points-per-side in each layer of crop is reduced by this factor.",
)

amg_settings.add_argument(
    "--min-mask-region-area",
    type=int,
    default=None,
    help=(
        "Disconnected mask regions or holes with area smaller than this value "
        "in pixels are removed by postprocessing."
    ),
)

amg_settings.add_argument(
    "--rank",
    type=int,
    default=0,
)
def write_masks_to_folder(masks: List[Dict[str, Any]], path: str) -> None:
    header = "id,area,bbox_x0,bbox_y0,bbox_w,bbox_h,point_input_x,point_input_y,predicted_iou,stability_score,crop_box_x0,crop_box_y0,crop_box_w,crop_box_h"  # noqa
    metadata = [header]
    for i, mask_data in enumerate(masks):
        mask = mask_data["segmentation"]
        filename = f"{i}.png"
        cv2.imwrite(os.path.join(path, filename), mask * 255)
        mask_metadata = [
            str(i),
            str(mask_data["area"]),
            *[str(x) for x in mask_data["bbox"]],
            *[str(x) for x in mask_data["point_coords"][0]],
            str(mask_data["predicted_iou"]),
            str(mask_data["stability_score"]),
            *[str(x) for x in mask_data["crop_box"]],
        ]
        row = ",".join(mask_metadata)
        metadata.append(row)
    metadata_path = os.path.join(path, "metadata.csv")
    with open(metadata_path, "w") as f:
        f.write("\n".join(metadata))
    return
def get_amg_kwargs(args):
    amg_kwargs = {
        "points_per_side": args.points_per_side,
        "points_per_batch": args.points_per_batch,
        "pred_iou_thresh": args.pred_iou_thresh,
        "stability_score_thresh": args.stability_score_thresh,
        "stability_score_offset": args.stability_score_offset,
        "box_nms_thresh": args.box_nms_thresh,
        "crop_n_layers": args.crop_n_layers,
        "crop_nms_thresh": args.crop_nms_thresh,
        "crop_overlap_ratio": args.crop_overlap_ratio,
        "crop_n_points_downscale_factor": args.crop_n_points_downscale_factor,
        "min_mask_region_area": args.min_mask_region_area,
    }
    amg_kwargs = {k: v for k, v in amg_kwargs.items() if v is not None}
    return amg_kwargs
def main(args: argparse.Namespace, image_list: List) -> None:
    print("Loading model...")
    sam = sam_model_registry[args.model_type](checkpoint=args.checkpoint)
    # The GPU is selected via --rank; the --device flag above is not used here.
    _ = sam.to(device=f"cuda:{args.rank}")
    output_mode = "coco_rle" if args.convert_to_rle else "binary_mask"
    amg_kwargs = get_amg_kwargs(args)
    generator = SamAutomaticMaskGenerator(sam, output_mode=output_mode, **amg_kwargs)

    targets = image_list
    num_empty = 0
    empty_list = []
    for i in tqdm(range(len(targets))):
        t = targets[i]
        # Skip images whose masks already exist and are non-empty; re-process empty results.
        mask_path = t.replace('images', 'masks').replace('.jpg', '.json')
        if os.path.exists(mask_path):
            with open(mask_path, 'r') as f:
                data = json.load(f)
            if data == []:
                num_empty += 1
                empty_list.append(t)
                print(f"{t} is empty, process again")
            else:
                print(f"Exist '{t}', skipping...")
                continue
        print(f"Processing '{t}'...")
        image = cv2.imread(t)
        if image is None:
            print(f"Could not load '{t}' as an image, skipping...")
            continue
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        masks = generator.generate(image)
        name = t.split('/')[-1].split('.')[0]
        save_base = os.path.join(args.output, name)
        if output_mode == "binary_mask":
            os.makedirs(save_base, exist_ok=False)
            write_masks_to_folder(masks, save_base)
        else:
            save_file = save_base + ".json"
            with open(save_file, "w") as f:
                json.dump(masks, f)
    print(f"empty num {num_empty}")
    print(empty_list)
    print("Done!")
def get_image_files(folder_path):
    image_files = []
    for root, dirs, files in os.walk(folder_path):
        for file in files:
            if file.endswith('.jpg') or file.endswith('.png'):
                image_files.append(os.path.join(root, file))
    return image_files
if __name__ == "__main__":
    args = parser.parse_args()
    image_list = get_image_files(args.input)
    print(f"len_image: {len(image_list)}")
    main(args, image_list)
import torch
from PIL import Image
from lavis.models import load_model_and_preprocess
from argparse import ArgumentParser
import os
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
import torchvision.utils as vutils
import json
def save_json(json_list, save_path):
    with open(save_path, 'w') as file:
        json.dump(json_list, file, indent=4)


def get_image_files(folder_path):
    image_files = []
    for root, dirs, files in os.walk(folder_path):
        for file in files:
            if file.endswith('.jpg') or file.endswith('.png'):
                image_files.append(os.path.join(root, file))
    return image_files


def _get_args():
    parser = ArgumentParser()
    parser.add_argument("--image_folder", type=str, default="./images")
    parser.add_argument("--output_path", type=str, default="./outputs/blip2_cap.json")
    parser.add_argument("--batch_size", type=int, default=2)
    parser.add_argument("--device", type=str, default="cuda:0")
    args = parser.parse_args()
    return args
class lazydataset(Dataset):
    def __init__(self, data_path, processor) -> None:
        super().__init__()
        self.image_paths = get_image_files(data_path)
        self.processor = processor

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, i):
        image_path = self.image_paths[i]
        raw_image = Image.open(image_path).convert('RGB')
        image = self.processor["eval"](raw_image)
        return {'image': image, 'img_id': image_path.split('/')[-1]}


def collate_fn(batch):
    image = [item['image'].squeeze(0) for item in batch]
    image = torch.stack(image)
    img_id = [item['img_id'] for item in batch]
    return {'image': image, 'img_id': img_id}
if __name__ == "__main__":
    json_save = []
    args = _get_args()
    device = args.device
    model, vis_processors, _ = load_model_and_preprocess(name="blip2_opt", model_type="pretrain_opt2.7b", is_eval=True, device=device)
    dataset = lazydataset(data_path=args.image_folder, processor=vis_processors)
    dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False, num_workers=0, collate_fn=collate_fn)
    for batch in tqdm(dataloader):
        image = batch['image'].to(device)
        captions = model.generate({"image": image})
        img_id = batch['img_id']
        for i in range(len(img_id)):
            json_save.append({'img_id': img_id[i], 'blip2_caption': captions[i]})
    save_json(json_save, args.output_path)
import json
import openai
from argparse import ArgumentParser
# You can replace this step with an open-source LLM.
openai.api_base = ""
openai.api_key = ''
def save_json(json_list, save_path):
    with open(save_path, 'w') as file:
        json.dump(json_list, file, indent=4)


def _get_args():
    parser = ArgumentParser()
    parser.add_argument("--ann_path", type=str, default="./outputs/ann_all.json")
    parser.add_argument("--output_path", type=str, default="./outputs/detailed_caption.json")
    args = parser.parse_args()
    return args
def get_question(ann):
    sentence1 = "I want you to act as an intelligent image captioner. You should generate a descriptive, coherent and logical description of the image based on the given descriptions from different people for the same image. The position is represented by normalized top-left and bottom-right coordinates."
    sentence2 = "\n Overall Image Caption: "
    sentence3 = "\n Dense Caption1 (Region Description: Area Location): "
    sentence4 = "\n Dense Caption2 (Region Description: Area Location): "
    sentence5 = "\n Texts in the image: "
    sentence6 = "\n There are some rules for your response: Provide context of the image. \n Merging the descriptions of the same object at the same position and the texts belonging to the same sentence. \n Show main objects with their attributes (e.g. position, color, shape). \n Show relative position between main objects. \n Less than 6 sentences. \n Do not show any numbers or coordinates. \n Do not describe any individual letter."
    # Only include the OCR block when the image actually has recognized text.
    if ann['ocr'] != "":
        question = f"{sentence1}{sentence2}{ann['blip2cap']}{sentence3}{ann['grit']}{sentence4}{ann['sam_blip']}{sentence5}{ann['ocr']}{sentence6}"
    else:
        question = f"{sentence1}{sentence2}{ann['blip2cap']}{sentence3}{ann['grit']}{sentence4}{ann['sam_blip']}{sentence6}"
    return question
if __name__ == "__main__":
    args = _get_args()
    json_save = []
    with open(args.ann_path, "r") as f:
        data = json.load(f)
    for i in range(len(data)):
        question = get_question(data[i])
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",  # gpt-4-1106-preview
            messages=[
                {"role": "user", "content": question},
            ]
        )
        answer = response['choices'][0]['message']['content']
        json_save.append({"img_id": data[i]['img_id'], "detailed_caption": answer})
    save_json(json_save, args.output_path)
import argparse
import multiprocessing as mp
import os
import time
import cv2
from tqdm import tqdm
import sys
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
sys.path.insert(0, 'grit/third_party/CenterNet2/projects/CenterNet2/')
sys.path.append('./grit')
from centernet.config import add_centernet_config
from grit.config import add_grit_config
from grit.predictor import VisualizationDemo, BatchVisualizationDemo
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import json
def save_json(json_list, save_path):
    with open(save_path, 'w') as file:
        json.dump(json_list, file, indent=4)


def get_image_files(folder_path):
    image_files = []
    for root, dirs, files in os.walk(folder_path):
        for file in files:
            if file.endswith('.jpg') or file.endswith('.png'):
                image_files.append(os.path.join(root, file))
    return image_files
# constants
WINDOW_NAME = "GRiT"
def norm_xy(xy, width, height):
    # Normalize [x1, y1, x2, y2] pixel coordinates to [0, 1], rounded to 3 decimals.
    xy[0] = round(xy[0] / width, 3)
    xy[2] = round(xy[2] / width, 3)
    xy[1] = round(xy[1] / height, 3)
    xy[3] = round(xy[3] / height, 3)
    return xy


def dense_pred_to_normcaption(predictions, width, height):
    # Convert GRiT instance predictions into {"caption", "box"} records with normalized boxes.
    boxes = predictions["instances"].pred_boxes if predictions["instances"].has("pred_boxes") else None
    object_description = predictions["instances"].pred_object_descriptions.data
    objects = []
    for i in range(len(object_description)):
        xy = [a for a in boxes[i].tensor.cpu().detach().numpy()[0]]
        box = norm_xy(xy, width, height)
        objects.append({"caption": object_description[i], "box": box})
    return objects
def setup_cfg(args):
    cfg = get_cfg()
    # Use if/else so --cpu is not silently overridden by the default --device.
    if args.cpu:
        cfg.MODEL.DEVICE = "cpu"
    else:
        cfg.MODEL.DEVICE = args.device
    add_centernet_config(cfg)
    add_grit_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # Set score_threshold for builtin models
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
    if args.test_task:
        cfg.MODEL.TEST_TASK = args.test_task
    cfg.MODEL.BEAM_SIZE = 1
    cfg.MODEL.ROI_HEADS.SOFT_NMS_ENABLED = False
    cfg.USE_ACT_CHECKPOINT = False
    cfg.freeze()
    return cfg
def _get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--config-file",
        default="./grit/configs/GRiT_B_DenseCap_ObjectDet.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument("--cpu", type=bool, default=False)
    parser.add_argument("--device", type=str, default="cuda:0")
    parser.add_argument(
        "--confidence-threshold",
        type=float,
        default=0.5,
        help="Minimum score for instance predictions to be shown",
    )
    parser.add_argument(
        "--test-task",
        type=str,
        default='DenseCap',
        help="Choose a task to have GRiT perform",
    )
    parser.add_argument(
        "--opts",
        help="Modify config options using the command-line 'KEY VALUE' pairs",
        default=["MODEL.WEIGHTS", "./grit/model_weight/grit_b_densecap.pth"],
        nargs=argparse.REMAINDER,
    )
    parser.add_argument("--image_folder", type=str, default="./images")
    parser.add_argument("--output_path", type=str, default="./outputs/grit.json")
    parser.add_argument("--batch_size", type=int, default=2)
    args = parser.parse_args()
    return args
class lazydataset(Dataset):
    def __init__(self, data_path) -> None:
        super().__init__()
        self.image_paths = get_image_files(data_path)

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, i):
        image_path = self.image_paths[i]
        image = read_image(image_path, format="BGR")
        return {'image': image, 'img_id': image_path.split('/')[-1]}


def collate_fn(batch):
    image = [item['image'] for item in batch]
    img_id = [item['img_id'] for item in batch]
    return {'image': image, 'img_id': img_id}
if __name__ == "__main__":
    json_save = []
    args = _get_args()
    cfg = setup_cfg(args)
    demo = BatchVisualizationDemo(cfg)
    dataset = lazydataset(args.image_folder)
    dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False, num_workers=0, collate_fn=collate_fn)
    for batch in tqdm(dataloader):
        predictions = demo.run_on_images(batch['image'])
        for i in range(len(predictions)):
            height, width = batch['image'][i].shape[0], batch['image'][i].shape[1]
            objects = dense_pred_to_normcaption(predictions[i], width, height)
            json_save.append({"img_id": batch['img_id'][i], "objects": objects})
    save_json(json_save, args.output_path)
This code comes from https://github.com/JialianW/GRiT.git.
MODEL:
  META_ARCHITECTURE: "GRiT"
  MASK_ON: True
  PROPOSAL_GENERATOR:
    NAME: "CenterNet"
  FPN:
    IN_FEATURES: ["layer3", "layer4", "layer5"]
  PIXEL_MEAN: [123.675, 116.280, 103.530]
  PIXEL_STD: [58.395, 57.12, 57.375]
  ROI_HEADS:
    NAME: GRiTROIHeadsAndTextDecoder
    IN_FEATURES: ["p3", "p4", "p5"]
    IOU_THRESHOLDS: [0.6]
    NUM_CLASSES: 1
    SCORE_THRESH_TEST: 0.02
    NMS_THRESH_TEST: 0.5
    OBJECT_FEAT_POOLER_RES: 14
  ROI_BOX_CASCADE_HEAD:
    IOUS: [0.6, 0.7, 0.8]
  ROI_BOX_HEAD:
    NAME: "FastRCNNConvFCHead"
    NUM_FC: 2
    POOLER_RESOLUTION: 7
    CLS_AGNOSTIC_BBOX_REG: True
    MULT_PROPOSAL_SCORE: True
  ROI_MASK_HEAD:
    NAME: "MaskRCNNConvUpsampleHead"
    NUM_CONV: 4
    POOLER_RESOLUTION: 14
    CLS_AGNOSTIC_MASK: True
  CENTERNET:
    NUM_CLASSES: 1
    REG_WEIGHT: 1.
    NOT_NORM_REG: True
    ONLY_PROPOSAL: True
    WITH_AGN_HM: True
    INFERENCE_TH: 0.0001
    PRE_NMS_TOPK_TRAIN: 4000
    POST_NMS_TOPK_TRAIN: 2000
    PRE_NMS_TOPK_TEST: 1000
    POST_NMS_TOPK_TEST: 256
    NMS_TH_TRAIN: 0.9
    NMS_TH_TEST: 0.9
    POS_WEIGHT: 0.5
    NEG_WEIGHT: 0.5
    IGNORE_HIGH_FP: 0.85
DATASETS:
  TRAIN: ("coco_2017_train",)
  TEST: ("coco_2017_val",)
DATALOADER:
  SAMPLER_TRAIN: "MultiDatasetSampler"
  DATASET_RATIO: [1]
  DATASET_INPUT_SIZE: [1024]
  DATASET_INPUT_SCALE: [[0.1, 2.0]]
  FILTER_EMPTY_ANNOTATIONS: False
  NUM_WORKERS: 8
TEST:
  DETECTIONS_PER_IMAGE: 256
SOLVER:
  LR_SCHEDULER_NAME: "WarmupCosineLR"
  CHECKPOINT_PERIOD: 10000
  WARMUP_ITERS: 1000
  WARMUP_FACTOR: 0.001
  USE_CUSTOM_SOLVER: True
  OPTIMIZER: "ADAMW"
  MAX_ITER: 180000
  IMS_PER_BATCH: 64
  BASE_LR: 0.00008
  VIT_LAYER_DECAY: True
  CLIP_GRADIENTS:
    ENABLED: True
INPUT:
  FORMAT: RGB
  CUSTOM_AUG: EfficientDetResizeCrop
  TRAIN_SIZE: 640
USE_ACT_CHECKPOINT: True
VERSION: 2
_BASE_: "Base.yaml"
MODEL:
  TRAIN_TASK: ["DenseCap"]
  TEST_TASK: "DenseCap"
  MASK_ON: False
  ROI_HEADS:
    SOFT_NMS_ENABLED: False
  BEAM_SIZE: 1
  WEIGHTS: "detectron2://ImageNetPretrained/MAE/mae_pretrain_vit_base.pth"
  BACKBONE:
    NAME: build_vit_fpn_backbone
  VIT_LAYERS: 12
SOLVER:
  VIT_LAYER_DECAY_RATE: 0.7
DATASETS:
  TRAIN: ("vg_train",)
  TEST: ("vg_test",)
DATALOADER:
  DATASET_BS: 2
OUTPUT_DIR: "./output/GRiT_B_DenseCap"
_BASE_: "Base.yaml"
MODEL:
  TRAIN_TASK: ["ObjectDet", "DenseCap"]
  TEST_TASK: "DenseCap" # DenseCap or ObjectDet: Choose one for testing
  MASK_ON: True
  ROI_HEADS:
    SOFT_NMS_ENABLED: False
  BEAM_SIZE: 1
  WEIGHTS: "detectron2://ImageNetPretrained/MAE/mae_pretrain_vit_base.pth"
  BACKBONE:
    NAME: build_vit_fpn_backbone
  VIT_LAYERS: 12
SOLVER:
  VIT_LAYER_DECAY_RATE: 0.7
DATASETS:
  TRAIN: ("GRiT_coco2017_train", "vg_train")
  TEST: ("coco_2017_test-dev",)
DATALOADER:
  DATASET_RATIO: [1, 1]
  DATASET_BS: 2
  DATASET_INPUT_SIZE: [1024, 1024]
  DATASET_INPUT_SCALE: [[0.1, 2.0], [0.1, 2.0]]
OUTPUT_DIR: "./output/GRiT_B_DenseCap_ObjectDet"
_BASE_: "Base.yaml"
MODEL:
  TRAIN_TASK: ["ObjectDet"]
  TEST_TASK: "ObjectDet"
  MASK_ON: True
  ROI_HEADS:
    SOFT_NMS_ENABLED: True
  BEAM_SIZE: 3
  WEIGHTS: "detectron2://ImageNetPretrained/MAE/mae_pretrain_vit_base.pth"
  BACKBONE:
    NAME: build_vit_fpn_backbone
  VIT_LAYERS: 12
SOLVER:
  VIT_LAYER_DECAY_RATE: 0.7
DATASETS:
  TRAIN: ("GRiT_coco2017_train",)
  TEST: ("coco_2017_val",)
DATALOADER:
  DATASET_BS: 2
OUTPUT_DIR: "./output/GRiT_B_ObjectDet"
_BASE_: "Base.yaml"
MODEL:
  TRAIN_TASK: ["ObjectDet"]
  TEST_TASK: "ObjectDet"
  MASK_ON: True
  ROI_HEADS:
    SOFT_NMS_ENABLED: True
  BEAM_SIZE: 3
  WEIGHTS: "detectron2://ImageNetPretrained/MAE/mae_pretrain_vit_huge_p14to16.pth"
  BACKBONE:
    NAME: build_vit_fpn_backbone_huge
  VIT_LAYERS: 32
SOLVER:
  MAX_ITER: 135000
  VIT_LAYER_DECAY_RATE: 0.9
DATASETS:
  TRAIN: ("GRiT_coco2017_train",)
  TEST: ("coco_2017_val",)
DATALOADER:
  DATASET_BS: 1
OUTPUT_DIR: "./output/GRiT_H_ObjectDet"
_BASE_: "Base.yaml"
MODEL:
  TRAIN_TASK: ["ObjectDet"]
  TEST_TASK: "ObjectDet"
  MASK_ON: True
  ROI_HEADS:
    SOFT_NMS_ENABLED: True
  BEAM_SIZE: 3
  WEIGHTS: "detectron2://ImageNetPretrained/MAE/mae_pretrain_vit_large.pth"
  BACKBONE:
    NAME: build_vit_fpn_backbone_large
  VIT_LAYERS: 24
SOLVER:
  VIT_LAYER_DECAY_RATE: 0.8
DATASETS:
  TRAIN: ("GRiT_coco2017_train",)
  TEST: ("coco_2017_val",)
DATALOADER:
  DATASET_BS: 1
OUTPUT_DIR: "./output/GRiT_L_ObjectDet"
from .modeling.meta_arch import grit
from .modeling.roi_heads import grit_roi_heads
from .modeling.backbone import vit
from .data.datasets import object365
from .data.datasets import vg
from .data.datasets import grit_coco