# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os
import os.path as osp
import shutil
import subprocess
import time
from collections import OrderedDict
import torch
import yaml
from mmengine.config import Config
from mmengine.fileio import dump
from mmengine.utils import digit_version, mkdir_or_exist, scandir

def ordered_yaml_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds):

    class OrderedDumper(Dumper):
        pass

    def _dict_representer(dumper, data):
        return dumper.represent_mapping(
            yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items())

    OrderedDumper.add_representer(OrderedDict, _dict_representer)
    return yaml.dump(data, stream, OrderedDumper, **kwds)
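
# Illustrative usage (not part of the original script): dumping an
# OrderedDict through `ordered_yaml_dump` preserves key insertion order,
# whereas plain `yaml.safe_dump` sorts keys alphabetically:
#   >>> ordered_yaml_dump(OrderedDict([('b', 1), ('a', 2)]))
#   'b: 1\na: 2\n'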

def process_checkpoint(in_file, out_file):
    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    # remove the ema state_dict and the data_preprocessor keys
    if 'ema_state_dict' in checkpoint:
        del checkpoint['ema_state_dict']
    for key in list(checkpoint['state_dict']):
        if key.startswith('ema_'):
            checkpoint['state_dict'].pop(key)
        elif key.startswith('data_preprocessor'):
            checkpoint['state_dict'].pop(key)
    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    if digit_version(torch.__version__) >= digit_version('1.6'):
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)
    sha = subprocess.check_output(['sha256sum', out_file]).decode()
    # note: str.rstrip('.pth') strips characters, not the suffix,
    # so drop the extension explicitly instead
    if out_file.endswith('.pth'):
        out_file_name = out_file[:-len('.pth')]
    else:
        out_file_name = out_file
    final_file = out_file_name + '-{}.pth'.format(sha[:8])
    os.rename(out_file, final_file)
    return final_file
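
# Illustrative (file names are examples): given a trained checkpoint,
#   process_checkpoint('work_dirs/.../epoch_12.pth', 'gather/.../model')
# strips the optimizer and EMA states, then renames the output to
# 'gather/.../model-<sha>.pth', where <sha> is the first 8 hex digits of
# the file's sha256 sum.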

def is_by_epoch(config):
    cfg = Config.fromfile('./configs/' + config)
    return cfg.train_cfg.type == 'EpochBasedTrainLoop'


def get_final_epoch_or_iter(config):
    cfg = Config.fromfile('./configs/' + config)
    if cfg.train_cfg.type == 'EpochBasedTrainLoop':
        return cfg.train_cfg.max_epochs
    else:
        return cfg.train_cfg.max_iters

def get_best_epoch_or_iter(exp_dir):
    best_epoch_iter_full_path = list(
        sorted(glob.glob(osp.join(exp_dir, 'best_*.pth'))))[-1]
    best_epoch_or_iter_model_path = best_epoch_iter_full_path.split('/')[-1]
    best_epoch_or_iter = best_epoch_or_iter_model_path. \
        split('_')[-1].split('.')[0]
    return best_epoch_or_iter_model_path, int(best_epoch_or_iter)

def get_real_epoch_or_iter(config):
    cfg = Config.fromfile('./configs/' + config)
    if cfg.train_cfg.type == 'EpochBasedTrainLoop':
        return cfg.train_cfg.max_epochs
    else:
        return cfg.train_cfg.max_iters

def get_final_results(log_json_path,
                      epoch_or_iter,
                      results_lut='coco/bbox_mAP',
                      by_epoch=True):
    # `epoch_or_iter` and `by_epoch` are currently unused: the metric is
    # simply parsed from the last line of the log file.
    result_dict = dict()
    with open(log_json_path) as f:
        r = f.readlines()[-1]
        last_metric = r.split(',')[0].split(': ')[-1].strip()
    result_dict[results_lut] = last_metric
    return result_dict
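
# Illustrative: with the default `results_lut`, a final log line such as
#   {"coco/bbox_mAP": 0.367, "coco/bbox_mAP_50": 0.583, ...}
# (exact format depends on the logger) is parsed into
# {'coco/bbox_mAP': '0.367'} by taking the first comma-separated field.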

def get_dataset_name(config):
    # If more datasets are supported, add them here.
    name_map = dict(
        CityscapesDataset='Cityscapes',
        CocoDataset='COCO',
        CocoPanopticDataset='COCO',
        DeepFashionDataset='Deep Fashion',
        LVISV05Dataset='LVIS v0.5',
        LVISV1Dataset='LVIS v1',
        VOCDataset='Pascal VOC',
        WIDERFaceDataset='WIDER Face',
        OpenImagesDataset='OpenImagesDataset',
        OpenImagesChallengeDataset='OpenImagesChallengeDataset',
        Objects365V1Dataset='Objects365 v1',
        Objects365V2Dataset='Objects365 v2')
    cfg = Config.fromfile('./configs/' + config)
    return name_map[cfg.dataset_type]

def find_last_dir(model_dir):
    dst_times = []
    for time_stamp in os.scandir(model_dir):
        if osp.isdir(time_stamp):
            dst_time = time.mktime(
                time.strptime(time_stamp.name, '%Y%m%d_%H%M%S'))
            dst_times.append([dst_time, time_stamp.name])
    return max(dst_times, key=lambda x: x[0])[1]
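
# Illustrative: work dirs contain one timestamped directory per run, e.g.
# '20220101_120000' and '20220315_093000'; find_last_dir returns the most
# recent one ('20220315_093000' in this example).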

def convert_model_info_to_pwc(model_infos):
    pwc_files = {}
    for model in model_infos:
        cfg_folder_name = osp.split(model['config'])[-2]
        pwc_model_info = OrderedDict()
        pwc_model_info['Name'] = osp.split(model['config'])[-1].split('.')[0]
        pwc_model_info['In Collection'] = 'Please fill in Collection name'
        pwc_model_info['Config'] = osp.join('configs', model['config'])

        # get metadata
        meta_data = OrderedDict()
        if 'epochs' in model:
            meta_data['Epochs'] = get_real_epoch_or_iter(model['config'])
        else:
            meta_data['Iterations'] = get_real_epoch_or_iter(model['config'])
        pwc_model_info['Metadata'] = meta_data

        # get dataset name
        dataset_name = get_dataset_name(model['config'])

        # get results; if more metrics are needed, add them here.
        results = []
        if 'bbox_mAP' in model['results']:
            metric = round(model['results']['bbox_mAP'] * 100, 1)
            results.append(
                OrderedDict(
                    Task='Object Detection',
                    Dataset=dataset_name,
                    Metrics={'box AP': metric}))
        if 'segm_mAP' in model['results']:
            metric = round(model['results']['segm_mAP'] * 100, 1)
            results.append(
                OrderedDict(
                    Task='Instance Segmentation',
                    Dataset=dataset_name,
                    Metrics={'mask AP': metric}))
        if 'PQ' in model['results']:
            metric = round(model['results']['PQ'], 1)
            results.append(
                OrderedDict(
                    Task='Panoptic Segmentation',
                    Dataset=dataset_name,
                    Metrics={'PQ': metric}))
        pwc_model_info['Results'] = results

        # use osp.splitext instead of str.rstrip('.py'), which would strip
        # characters rather than the extension
        link_string = 'https://download.openmmlab.com/mmdetection/v3.0/'
        link_string += '{}/{}'.format(
            osp.splitext(model['config'])[0],
            osp.split(model['model_path'])[-1])
        pwc_model_info['Weights'] = link_string
        if cfg_folder_name in pwc_files:
            pwc_files[cfg_folder_name].append(pwc_model_info)
        else:
            pwc_files[cfg_folder_name] = [pwc_model_info]
    return pwc_files
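
# Illustrative shape of one generated metafile entry (values are examples):
#   - Name: retinanet_r50_fpn_1x_coco
#     In Collection: Please fill in Collection name
#     Config: configs/retinanet/retinanet_r50_fpn_1x_coco.py
#     Metadata:
#       Epochs: 12
#     Results:
#       - Task: Object Detection
#         Dataset: COCO
#         Metrics:
#           box AP: 36.5
#     Weights: https://download.openmmlab.com/mmdetection/v3.0/...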

def parse_args():
    parser = argparse.ArgumentParser(description='Gather benchmarked models')
    parser.add_argument(
        'root',
        type=str,
        default='work_dirs',
        help='root path of benchmarked models to be gathered')
    parser.add_argument(
        '--out',
        type=str,
        default='gather',
        help='output path of gathered models to be stored')
    parser.add_argument(
        '--best',
        action='store_true',
        help='whether to gather the best model.')
    args = parser.parse_args()
    return args
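
# Example invocation (assuming this script is saved as gather_models.py):
#   python gather_models.py work_dirs --out gather --best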

def main():
    args = parse_args()
    models_root = args.root
    models_out = args.out
    mkdir_or_exist(models_out)

    # find all models in the root directory to be gathered
    raw_configs = list(scandir('./configs', '.py', recursive=True))

    # filter out configs that were not trained in the experiments dir
    used_configs = []
    for raw_config in raw_configs:
        if osp.exists(osp.join(models_root, raw_config)):
            used_configs.append(raw_config)
    print(f'Found {len(used_configs)} models to be gathered')

    # find the final checkpoint and log file for each trained config
    # and parse the best performance
    model_infos = []
    for used_config in used_configs:
        exp_dir = osp.join(models_root, used_config)
        by_epoch = is_by_epoch(used_config)
        # check whether the experiment has finished
        if args.best is True:
            final_model, final_epoch_or_iter = get_best_epoch_or_iter(exp_dir)
        else:
            final_epoch_or_iter = get_final_epoch_or_iter(used_config)
            final_model = '{}_{}.pth'.format('epoch' if by_epoch else 'iter',
                                             final_epoch_or_iter)

        model_path = osp.join(exp_dir, final_model)
        # skip if the model is still training
        if not osp.exists(model_path):
            continue

        # get the latest logs
        latest_exp_name = find_last_dir(exp_dir)
        latest_exp_json = osp.join(exp_dir, latest_exp_name, 'vis_data',
                                   latest_exp_name + '.json')

        model_performance = get_final_results(
            latest_exp_json, final_epoch_or_iter, by_epoch=by_epoch)
        if model_performance is None:
            continue

        model_info = dict(
            config=used_config,
            results=model_performance,
            final_model=final_model,
            latest_exp_json=latest_exp_json,
            latest_exp_name=latest_exp_name)
        model_info['epochs' if by_epoch else 'iterations'] = \
            final_epoch_or_iter
        model_infos.append(model_info)

    # publish model for each checkpoint
    publish_model_infos = []
    for model in model_infos:
        # use osp.splitext instead of str.rstrip('.py'), which would strip
        # characters rather than the extension
        model_publish_dir = osp.join(models_out,
                                     osp.splitext(model['config'])[0])
        mkdir_or_exist(model_publish_dir)

        model_name = osp.split(model['config'])[-1].split('.')[0]
        model_name += '_' + model['latest_exp_name']
        publish_model_path = osp.join(model_publish_dir, model_name)
        trained_model_path = osp.join(models_root, model['config'],
                                      model['final_model'])

        # convert model
        final_model_path = process_checkpoint(trained_model_path,
                                              publish_model_path)

        # copy log
        shutil.copy(model['latest_exp_json'],
                    osp.join(model_publish_dir, f'{model_name}.log.json'))

        # copy config to guarantee reproducibility
        config_path = model['config']
        config_path = osp.join(
            'configs',
            config_path) if 'configs' not in config_path else config_path
        target_config_path = osp.split(config_path)[-1]
        shutil.copy(config_path,
                    osp.join(model_publish_dir, target_config_path))

        model['model_path'] = final_model_path
        publish_model_infos.append(model)

    models = dict(models=publish_model_infos)
    print(f'Totally gathered {len(publish_model_infos)} models')
    dump(models, osp.join(models_out, 'model_info.json'))

    pwc_files = convert_model_info_to_pwc(publish_model_infos)
    for name in pwc_files:
        with open(osp.join(models_out, name + '_metafile.yml'), 'w') as f:
            # the file is opened in text mode, so do not pass an encoding
            # to yaml.dump (that would make PyYAML emit bytes)
            ordered_yaml_dump(pwc_files[name], f)


if __name__ == '__main__':
    main()
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp
from mmengine.config import Config
from mmengine.fileio import dump, load
from mmengine.utils import mkdir_or_exist

def parse_args():
    parser = argparse.ArgumentParser(
        description='Gather benchmarked models metric')
    parser.add_argument('config', help='test config file path')
    parser.add_argument(
        'root',
        type=str,
        help='root path of benchmarked models to be gathered')
    parser.add_argument(
        '--out', type=str, help='output path of gathered metrics to be stored')
    parser.add_argument(
        '--not-show', action='store_true', help='not show metrics')
    parser.add_argument(
        '--show-all', action='store_true', help='show all model metrics')
    args = parser.parse_args()
    return args
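
# Example invocation (script and config names are illustrative):
#   python batch_gather_metrics.py batch_test_list.py work_dirs --out metrics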

if __name__ == '__main__':
    args = parse_args()

    root_path = args.root
    metrics_out = args.out

    result_dict = {}
    cfg = Config.fromfile(args.config)

    for model_key in cfg:
        model_infos = cfg[model_key]
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            record_metrics = model_info['metric']
            config = model_info['config'].strip()
            fname, _ = osp.splitext(osp.basename(config))
            metric_json_dir = osp.join(root_path, fname)
            if osp.exists(metric_json_dir):
                json_list = glob.glob(osp.join(metric_json_dir, '*.json'))
                if len(json_list) > 0:
                    log_json_path = list(sorted(json_list))[-1]

                    metric = load(log_json_path)
                    if config in metric.get('config', ''):
                        new_metrics = dict()
                        for record_metric_key in record_metrics:
                            record_metric_key_bk = record_metric_key
                            old_metric = record_metrics[record_metric_key]
                            if record_metric_key == 'AR_1000':
                                record_metric_key = 'AR@1000'
                            if record_metric_key not in metric['metric']:
                                raise KeyError(
                                    f'{record_metric_key} does not exist, '
                                    'please check your config')
                            new_metric = round(
                                metric['metric'][record_metric_key] * 100, 1)
                            new_metrics[record_metric_key_bk] = new_metric

                        if args.show_all:
                            result_dict[config] = dict(
                                before=record_metrics, after=new_metrics)
                        else:
                            for record_metric_key in record_metrics:
                                old_metric = record_metrics[record_metric_key]
                                new_metric = new_metrics[record_metric_key]
                                if old_metric != new_metric:
                                    result_dict[config] = dict(
                                        before=record_metrics,
                                        after=new_metrics)
                                    break
                    else:
                        print(f'{config} not included in: {log_json_path}')
                else:
                    print(f'{config}: no json file found in: '
                          f'{metric_json_dir}')
            else:
                print(f'{config}: dir does not exist: {metric_json_dir}')

    if metrics_out:
        mkdir_or_exist(metrics_out)
        dump(result_dict,
             osp.join(metrics_out, 'batch_test_metric_info.json'))
    if not args.not_show:
        print('===================================')
        for config_name, metrics in result_dict.items():
            print(config_name, metrics)
        print('===================================')
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp
from gather_models import get_final_results
from mmengine.config import Config
from mmengine.fileio import dump
from mmengine.utils import mkdir_or_exist
try:
    import xlrd
except ImportError:
    xlrd = None
try:
    import xlutils
    from xlutils.copy import copy
except ImportError:
    xlutils = None

def parse_args():
    parser = argparse.ArgumentParser(
        description='Gather benchmarked models metric')
    parser.add_argument(
        'root',
        type=str,
        help='root path of benchmarked models to be gathered')
    parser.add_argument(
        'txt_path', type=str, help='txt path output by benchmark_filter')
    parser.add_argument(
        '--out', type=str, help='output path of gathered metrics to be stored')
    parser.add_argument(
        '--not-show', action='store_true', help='not show metrics')
    parser.add_argument(
        '--excel', type=str, help='input path of excel to be recorded')
    parser.add_argument(
        '--ncol', type=int, help='Number of column to be modified or appended')
    args = parser.parse_args()
    return args
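
# Example invocation (script name is illustrative; the txt file is the one
# produced by benchmark_filter):
#   python gather_benchmark_metric.py work_dirs benchmark_configs.txt \
#       --out metrics --excel benchmark.xls --ncol 5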

if __name__ == '__main__':
    args = parse_args()
    if args.excel:
        assert args.ncol, 'Please specify "--excel" and "--ncol" ' \
                          'at the same time'
        if xlrd is None:
            raise RuntimeError(
                'xlrd is not installed, '
                'please use "pip install xlrd==1.2.0" to install it')
        if xlutils is None:
            raise RuntimeError(
                'xlutils is not installed, '
                'please use "pip install xlutils==2.0.0" to install it')
        readbook = xlrd.open_workbook(args.excel)
        sheet = readbook.sheet_by_name('Sheet1')
        sheet_info = {}
        total_nrows = sheet.nrows
        for i in range(3, sheet.nrows):
            sheet_info[sheet.row_values(i)[0]] = i
        xlrw = copy(readbook)
        table = xlrw.get_sheet(0)

    root_path = args.root
    metrics_out = args.out
    result_dict = {}

    with open(args.txt_path, 'r') as f:
        model_cfgs = f.readlines()
        for i, config in enumerate(model_cfgs):
            config = config.strip()
            if len(config) == 0:
                continue

            config_name = osp.split(config)[-1]
            config_name = osp.splitext(config_name)[0]
            result_path = osp.join(root_path, config_name)
            if osp.exists(result_path):
                # 1 read config
                cfg = Config.fromfile(config)
                total_epochs = cfg.runner.max_epochs
                final_results = cfg.evaluation.metric
                if not isinstance(final_results, list):
                    final_results = [final_results]
                final_results_out = []
                for key in final_results:
                    if 'proposal_fast' in key:
                        final_results_out.append('AR@1000')  # RPN
                    elif 'mAP' not in key:
                        final_results_out.append(key + '_mAP')

                # 2 determine whether the checkpoint of total_epochs exists
                ckpt_path = f'epoch_{total_epochs}.pth'
                if osp.exists(osp.join(result_path, ckpt_path)):
                    log_json_path = list(
                        sorted(glob.glob(osp.join(result_path,
                                                  '*.log.json'))))[-1]

                    # 3 read metric
                    model_performance = get_final_results(
                        log_json_path, total_epochs, final_results_out)
                    if model_performance is None:
                        print(f'log file error: {log_json_path}')
                        continue
                    for performance in model_performance:
                        if performance in ['AR@1000', 'bbox_mAP', 'segm_mAP']:
                            metric = round(
                                model_performance[performance] * 100, 1)
                            model_performance[performance] = metric
                    result_dict[config] = model_performance

                    # update and append excel content
                    if args.excel:
                        if 'AR@1000' in model_performance:
                            metrics = f'{model_performance["AR@1000"]}' \
                                      f'(AR@1000)'
                        elif 'segm_mAP' in model_performance:
                            metrics = f'{model_performance["bbox_mAP"]}/' \
                                      f'{model_performance["segm_mAP"]}'
                        else:
                            metrics = f'{model_performance["bbox_mAP"]}'

                        row_num = sheet_info.get(config, None)
                        if row_num:
                            table.write(row_num, args.ncol, metrics)
                        else:
                            table.write(total_nrows, 0, config)
                            table.write(total_nrows, args.ncol, metrics)
                            total_nrows += 1
                else:
                    print(f'{config}: checkpoint does not exist: {ckpt_path}')
            else:
                print(f'result dir does not exist: {config}')

    # 4 save or print results
    if metrics_out:
        mkdir_or_exist(metrics_out)
        dump(result_dict, osp.join(metrics_out, 'model_metric_info.json'))
    if not args.not_show:
        print('===================================')
        for config_name, metrics in result_dict.items():
            print(config_name, metrics)
        print('===================================')
    if args.excel:
        filename, suffix = osp.splitext(args.excel)
        xlrw.save(f'{filename}_o{suffix}')
        print(f'>>> Output {filename}_o{suffix}')
yapf -r -i mmdet/ configs/ tests/ tools/
isort mmdet/ configs/ tests/ tools/
flake8 .
# Copyright (c) OpenMMLab. All rights reserved.
"""Check out backbone whether successfully load pretrained checkpoint."""
import copy
import os
from os.path import dirname, exists, join
import pytest
from mmengine.config import Config
from mmengine.runner import CheckpointLoader
from mmengine.utils import ProgressBar
from mmdet.registry import MODELS

def _get_config_directory():
    """Find the predefined detector config directory."""
    try:
        # Assume we are running in the source mmdetection repo
        repo_dpath = dirname(dirname(__file__))
    except NameError:
        # For IPython development when __file__ is not defined
        import mmdet
        repo_dpath = dirname(dirname(mmdet.__file__))
    config_dpath = join(repo_dpath, 'configs')
    if not exists(config_dpath):
        raise Exception('Cannot find config path')
    return config_dpath

def _get_config_module(fname):
    """Load a configuration as a python module."""
    config_dpath = _get_config_directory()
    config_fpath = join(config_dpath, fname)
    config_mod = Config.fromfile(config_fpath)
    return config_mod


def _get_detector_cfg(fname):
    """Grab configs necessary to create a detector.

    These are deep copied to allow for safe modification of parameters
    without influencing other tests.
    """
    config = _get_config_module(fname)
    model = copy.deepcopy(config.model)
    return model

def _traversed_config_file():
    """Traverse all potential config files under the `configs` directory.

    If you need to print details or debug code, you can use this function.
    If `backbone.init_cfg` is None (i.e. the config does not use the
    `Pretrained` init way), add the folder name to `ignores_folder` (if all
    config files in that folder set `backbone.init_cfg` to None) or add the
    config name to `ignores_file` (if only that config file sets
    `backbone.init_cfg` to None).
    """
    config_path = _get_config_directory()
    check_cfg_names = []

    # `_base_`, `legacy_1.x` and `common` are ignored by default.
    ignores_folder = ['_base_', 'legacy_1.x', 'common']
    # 'ld' needs to load a teacher model; if you want to check 'ld',
    # please check the teacher_config path first.
    ignores_folder += ['ld']
    # `selfsup_pretrain` needs model conversion; if you want to check these
    # models, convert them first.
    ignores_folder += ['selfsup_pretrain']
    # the `init_cfg` in 'centripetalnet', 'cornernet', 'cityscapes' and
    # 'scratch' is None.
    # the `init_cfg` in ssdlite (`ssdlite_mobilenetv2_scratch_600e_coco.py`)
    # is None.
    # Please confirm `backbone.init_cfg` is None first.
    ignores_folder += ['centripetalnet', 'cornernet', 'cityscapes', 'scratch']
    ignores_file = ['ssdlite_mobilenetv2_scratch_600e_coco.py']

    for config_file_name in os.listdir(config_path):
        if config_file_name not in ignores_folder:
            config_file = join(config_path, config_file_name)
            if os.path.isdir(config_file):
                for config_sub_file in os.listdir(config_file):
                    if config_sub_file.endswith('py') and \
                            config_sub_file not in ignores_file:
                        name = join(config_file, config_sub_file)
                        check_cfg_names.append(name)
    return check_cfg_names
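
# Illustrative usage (paths depend on the local checkout):
#   >>> cfgs = _traversed_config_file()
#   >>> cfgs[0]
#   '/path/to/mmdetection/configs/atss/atss_r50_fpn_1x_coco.py'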

def _check_backbone(config, print_cfg=True):
    """Check whether the backbone successfully loads the pretrained model via
    `backbone.init_cfg`.

    First, use `CheckpointLoader.load_checkpoint` to load the checkpoint
    without loading the model. Then, use `MODELS.build` to build the model
    and `model.init_weights()` to initialize its parameters. Finally, assert
    that the weights and biases of each layer loaded from the pretrained
    checkpoint equal those of the original checkpoint.

    Args:
        config (str): Config file path.
        print_cfg (bool): Whether to print the logger and return the result.

    Returns:
        results (str or None): If the backbone successfully loads the
            pretrained checkpoint, return None; else, return the config
            file path.
    """
    if print_cfg:
        print('-' * 15 + 'loading ', config)
    cfg = Config.fromfile(config)
    init_cfg = None
    try:
        init_cfg = cfg.model.backbone.init_cfg
        init_flag = True
    except AttributeError:
        init_flag = False
    if init_cfg is None or init_cfg.get('type') != 'Pretrained':
        init_flag = False
    if init_flag:
        checkpoint = CheckpointLoader.load_checkpoint(init_cfg.checkpoint)
        if 'state_dict' in checkpoint:
            state_dict = checkpoint['state_dict']
        else:
            state_dict = checkpoint
        model = MODELS.build(cfg.model)
        model.init_weights()
        checkpoint_layers = state_dict.keys()
        for name, value in model.backbone.state_dict().items():
            if name in checkpoint_layers:
                assert value.equal(state_dict[name])
        if print_cfg:
            print('-' * 10 + 'Successfully load checkpoint' + '-' * 10 +
                  '\n')
        return None
    else:
        if print_cfg:
            print(config + '\n' + '-' * 10 +
                  'config file does not have init_cfg' + '-' * 10 + '\n')
        return config

@pytest.mark.parametrize('config', _traversed_config_file())
def test_load_pretrained(config):
    """Check whether the backbone successfully loads the pretrained model via
    `backbone.init_cfg`.

    For details, please refer to `_check_backbone`.
    """
    _check_backbone(config, print_cfg=False)


def _test_load_pretrained():
    """Traverse all potential config files under the `configs` directory.

    If you need to print details or debug code, you can use this function.

    Returns:
        check_cfg_names (list[str]): Config files whose backbones,
            initialized from pretrained checkpoints, might be problematic
            and need to be rechecked. The output includes config files
            whose `backbone.init_cfg` is None.
    """
    check_cfg_names = _traversed_config_file()
    need_check_cfg = []

    prog_bar = ProgressBar(len(check_cfg_names))
    for config in check_cfg_names:
        init_cfg_name = _check_backbone(config)
        if init_cfg_name is not None:
            need_check_cfg.append(init_cfg_name)
        prog_bar.update()
    print('These config files need to be checked again')
    print(need_check_cfg)
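
# Illustrative: run the parametrized check with pytest (the test file path
# depends on the local layout):
#   pytest path/to/this_test_file.py -k test_load_pretrained
# or call `_test_load_pretrained()` from an interactive session to get a
# progress bar plus the list of configs that need rechecking.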
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
- Using welcoming and inclusive language
- Being respectful of differing viewpoints and experiences
- Gracefully accepting constructive criticism
- Focusing on what is best for the community
- Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
- The use of sexualized language or imagery and unwelcome sexual attention or
advances
- Trolling, insulting/derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or electronic
address, without explicit permission
- Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at chenkaidev@gmail.com. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq
[homepage]: https://www.contributor-covenant.org
We appreciate all contributions to improve MMDetection. Please refer to [CONTRIBUTING.md](https://github.com/open-mmlab/mmcv/blob/master/CONTRIBUTING.md) in MMCV for more details about the contributing guidelines.
blank_issues_enabled: false
contact_links:
  - name: Common Issues
    url: https://mmdetection.readthedocs.io/en/latest/faq.html
    about: Check if your issue already has solutions
  - name: MMDetection Documentation
    url: https://mmdetection.readthedocs.io/en/latest/
    about: Check if your question is answered in docs
---
name: Error report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
Thanks for your error report and we appreciate it a lot.
**Checklist**
1. I have searched related issues but cannot get the expected help.
2. I have read the [FAQ documentation](https://mmdetection.readthedocs.io/en/latest/faq.html) but cannot get the expected help.
3. The bug has not been fixed in the latest version.
**Describe the bug**
A clear and concise description of what the bug is.
**Reproduction**
1. What command or script did you run?
```none
A placeholder for the command.
```
2. Did you make any modifications to the code or config? Do you understand what you have modified?
3. What dataset did you use?
**Environment**
1. Please run `python mmdet/utils/collect_env.py` to collect necessary environment information and paste it here.
2. You may add additional information that may be helpful for locating the problem, such as
   - How you installed PyTorch \[e.g., pip, conda, source\]
   - Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)
**Error traceback**
If applicable, paste the error traceback here.
```none
A placeholder for the traceback.
```
**Bug fix**
If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated!
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Describe the feature**
**Motivation**
A clear and concise description of the motivation of the feature.
Ex1. It is inconvenient when \[....\].
Ex2. There is a recent paper \[....\], which is very helpful for \[....\].
**Related resources**
If there is an official code release or third-party implementations, please also provide the information here, which would be very helpful.
**Additional context**
Add any other context or screenshots about the feature request here.
If you would like to implement the feature and create a PR, please leave a comment here and that would be much appreciated.
---
name: General questions
about: Ask general questions to get help
title: ''
labels: ''
assignees: ''
---
---
name: Reimplementation Questions
about: Ask about questions during model reimplementation
title: ''
labels: reimplementation
assignees: ''
---
**Notice**
There are several common situations in the reimplementation issues as below
1. Reimplement a model in the model zoo using the provided configs
2. Reimplement a model in the model zoo on other datasets (e.g., custom datasets)
3. Reimplement a custom model but all the components are implemented in MMDetection
4. Reimplement a custom model with new modules implemented by yourself
There are several things to do for different cases as below.
- For case 1 & 3, please follow the steps in the following sections thus we could help to quick identify the issue.
- For case 2 & 4, please understand that we are not able to do much help here because we usually do not know the full code and the users should be responsible to the code they write.
- One suggestion for case 2 & 4 is that the users should first check whether the bug lies in the self-implemented code or the original code. For example, users can first make sure that the same model runs well on supported datasets. If you still need help, please describe what you have done and what you obtain in the issue, and follow the steps in the following sections and try as clear as possible so that we can better help you.
**Checklist**
1. I have searched related issues but cannot get the expected help.
2. The issue has not been fixed in the latest version.
**Describe the issue**
A clear and concise description of the problem you met and what you have done.
**Reproduction**
1. What command or script did you run?
```none
A placeholder for the command.
```
2. Which config did you run?
```none
A placeholder for the config.
```
3. Did you make any modifications to the code or config? Do you understand what you have modified?
4. What dataset did you use?
**Environment**
1. Please run `python mmdet/utils/collect_env.py` to collect necessary environment information and paste it here.
2. You may add additional information that may be helpful for locating the problem, such as
   1. How you installed PyTorch \[e.g., pip, conda, source\]
   2. Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)
**Results**
If applicable, paste the related results here, e.g., what you expect and what you get.
```none
A placeholder for results comparison
```
**Issue fix**
If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated!
Thanks for your contribution and we appreciate it a lot. The following instructions will make your pull request healthier and help it get feedback more easily. If you do not understand some items, don't worry; just make the pull request and seek help from the maintainers.
## Motivation
Please describe the motivation of this PR and the goal you want to achieve through this PR.
## Modification
Please briefly describe what modification is made in this PR.
## BC-breaking (Optional)
Does the modification introduce changes that break the backward-compatibility of the downstream repos?
If so, please describe how it breaks the compatibility and how the downstream projects should modify their code to keep compatibility with this PR.
## Use cases (Optional)
If this PR introduces a new feature, it is better to list some use cases here, and update the documentation.
## Checklist
1. Pre-commit or other linting tools are used to fix the potential lint issues.
2. The modification is covered by complete unit tests. If not, please add more unit tests to ensure correctness.
3. If the modification has potential influence on downstream projects, this PR should be tested with downstream projects, like MMDet or MMPreTrain.
4. The documentation has been modified accordingly, like docstring or example tutorials.
name: deploy

on: push

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  build-n-publish:
    runs-on: ubuntu-latest
    if: startsWith(github.event.ref, 'refs/tags')
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python 3.7
        uses: actions/setup-python@v2
        with:
          python-version: 3.7
      - name: Install torch
        run: pip install torch
      - name: Install wheel
        run: pip install wheel
      - name: Build MMDetection
        run: python setup.py sdist bdist_wheel
      - name: Publish distribution to PyPI
        run: |
          pip install twine
          twine upload dist/* -u __token__ -p ${{ secrets.pypi_password }}
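# Illustrative: this job only publishes on tag pushes, e.g.
#   git tag v3.0.0 && git push origin v3.0.0
# `secrets.pypi_password` must be configured in the repository settings.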
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/en/_build/
docs/zh_cn/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
data/
data
.vscode
.idea
.DS_Store
# custom
*.pkl
*.pkl.json
*.log.json
docs/modelzoo_statistics.md
mmdet/.mim
work_dirs/
# PyTorch
*.pth
*.py~
*.sh~
assign:
  strategy:
    # random
    daily-shift-based
  schedule:
    '*/1 * * * *'
  assignees:
    - Czm369
    - hhaAndroid
    - jbwang1997
    - RangiLyu
    - BIGWangYuDong
    - chhluo
    - ZwwWayne
exclude: ^tests/data/
repos:
  - repo: https://gitee.com/openmmlab/mirrors-flake8
    rev: 5.0.4
    hooks:
      - id: flake8
  - repo: https://gitee.com/openmmlab/mirrors-isort
    rev: 5.11.5
    hooks:
      - id: isort
  - repo: https://gitee.com/openmmlab/mirrors-yapf
    rev: v0.32.0
    hooks:
      - id: yapf
  - repo: https://gitee.com/openmmlab/mirrors-pre-commit-hooks
    rev: v4.3.0
    hooks:
      - id: trailing-whitespace
      - id: check-yaml
      - id: end-of-file-fixer
      - id: requirements-txt-fixer
      - id: double-quote-string-fixer
      - id: check-merge-conflict
      - id: fix-encoding-pragma
        args: ["--remove"]
      - id: mixed-line-ending
        args: ["--fix=lf"]
  - repo: https://gitee.com/openmmlab/mirrors-mdformat
    rev: 0.7.9
    hooks:
      - id: mdformat
        args: ["--number"]
        additional_dependencies:
          - mdformat-openmmlab
          - mdformat_frontmatter
          - linkify-it-py
  - repo: https://gitee.com/openmmlab/mirrors-codespell
    rev: v2.2.1
    hooks:
      - id: codespell
  - repo: https://gitee.com/openmmlab/mirrors-docformatter
    rev: v1.3.1
    hooks:
      - id: docformatter
        args: ["--in-place", "--wrap-descriptions", "79"]
  - repo: https://gitee.com/openmmlab/mirrors-pyupgrade
    rev: v3.0.0
    hooks:
      - id: pyupgrade
        args: ["--py36-plus"]
  - repo: https://gitee.com/open-mmlab/pre-commit-hooks
    rev: v0.2.0
    hooks:
      - id: check-algo-readme
      - id: check-copyright
        args: ["mmdet"]
  # - repo: https://gitee.com/openmmlab/mirrors-mypy
  #   rev: v0.812
  #   hooks:
  #     - id: mypy
  #       exclude: "docs"
repos:
  - repo: https://github.com/PyCQA/flake8
    rev: 5.0.4
    hooks:
      - id: flake8
  - repo: https://github.com/PyCQA/isort
    rev: 5.11.5
    hooks:
      - id: isort
  - repo: https://github.com/pre-commit/mirrors-yapf
    rev: v0.32.0
    hooks:
      - id: yapf
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.3.0
    hooks:
      - id: trailing-whitespace
      - id: check-yaml
      - id: end-of-file-fixer
      - id: requirements-txt-fixer
      - id: double-quote-string-fixer
      - id: check-merge-conflict
      - id: fix-encoding-pragma
        args: ["--remove"]
      - id: mixed-line-ending
        args: ["--fix=lf"]
  - repo: https://github.com/codespell-project/codespell
    rev: v2.2.1
    hooks:
      - id: codespell
  - repo: https://github.com/executablebooks/mdformat
    rev: 0.7.9
    hooks:
      - id: mdformat
        args: ["--number"]
        additional_dependencies:
          - mdformat-openmmlab
          - mdformat_frontmatter
          - linkify-it-py
  - repo: https://github.com/myint/docformatter
    rev: v1.3.1
    hooks:
      - id: docformatter
        args: ["--in-place", "--wrap-descriptions", "79"]
  - repo: https://github.com/open-mmlab/pre-commit-hooks
    rev: v0.2.0  # Use the ref you want to point at
    hooks:
      - id: check-algo-readme
      - id: check-copyright
        args: ["mmdet"]  # replace the dir_to_check with your expected directory to check