Commit 49a371b2 authored by chenych's avatar chenych
Browse files

update README

parent 69f4ba48
*.mdb
*.pth
*.tar
*.ipynb
*.zip
......@@ -198,7 +197,6 @@ $RECYCLE.BIN/
# Windows shortcuts
*.lnk
.idea/
.vscode/
output/
......
......@@ -7,9 +7,11 @@ CenterFace是一种人脸检测算法,采用了轻量级网络mobileNetV2作
<div align=center>
<img src="./Architecture of the CenterFace.png"/>
</div>
## 算法原理
CenterFace模型是一种单阶段的人脸检测算法。作者借鉴了CenterNet的思想,将人脸检测转换为标准的关键点估计问题,根据人脸中心点来回归人脸框的大小和五个标志点。
## 环境配置
### Docker(方法一)
......@@ -140,6 +142,8 @@ python gen_data.py
## 训练
默认训练模型保存在./exp/下,如需修改为自己的路径,可以对centerface_pytorch/src/lib/opts_pose.py的284行进行修改
### 单机单卡
```
......@@ -158,7 +162,9 @@ bash train_multi.sh
#### 单卡推理
```
cd ./src
cd lib/external/
bash make.sh
cd ../../
python test_wider_face.py
```
......
......@@ -5,13 +5,15 @@ from __future__ import print_function
import torch
import torch.nn as nn
def _sigmoid(x):
y = torch.clamp(x.sigmoid_(), min=1e-4, max=1-1e-4)
return y
y = torch.clamp(x.sigmoid_(), min=1e-4, max=1-1e-4)
return y
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
......@@ -19,32 +21,36 @@ def _gather_feat(feat, ind, mask=None):
feat = feat.view(-1, dim)
return feat
def _tranpose_and_gather_feat(feat, ind):
    """Reorder a (B, C, H, W) map to (B, H*W, C) rows, then gather rows at ``ind``.

    The name keeps the upstream typo ("tranpose") so existing callers
    keep working.
    """
    batch = feat.size(0)
    channels = feat.size(1)
    # Channels-last, then collapse the spatial grid into a single axis.
    feat = feat.permute(0, 2, 3, 1).contiguous()
    feat = feat.view(batch, -1, channels)
    return _gather_feat(feat, ind)
def flip_tensor(x):
    """Return ``x`` mirrored along dim 3 (the width axis of an NCHW tensor)."""
    return torch.flip(x, [3])
def flip_lr(x, flip_idx):
    """Horizontally flip a channel map and swap paired left/right channels.

    Args:
        x: tensor of shape (batch, channels, H, W).
        flip_idx: iterable of (left, right) channel-index pairs to swap —
            after mirroring, left-side keypoint channels hold right-side
            responses and vice versa.

    Returns:
        A new tensor on ``x``'s device; ``x`` itself is left untouched
        (the work happens on a detached numpy copy).
    """
    # The scraped diff had duplicated this body; the unreachable copy
    # after the return has been removed.
    tmp = x.detach().cpu().numpy()[..., ::-1].copy()
    shape = tmp.shape
    for left, right in flip_idx:
        tmp[:, left, ...], tmp[:, right, ...] = \
            tmp[:, right, ...].copy(), tmp[:, left, ...].copy()
    return torch.from_numpy(tmp.reshape(shape)).to(x.device)
def flip_lr_off(x, flip_idx):
    """Horizontally flip a keypoint-offset map: mirror, negate x-offsets, swap pairs.

    Args:
        x: tensor of shape (batch, 34, H, W) — interpreted as 17 keypoints
           times (x, y) offset channels. NOTE(review): 17 looks like the COCO
           pose keypoint count carried over from CenterNet, while CenterFace
           itself regresses 5 landmarks — confirm callers before relying on it.
        flip_idx: iterable of (left, right) keypoint-index pairs to swap.

    Returns:
        A new tensor on ``x``'s device; ``x`` is not modified.
    """
    # The scraped diff had duplicated this body; the unreachable copy
    # after the return has been removed.
    tmp = x.detach().cpu().numpy()[..., ::-1].copy()
    shape = tmp.shape
    # Split channels into (keypoint, coordinate) so the x component is addressable.
    tmp = tmp.reshape(tmp.shape[0], 17, 2,
                      tmp.shape[2], tmp.shape[3])
    # Mirroring the image reverses the sign of every horizontal offset.
    tmp[:, :, 0, :, :] *= -1
    for left, right in flip_idx:
        tmp[:, left, ...], tmp[:, right, ...] = \
            tmp[:, right, ...].copy(), tmp[:, left, ...].copy()
    return torch.from_numpy(tmp.reshape(shape)).to(x.device)
......@@ -98,7 +98,7 @@ class opts(object):
help='total training epochs.')
self.parser.add_argument('--batch_size', type=int, default=32,
help='batch size')
self.parser.add_argument('--master_batch_size', type=int, default=15,
self.parser.add_argument('--master_batch_size', type=int, default=-1,
help='batch size on the master gpu.')
self.parser.add_argument('--num_iters', type=int, default=-1,
help='default: #samples / batch_size.')
......
......@@ -2,15 +2,15 @@ import os
import sys
import cv2
path = os.path.dirname(__file__)
CENTERNET_PATH = os.path.join(path, '../src/lib')
sys.path.insert(0, CENTERNET_PATH)
from opts_pose import opts
from detectors.detector_factory import detector_factory
import scipy.io as sio
path = os.path.dirname(__file__)
CENTERNET_PATH = os.path.join(path, '../src/lib')
sys.path.insert(0, CENTERNET_PATH)
def test_img(model_path, debug, threshold=0.4):
TASK = 'multi_pose'
......@@ -61,7 +61,7 @@ def test_vedio(model_path, debug, vedio_path=None):
def test_wider_Face(model_path, debug, threshold=0.05):
from progress.bar import Bar
Path = '/your/path/WIDER_val/images' # WIDER_val/images path
Path = '../datasets/images/val' # WIDER_val/images path
wider_face_mat = sio.loadmat('../evaluate/ground_truth/wider_face_val.mat')
event_list = wider_face_mat['event_list']
file_list = wider_face_mat['file_list']
......
#!/usr/bin/env bash
# Single-GPU training launch: 640x640 input, batch size 8, lr 5e-4.
# The scraped diff showed both the old (512x512) and new (640x640) command;
# only the current one is kept — running both would train twice.
python main.py --input_h 640 --input_w 640 --batch_size 8 --lr 5e-4 --data_dir ../datasets/
#!/usr/bin/env bash
# Two-GPU training launch (--gpus 0,1): 640x640 input, batch size 8, lr 5e-4.
# The scraped diff showed both the old (512x512) and new (640x640) command;
# only the current one is kept — running both would train twice.
python main.py --input_h 640 --input_w 640 --batch_size 8 --lr 5e-4 --data_dir ../datasets/ --gpus 0,1
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment