Commit df1f3ca2 authored by liuhy

Merge branch 'dev' into 'master'

Dev

See merge request modelzoo/lpr!3
parents 6b68a1af 5d44c851
@@ -26,9 +26,7 @@ def LPRNetPreprocess(image):
     return img
 
 def LPRNetPostprocess(infer_res):
-    preb_label = []
-    for j in range(infer_res.shape[1]):
-        preb_label.append(np.argmax(infer_res[:, j], axis=0))
+    preb_label = np.argmax(infer_res, axis=1)[0]
     no_repeat_blank_label = []
     pre_c = preb_label[0]
     if pre_c != len(CHARS) - 1:
@@ -51,7 +49,9 @@ def LPRNetInference(args):
     if os.path.isdir(args.imgpath):
         images = os.listdir(args.imgpath)
-        count = 0
+        Tp = 0
+        Tn_1 = 0
+        Tn_2 = 0
         time1 = time.perf_counter()
         for image in images:
             img = LPRNetPreprocess(os.path.join(args.imgpath, image))
@@ -59,11 +59,16 @@ def LPRNetInference(args):
             preb = sess.run(None, input_feed={sess.get_inputs()[0].name: img})[0]
             result = LPRNetPostprocess(preb)
             if result == image[:-4]:
-                count += 1
-            print('Inference Result:', result)
+                Tp += 1
+            elif len(result) != len(image[:-4]):
+                Tn_1 += 1
+            else:
+                Tn_2 += 1
+            print(image + ' Inference Result:', result)
         time2 = time.perf_counter()
-        print('accuracy rate:', count / len(images))
-        print('average time', (time2 - time1)/count*1000)
+        Acc = Tp * 1.0 / (Tp + Tn_1 + Tn_2)
+        print("[Info] Test Accuracy: {} [{}:{}:{}:{}]".format(Acc, Tp, Tn_1, Tn_2, (Tp+Tn_1+Tn_2)))
+        print("[Info] Test Speed: {}s 1/{}]".format((time2 - time1) / len(images), len(images)))
     else:
         img = LPRNetPreprocess(args.imgpath)
         intput = sess.get_inputs()[0].shape
...
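For reference, a minimal standalone sketch (not part of the merge request) of why the postprocess change above is a pure refactor: the vectorized `np.argmax(infer_res, axis=1)[0]` yields the same greedy label sequence as the old per-time-step loop, assuming the raw ONNX output has shape `(1, len(CHARS), T)` and the old loop operated on the squeezed matrix. The sizes below are made up for illustration.

```python
import numpy as np

# Hypothetical sizes; the real ones come from load_data.CHARS and the model's output width.
num_chars, time_steps = 68, 18
infer_res = np.random.rand(1, num_chars, time_steps).astype(np.float32)

# Old form: per-time-step argmax over the squeezed (num_chars, time_steps) matrix.
squeezed = infer_res[0]
loop_label = [int(np.argmax(squeezed[:, j], axis=0)) for j in range(squeezed.shape[1])]

# New form from this merge request: a single argmax over the class axis.
vec_label = np.argmax(infer_res, axis=1)[0]

assert loop_label == [int(c) for c in vec_label]  # same greedy label sequence
```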
@@ -28,9 +28,7 @@ def LPRNetPreprocess(image):
     return img
 
 def LPRNetPostprocess(infer_res):
-    preb_label = []
-    for j in range(infer_res.shape[1]):
-        preb_label.append(np.argmax(infer_res[:, j], axis=0))
+    preb_label = np.argmax(infer_res, axis=1)[0]
     no_repeat_blank_label = []
     pre_c = preb_label[0]
     if pre_c != len(CHARS) - 1:
@@ -57,7 +55,9 @@ def LPRNetInference(args):
     if os.path.isdir(args.imgpath):
         images = os.listdir(args.imgpath)
-        count = 0
+        Tp = 0
+        Tn_1 = 0
+        Tn_2 = 0
         time1 = time.perf_counter()
         for image in images:
             img = LPRNetPreprocess(os.path.join(args.imgpath, image))
@@ -67,11 +67,16 @@ def LPRNetInference(args):
             results = model.run({inputName: migraphx.argument(img)})
             result = LPRNetPostprocess(np.array(results[0]))
             if result == image[:-4]:
-                count += 1
-            print('Inference Result:', result)
+                Tp += 1
+            elif len(result) != len(image[:-4]):
+                Tn_1 += 1
+            else:
+                Tn_2 += 1
+            print(image + ' Inference Result:', result)
         time2 = time.perf_counter()
-        print('accuracy rate:', count / len(images))
-        print('average time', (time2 - time1)/count*1000)
+        Acc = Tp * 1.0 / (Tp + Tn_1 + Tn_2)
+        print("[Info] Test Accuracy: {} [{}:{}:{}:{}]".format(Acc, Tp, Tn_1, Tn_2, (Tp+Tn_1+Tn_2)))
+        print("[Info] Test Speed: {}s 1/{}]".format((time2 - time1) / len(images), len(images)))
     else:
         img = LPRNetPreprocess(args.imgpath)
         inputName=model.get_parameter_names()[0]
...
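Both inference scripts now report accuracy the same way as test.py: Tp counts exact matches, Tn_1 counts predictions of the wrong length, Tn_2 counts same-length predictions with at least one wrong character, and Acc = Tp / (Tp + Tn_1 + Tn_2). Below is a small sketch of that bookkeeping; the helper name `summarize` and the sample plates are made up for illustration.

```python
def summarize(predictions):
    """predictions: list of (predicted_text, ground_truth_text) pairs."""
    Tp = Tn_1 = Tn_2 = 0
    for pred, gt in predictions:
        if pred == gt:
            Tp += 1        # exact match
        elif len(pred) != len(gt):
            Tn_1 += 1      # wrong length: a character was dropped or inserted
        else:
            Tn_2 += 1      # right length, at least one wrong character
    acc = Tp * 1.0 / (Tp + Tn_1 + Tn_2)
    return acc, Tp, Tn_1, Tn_2

# summarize([("AB123", "AB123"), ("AB12", "AB123"), ("AB124", "AB123")])
# -> (0.333..., 1, 1, 1)
```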
@@ -55,6 +55,7 @@ The LPR model is tested with test.py against the trained model; usage is as follows:
 python test.py \
     --model path of the .pth model to test \
     --imgpath path of the test set (a directory or a single image) \
+    --batch_size batch size used during testing \
     --export_onnx True/False (whether to convert the .pth model to an onnx model) \
     --dynamic True/False (whether the onnx model uses a dynamic batch size)
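For readers wondering what --export_onnx and --dynamic correspond to: test.py calls torch.onnx.export with input_names=['input'] and opset_version=12 (see the test.py hunk further down). The sketch below fills in the rest under stated assumptions, since the diff truncates the call; output_names and the dynamic_axes mapping are assumptions, not taken from the repository.

```python
import torch
from load_data import CHARS
from lprnet import build_lprnet

lprnet = build_lprnet(class_num=len(CHARS), phase=False)
lprnet.load_state_dict(torch.load('model/lprnet.pth', map_location='cpu'))
lprnet.eval()

dummy = torch.randn(1, 3, 24, 94)  # LPRNet expects 94x24 plate crops
torch.onnx.export(
    lprnet,
    dummy,
    'LPRNet.onnx',
    input_names=['input'],
    output_names=['output'],                                       # assumed output name
    dynamic_axes={'input': {0: 'batch'}, 'output': {0: 'batch'}},  # presumably what --dynamic enables
    opset_version=12,
)
```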
@@ -81,9 +82,9 @@ LPRNet_migraphx_infer.py is the inference script based on Migraphx; Migraphx must be installed to use it...
 | Engine | Model Path | Model Format | Accuracy(%) | Speed(ms) |
 | :------: | :------: | :------: | :------: | :------: |
-| ONNXRuntime | model/LPRNet.onnx | onnx | 91.0 | 2.59 |
-| Migraphx | model/LPRNet.onnx | onnx | 91.0 | 2.66 |
-| Migraphx | model/LPRNet.mxr | mxr | 91.0 | 2.49 |
+| ONNXRuntime | model/LPRNet.onnx | onnx | 92.7 | 2.59 |
+| Migraphx | model/LPRNet.onnx | onnx | 92.7 | 2.66 |
+| Migraphx | model/LPRNet.mxr | mxr | 92.7 | 2.49 |
 
 ## References
 * [LPRNet_Pytorch](https://github.com/sirius-ai/LPRNet_Pytorch)
 * [license-plate-detect-recoginition](https://github.com/qzpzd/license-plate-detect-recoginition)
@@ -89,5 +89,3 @@ def build_lprnet(class_num, phase=False):
         return Net.train()
     else:
         return Net.eval()
No preview for this file type (3 changed files without a text preview)
@@ -4,65 +4,94 @@ import os
 import torch
 import numpy as np
 from lprnet import build_lprnet
-from load_data import CHARS
+from load_data import CHARS, LPRDataLoader
 import time
+from torch.utils.data import *
+from torch.autograd import Variable
 
-def infer(args, image, model):
-    img = cv2.imread(image)
-    height, width, _ = img.shape
-    if height != 24 or width != 94:
-        img = cv2.resize(img, (94, 24))
-    img = img.astype('float32')
-    img -= 127.5
-    img *= 0.0078125
-    img = np.transpose(img, (2, 0, 1))
-
-    with torch.no_grad():
-        img = torch.from_numpy(img).unsqueeze(0).to(args.device)
-        preb = model(img)
-        preb = preb.detach().cpu().numpy().squeeze()
-    preb_label = []
-    for j in range(preb.shape[1]):
-        preb_label.append(np.argmax(preb[:, j], axis=0))
-    no_repeat_blank_label = []
-    pre_c = preb_label[0]
-    if pre_c != len(CHARS) - 1:
-        no_repeat_blank_label.append(pre_c)
-    for c in preb_label:
-        if (pre_c == c) or (c == len(CHARS) - 1):
-            if c == len(CHARS) - 1:
-                pre_c = c
-            continue
-        no_repeat_blank_label.append(c)
-        pre_c = c
-    return ''.join(list(map(lambda x: CHARS[x], no_repeat_blank_label)))
+def collate_fn(batch):
+    imgs = []
+    labels = []
+    lengths = []
+    for _, sample in enumerate(batch):
+        img, label, length = sample
+        imgs.append(torch.from_numpy(img))
+        labels.extend(label)
+        lengths.append(length)
+    labels = np.asarray(labels).flatten().astype(np.float32)
+    return (torch.stack(imgs, 0), torch.from_numpy(labels), lengths)
+
+def Greedy_Decode_Eval(Net, datasets, args):
+    epoch_size = len(datasets) // args.batch_size
+    batch_iterator = iter(DataLoader(datasets, args.batch_size, shuffle=True, num_workers=args.num_workers, collate_fn=collate_fn))
+
+    Tp = 0
+    Tn_1 = 0
+    Tn_2 = 0
+    t1 = time.time()
+    for i in range(epoch_size):
+        # load train data
+        images, labels, lengths = next(batch_iterator)
+        start = 0
+        targets = []
+        for length in lengths:
+            label = labels[start:start+length]
+            targets.append(label)
+            start += length
+        targets = np.array([el.numpy() for el in targets])
+
+        if args.cuda:
+            images = Variable(images.cuda())
+        else:
+            images = Variable(images)
+
+        # forward
+        prebs = Net(images)
+        # greedy decode
+        prebs = prebs.cpu().detach().numpy()
+        preb_max = np.argmax(prebs, axis=1)
+        preb_labels = list()
+        for preb in preb_max:
+            no_repeat_blank_label = list()
+            pre_c = preb[0]
+            if pre_c != len(CHARS) - 1:
+                no_repeat_blank_label.append(pre_c)
+            for c in preb:  # dropout repeate label and blank label
+                if (pre_c == c) or (c == len(CHARS) - 1):
+                    if c == len(CHARS) - 1:
+                        pre_c = c
+                    continue
+                no_repeat_blank_label.append(c)
+                pre_c = c
+            preb_labels.append(no_repeat_blank_label)
+        for i, label in enumerate(preb_labels):
+            if len(label) != len(targets[i]):
+                Tn_1 += 1
+                continue
+            if (np.asarray(targets[i]) == np.asarray(label)).all():
+                Tp += 1
+            else:
+                Tn_2 += 1
+    Acc = Tp * 1.0 / (Tp + Tn_1 + Tn_2)
+    print("[Info] Test Accuracy: {} [{}:{}:{}:{}]".format(Acc, Tp, Tn_1, Tn_2, (Tp+Tn_1+Tn_2)))
+    t2 = time.time()
+    print("[Info] Test Speed: {}s 1/{}]".format((t2 - t1) / len(datasets), len(datasets)))
 
 def validation(args):
-    model = build_lprnet(len(CHARS))
-    model.load_state_dict(torch.load(args.model, map_location=args.device))
-    model.to(args.device)
+    lprnet = build_lprnet(class_num=len(CHARS), phase=args.phase_train)
+    lprnet.load_state_dict(torch.load(args.model))
+    lprnet.to(args.device)
+    print("Successful to build network!")
 
-    if os.path.isdir(args.imgpath):
-        images = os.listdir(args.imgpath)
-        count = 0
-        time1 = time.perf_counter()
-        for image in images:
-            result = infer(args, os.path.join(args.imgpath, image), model)
-            if result == image[:-4]:
-                count += 1
-            print('Image: ' + image + ' recongise result: '+ result)
-        time2 = time.perf_counter()
-        print('accuracy rate:', count / len(images))
-        print('average time', (time2 - time1)/count*1000)
-    else:
-        result = infer(args, args.imgpath, model)
-        print('Image: ' + args.imgpath + ' recongise result: '+ result)
+    test_img_dirs = os.path.expanduser(args.imgpath)
+    test_dataset = LPRDataLoader(test_img_dirs.split(','), args.img_size)
+    Greedy_Decode_Eval(lprnet, test_dataset, args)
 
     if args.export_onnx:
         print('export pytorch model to onnx model...')
        onnx_input = torch.randn(1, 3, 24, 94, device=args.device)
         torch.onnx.export(
-            model,
+            lprnet,
             onnx_input,
             'LPRNet.onnx',
             input_names=['input'],
@@ -71,15 +100,19 @@ def validation(args):
             opset_version=12,
         )
 
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description='parameters to vaildate net')
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description='parameters to train net')
+    parser.add_argument('--img_size', default=[94, 24], help='the image size')
+    parser.add_argument('--imgpath', default="imgs", help='the image path')
     parser.add_argument('--model', default='model/lprnet.pth', help='model path to vaildate')
-    parser.add_argument('--imgpath', default='imgs', help='the image path')
+    parser.add_argument('--batch_size', default=100, type=int, help='testing batch size.')
+    parser.add_argument('--cuda', default=True, type=bool, help='Use cuda to train model')
     parser.add_argument('--device', default='cuda', help='Use cuda to vaildate model')
     parser.add_argument('--export_onnx', default=False, help='export model to onnx')
     parser.add_argument('--dynamic', default=False, help='use dynamic batch size')
+    parser.add_argument('--phase_train', default=False, type=bool, help='train or test phase flag.')
+    parser.add_argument('--num_workers', default=8, type=int, help='Number of workers used in dataloading')
     args = parser.parse_args()
 
     validation(args)
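For context, a standalone sketch of what the new collate_fn (copied from the hunk above, matching the upstream LPRNet_Pytorch evaluation code) produces for a small fake batch. It assumes each LPRDataLoader sample is an (image_array, label_index_list, label_length) triple; the shapes and label indices below are made up.

```python
import numpy as np
import torch

def collate_fn(batch):
    imgs, labels, lengths = [], [], []
    for img, label, length in batch:
        imgs.append(torch.from_numpy(img))
        labels.extend(label)
        lengths.append(length)
    labels = np.asarray(labels).flatten().astype(np.float32)
    return torch.stack(imgs, 0), torch.from_numpy(labels), lengths

# Two fake samples: CHW plate crops plus plates of length 7 and 8.
batch = [
    (np.zeros((3, 24, 94), dtype=np.float32), [1, 2, 3, 4, 5, 6, 7], 7),
    (np.zeros((3, 24, 94), dtype=np.float32), [8, 9, 10, 11, 12, 13, 14, 15], 8),
]
images, flat_labels, lengths = collate_fn(batch)
print(images.shape, flat_labels.shape, lengths)
# torch.Size([2, 3, 24, 94]) torch.Size([15]) [7, 8]
```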
@@ -178,6 +178,7 @@ def train(args):
         if (iteration + 1) % args.test_interval == 0:
             Greedy_Decode_Eval(lprnet, test_dataset, args)
+            # lprnet.train()
 
         start_time = time.time()
         # load train data
@@ -239,7 +240,7 @@ def get_parser():
     parser.add_argument('--weight_decay', default=2e-5, type=float, help='Weight decay for SGD')
     parser.add_argument('--lr_schedule', default=[4, 8, 12, 14, 16], help='schedule for learning rate.')
     parser.add_argument('--save_folder', default='./weights/', help='Location to save checkpoint models')
-    parser.add_argument('--pretrained_model', default='./weights/Final_LPRNet_model.pth', help='pretrained base model')
+    parser.add_argument('--pretrained_model', default='model/lprnet.pth', help='pretrained base model')
 
     args = parser.parse_args()
     return args
...