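"""LPRNet licence-plate recognition: inference / validation script.

Runs a trained LPRNet model on a single image or a directory of images and
optionally exports the model to ONNX.
"""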
import argparse
import cv2
import os
import torch
import numpy as np
from lprnet import build_lprnet
from load_data import CHARS

def infer(args, image, model):
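    """Recognize the licence plate in a single image file.

    The image is resized to 94x24, normalized, passed through LPRNet, and the
    per-time-step class scores are greedy-decoded into a plate string.
    """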
    img = cv2.imread(image)
    height, width, _ = img.shape
    if height != 24 or width != 94:
        img = cv2.resize(img, (94, 24))
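    # Normalize pixels to roughly [-1, 1]: subtract 127.5, then scale by 0.0078125 (= 1/128).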
    img = img.astype('float32')
    img -= 127.5
    img *= 0.0078125
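    # HWC -> CHW: PyTorch expects channel-first tensors.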
    img = np.transpose(img, (2, 0, 1))

    with torch.no_grad():
        img = torch.from_numpy(img).unsqueeze(0).to(args.device)
        preb = model(img)
        preb = preb.detach().cpu().numpy().squeeze()
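    # Greedy CTC decode: take the argmax class at every time step, then collapse
    # consecutive repeats and drop blanks (the blank index is len(CHARS) - 1).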
    preb_label = []
    for j in range(preb.shape[1]):
        preb_label.append(np.argmax(preb[:, j], axis=0))
    no_repeat_blank_label = []
    pre_c = preb_label[0]
    if pre_c != len(CHARS) - 1:
        no_repeat_blank_label.append(pre_c)
    for c in preb_label:
        if (pre_c == c) or (c == len(CHARS) - 1):
            if c == len(CHARS) - 1:
                pre_c = c
            continue
        no_repeat_blank_label.append(c)
        pre_c = c
    return ''.join(CHARS[i] for i in no_repeat_blank_label)

def validation(args):
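    """Load LPRNet weights and run recognition on a single image or a directory.

    For a directory, accuracy is computed by comparing each prediction with the
    image file name (extension stripped). Optionally exports the model to ONNX.
    """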
    model = build_lprnet(len(CHARS))
    model.load_state_dict(torch.load(args.model, map_location=args.device))
    model.to(args.device)
    model.eval()  # run in inference mode (fixes BatchNorm/Dropout behaviour for evaluation)

    if os.path.isdir(args.imgpath):
        images = os.listdir(args.imgpath)
        count = 0
        for image in images:
            res = infer(args, os.path.join(args.imgpath, image), model)
            if res == image[:-4]:
                count += 1
            print('Image: ' + image + ' recognition result: ' + res)
        print('accuracy:', count / len(images))
    else:
        res = infer(args, args.imgpath, model)
        print('Image: ' + args.imgpath + ' recognition result: ' + res)
    
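    # Export with a fixed 1x3x24x94 input; the batch axis becomes dynamic when --dynamic is set.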
    if args.export_onnx:
        print('exporting PyTorch model to ONNX...')
        onnx_input = torch.randn(1, 3, 24, 94, device=args.device)
        torch.onnx.export(
            model,
            onnx_input,
            'LPRNet.onnx',
            input_names=['input'],
            output_names=['output'],
            dynamic_axes={'input': {0: 'batch'}, 'output': {0: 'batch'}} if args.dynamic else None,
            opset_version=12,
            )
    return res
    

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='parameters to validate the net')
    # parser.add_argument('--model', default='model/lprnet.pth', help='model path to validate')
    parser.add_argument('--model', default='weights/Final_LPRNet_model.pth', help='model path to validate')
    # parser.add_argument('--imgpath', default='imgs/川JK0707.jpg', help='the image path')
    parser.add_argument('--imgpath', default='/code/lpr_ori/data/test', help='the image path')
    parser.add_argument('--device', default='cuda', help='device used to validate the model (e.g. cuda or cpu)')
    parser.add_argument('--export_onnx', action='store_true', help='export the model to ONNX')
    parser.add_argument('--dynamic', action='store_true', help='use a dynamic batch axis in the exported ONNX model')
    args = parser.parse_args()

    result = validation(args)
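# Example usage (paths below are only illustrations, adjust to your own data):
#   python test.py --model weights/Final_LPRNet_model.pth --imgpath imgs/川JK0707.jpg
#   python test.py --imgpath /code/lpr_ori/data/test --export_onnx --dynamic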