# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from argparse import ArgumentParser, RawDescriptionHelpFormatter
import sys
import yaml
import os
from ppocr.utils.utility import create_module
from ppocr.utils.utility import initial_logger
logger = initial_logger()

import paddle.fluid as fluid
import time
from ppocr.utils.stats import TrainingStats
from eval_utils.eval_det_utils import eval_det_run
from eval_utils.eval_rec_utils import eval_rec_run
from ppocr.utils.save_load import save_model
import numpy as np
from ppocr.utils.character import cal_predicts_accuracy, cal_predicts_accuracy_srn, CharacterOps

class ArgsParser(ArgumentParser):
    def __init__(self):
        super(ArgsParser, self).__init__(
            formatter_class=RawDescriptionHelpFormatter)
        self.add_argument("-c", "--config", help="configuration file to use")
        self.add_argument(
            "-o", "--opt", nargs='+', help="set configuration options")

    def parse_args(self, argv=None):
        args = super(ArgsParser, self).parse_args(argv)
        assert args.config is not None, \
            "Please specify --config=configure_file_path."
        args.opt = self._parse_opt(args.opt)
        return args

    def _parse_opt(self, opts):
        config = {}
        if not opts:
            return config
        for s in opts:
            s = s.strip()
            k, v = s.split('=', 1)  # split on the first '=' only
            config[k] = yaml.load(v, Loader=yaml.Loader)
        return config
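

# A hedged usage sketch (not called anywhere; the config path is made up).
# "-o" values go through yaml.load, so scalars keep their YAML types.
def _example_args_parser():
    args = ArgsParser().parse_args(
        ["-c", "configs/det/det_db.yml", "-o", "Global.use_gpu=false"])
    assert args.opt == {"Global.use_gpu": False}  # YAML "false" -> bool False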


class AttrDict(dict):
    """Single level attribute dict, NOT recursive"""

    def __init__(self, **kwargs):
        super(AttrDict, self).__init__()
        super(AttrDict, self).update(kwargs)

    def __getattr__(self, key):
        if key in self:
            return self[key]
        raise AttributeError("object has no attribute '{}'".format(key))
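

# Hedged sketch (not called anywhere): keys read back as attributes, but
# writes must use normal dict item assignment, since only __getattr__ is
# overridden.
def _example_attr_dict():
    cfg = AttrDict(use_gpu=True)
    assert cfg.use_gpu is True
    cfg['epoch_num'] = 10  # attribute assignment would NOT update the dict
    assert cfg.epoch_num == 10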


global_config = AttrDict()

default_config = {'Global': {'debug': False}}


def load_config(file_path):
    """
    Load config from yml/yaml file.

    Args:
        file_path (str): Path of the config file to be loaded.

    Returns: global config
    """
    merge_config(default_config)
    _, ext = os.path.splitext(file_path)
    assert ext in ['.yml', '.yaml'], "only support yaml files for now"
    with open(file_path) as f:
        merge_config(yaml.load(f, Loader=yaml.Loader))
    assert "reader_yml" in global_config['Global'], \
        "reader_yml is missing from the Global section"
    reader_file_path = global_config['Global']['reader_yml']
    _, ext = os.path.splitext(reader_file_path)
    assert ext in ['.yml', '.yaml'], "only support yaml files for reader"
    with open(reader_file_path) as f:
        merge_config(yaml.load(f, Loader=yaml.Loader))
    return global_config
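

# Hedged usage sketch (file names made up): the main yml must set
# Global.reader_yml, and both files end up merged into the same global config:
#
#   config = load_config("configs/det/det_db.yml")
#   reader_cfg_path = config['Global']['reader_yml']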


def merge_config(config):
    """
    Merge config into global config.

    Args:
        config (dict): Config to be merged.

    Returns: global config
    """
    for key, value in config.items():
        if "." not in key:
            if isinstance(value, dict) and key in global_config:
                global_config[key].update(value)
            else:
                global_config[key] = value
        else:
            sub_keys = key.split('.')
            assert sub_keys[0] in global_config, \
                "the first sub key must be one of the global_config keys: " \
                "{}, but got: {}; please check your running command".format(
                    global_config.keys(), sub_keys[0])
            # walk down to the parent of the leaf key, then assign the value
            cur = global_config[sub_keys[0]]
            for idx, sub_key in enumerate(sub_keys[1:]):
                assert sub_key in cur, \
                    "sub key '{}' not found in global config".format(sub_key)
                if idx == len(sub_keys) - 2:
                    cur[sub_key] = value
                else:
                    cur = cur[sub_key]
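

# Hedged sketch of the two key forms handled above (not called anywhere):
def _example_merge_config():
    merge_config({'Global': {'epoch_num': 10}})  # plain nested dict
    merge_config({'Global.epoch_num': 20})  # dotted form, same target
    assert global_config['Global']['epoch_num'] == 20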


def check_gpu(use_gpu):
    """
    Log error and exit when set use_gpu=true in paddlepaddle
    cpu version.
    """
    err = "Config use_gpu cannot be set as true while you are " \
          "using paddlepaddle cpu version ! \nPlease try: \n" \
          "\t1. Install paddlepaddle-gpu to run model on GPU \n" \
          "\t2. Set use_gpu as false in config file to run " \
          "model on CPU"

    try:
        if use_gpu and not fluid.is_compiled_with_cuda():
            logger.error(err)
            sys.exit(1)
    except Exception:
        # is_compiled_with_cuda may be unavailable in some paddle builds;
        # skip the check in that case rather than crash.
        pass


def build(config, main_prog, startup_prog, mode):
    """
    Build a program using a model and an optimizer
        1. create feeds
        2. create a dataloader
        3. create a model
        4. create fetchs
        5. create an optimizer

    Args:
        config(dict): config
        main_prog(Program): main program
        startup_prog(Program): startup program
        mode(str): 'train' or 'eval'

    Returns:
        dataloader: a bridge between the model and the data
        fetch_name_list(list): names of the fetched outputs (loss and metrics)
        fetch_varname_list(list): variable names of the fetched outputs
        opt_loss_name(str): name of the optimized loss (train mode only)
        model_average: ModelAverage instance when loss_type is 'srn', else None
    """
    with fluid.program_guard(main_prog, startup_prog):
        with fluid.unique_name.guard():
            func_info = config['Architecture']['function']
            model = create_module(func_info)(params=config)
            dataloader, outputs = model(mode=mode)
            fetch_name_list = list(outputs.keys())
            fetch_varname_list = [outputs[v].name for v in fetch_name_list]
            opt_loss_name = None
            model_average = None
            img_loss_name = None
            word_loss_name = None
            if mode == "train":
                opt_loss = outputs['total_loss']
                # srn loss
                #img_loss = outputs['img_loss']
                #word_loss = outputs['word_loss']
                #img_loss_name = img_loss.name
                #word_loss_name = word_loss.name
                opt_params = config['Optimizer']
                optimizer = create_module(opt_params['function'])(opt_params)
                optimizer.minimize(opt_loss)
                opt_loss_name = opt_loss.name
                global_lr = optimizer._global_learning_rate()
                fetch_name_list.insert(0, "lr")
                fetch_varname_list.insert(0, global_lr.name)
                if config['Global']["loss_type"] == 'srn':
                    model_average = fluid.optimizer.ModelAverage(
                        config['Global']['average_window'],
                        min_average_window=config['Global']['min_average_window'],
                        max_average_window=config['Global']['max_average_window'])

    return (dataloader, fetch_name_list, fetch_varname_list, opt_loss_name,
            model_average)
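

# Hedged usage sketch (train mode; variable names are illustrative):
#
#   train_prog, startup_prog = fluid.Program(), fluid.Program()
#   (loader, fetch_name_list, fetch_varname_list, loss_name,
#    model_average) = build(config, train_prog, startup_prog, 'train')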


def build_export(config, main_prog, startup_prog):
    """
    """
    with fluid.program_guard(main_prog, startup_prog):
        with fluid.unique_name.guard():
            func_info = config['Architecture']['function']
            model = create_module(func_info)(params=config)
            image, outputs = model(mode='export')
            fetches_var_name = sorted(outputs.keys())
            fetches_var = [outputs[name] for name in fetches_var_name]
    feeded_var_names = [image.name]
    target_vars = fetches_var
    return feeded_var_names, target_vars, fetches_var_name
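

# Hedged sketch of how these return values are typically consumed for export
# (directory name made up):
#
#   fluid.io.save_inference_model(
#       dirname="./inference", feeded_var_names=feeded_var_names,
#       target_vars=target_vars, executor=exe, main_program=export_prog)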


def create_multi_devices_program(program, loss_var_name):
    build_strategy = fluid.BuildStrategy()
    build_strategy.memory_optimize = False
    build_strategy.enable_inplace = True
    exec_strategy = fluid.ExecutionStrategy()
    exec_strategy.num_iteration_per_drop_scope = 1
    compile_program = fluid.CompiledProgram(program).with_data_parallel(
        loss_name=loss_var_name,
        build_strategy=build_strategy,
        exec_strategy=exec_strategy)
    return compile_program
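

# Hedged sketch: the compiled program is what exe.run receives in the
# training loops below:
#
#   compiled = create_multi_devices_program(train_prog, loss_name)
#   outs = exe.run(program=compiled,
#                  fetch_list=fetch_varname_list,
#                  return_numpy=False)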


def train_eval_det_run(config, exe, train_info_dict, eval_info_dict):
    train_batch_id = 0
    log_smooth_window = config['Global']['log_smooth_window']
    epoch_num = config['Global']['epoch_num']
    print_batch_step = config['Global']['print_batch_step']
    eval_batch_step = config['Global']['eval_batch_step']
    start_eval_step = 0
    if isinstance(eval_batch_step, list) and len(eval_batch_step) >= 2:
        start_eval_step = eval_batch_step[0]
        eval_batch_step = eval_batch_step[1]
        logger.info(
            "During the training process, after the {}th iteration, an evaluation is run every {} iterations".
            format(start_eval_step, eval_batch_step))
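        # e.g. Global.eval_batch_step: [4000, 5000] -> evaluate every 5000
        # iterations once training passes iteration 4000.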
    save_epoch_step = config['Global']['save_epoch_step']
    save_model_dir = config['Global']['save_model_dir']
    if not os.path.exists(save_model_dir):
        os.makedirs(save_model_dir)
    train_stats = TrainingStats(log_smooth_window,
                                train_info_dict['fetch_name_list'])
    best_eval_hmean = -1
    best_batch_id = 0
    best_epoch = 0
    train_loader = train_info_dict['reader']
    for epoch in range(epoch_num):
        train_loader.start()
        try:
            while True:
                t1 = time.time()
                train_outs = exe.run(
                    program=train_info_dict['compile_program'],
                    fetch_list=train_info_dict['fetch_varname_list'],
                    return_numpy=False)
                stats = {}
                for tno in range(len(train_outs)):
                    fetch_name = train_info_dict['fetch_name_list'][tno]
                    fetch_value = np.mean(np.array(train_outs[tno]))
                    stats[fetch_name] = fetch_value
                t2 = time.time()
                train_batch_elapse = t2 - t1
                train_stats.update(stats)
                if train_batch_id > 0 and \
                        train_batch_id % print_batch_step == 0:
                    logs = train_stats.log()
                    strs = 'epoch: {}, iter: {}, {}, time: {:.3f}'.format(
                        epoch, train_batch_id, logs, train_batch_elapse)
                    logger.info(strs)

                if train_batch_id > start_eval_step and\
                    (train_batch_id - start_eval_step) % eval_batch_step == 0:
                    metrics = eval_det_run(exe, config, eval_info_dict, "eval")
                    hmean = metrics['hmean']
                    if hmean >= best_eval_hmean:
                        best_eval_hmean = hmean
                        best_batch_id = train_batch_id
                        best_epoch = epoch
                        save_path = save_model_dir + "/best_accuracy"
                        save_model(train_info_dict['train_program'], save_path)
                    strs = 'Test iter: {}, metrics:{}, best_hmean:{:.6f}, best_epoch:{}, best_batch_id:{}'.format(
                        train_batch_id, metrics, best_eval_hmean, best_epoch,
                        best_batch_id)
                    logger.info(strs)
                train_batch_id += 1

        except fluid.core.EOFException:
            train_loader.reset()
        if epoch == 0 and save_epoch_step == 1:
            save_path = save_model_dir + "/iter_epoch_0"
            save_model(train_info_dict['train_program'], save_path)
        if epoch > 0 and epoch % save_epoch_step == 0:
            save_path = save_model_dir + "/iter_epoch_%d" % (epoch)
            save_model(train_info_dict['train_program'], save_path)
    return


def train_eval_rec_run(config, exe, train_info_dict, eval_info_dict):
    train_batch_id = 0
    log_smooth_window = config['Global']['log_smooth_window']
    epoch_num = config['Global']['epoch_num']
    print_batch_step = config['Global']['print_batch_step']
    eval_batch_step = config['Global']['eval_batch_step']
    start_eval_step = 0
    if isinstance(eval_batch_step, list) and len(eval_batch_step) >= 2:
        start_eval_step = eval_batch_step[0]
        eval_batch_step = eval_batch_step[1]
        logger.info(
            "During the training process, after the {}th iteration, an evaluation is run every {} iterations".
            format(start_eval_step, eval_batch_step))
    save_epoch_step = config['Global']['save_epoch_step']
    save_model_dir = config['Global']['save_model_dir']
    if not os.path.exists(save_model_dir):
        os.makedirs(save_model_dir)
    train_stats = TrainingStats(log_smooth_window, ['loss', 'acc'])
    best_eval_acc = -1
    best_batch_id = 0
    best_epoch = 0
    train_loader = train_info_dict['reader']
    for epoch in range(epoch_num):
        train_loader.start()
        try:
            while True:
                t1 = time.time()
                train_outs = exe.run(
                    program=train_info_dict['compile_program'],
                    fetch_list=train_info_dict['fetch_varname_list'],
                    return_numpy=False)
                fetch_map = dict(
                    zip(train_info_dict['fetch_name_list'],
                        range(len(train_outs))))

                loss = np.mean(np.array(train_outs[fetch_map['total_loss']]))
                lr = np.mean(np.array(train_outs[fetch_map['lr']]))
                preds_idx = fetch_map['decoded_out']
                preds = np.array(train_outs[preds_idx])
                labels_idx = fetch_map['label']
                labels = np.array(train_outs[labels_idx])

                if config['Global']['loss_type'] != 'srn':
                    preds_lod = train_outs[preds_idx].lod()[0]
                    labels_lod = train_outs[labels_idx].lod()[0]

                    acc, acc_num, img_num = cal_predicts_accuracy(
                        config['Global']['char_ops'], preds, preds_lod, labels,
                        labels_lod)
                else:
                    acc, acc_num, img_num = cal_predicts_accuracy_srn(
                        config['Global']['char_ops'], preds, labels,
                        config['Global']['max_text_length'])
                t2 = time.time()
                train_batch_elapse = t2 - t1
                stats = {'loss': loss, 'acc': acc}
                train_stats.update(stats)
                if train_batch_id > start_eval_step and \
                        (train_batch_id - start_eval_step) % print_batch_step == 0:
                    logs = train_stats.log()
                    strs = 'epoch: {}, iter: {}, lr: {:.6f}, {}, time: {:.3f}'.format(
                        epoch, train_batch_id, lr, logs, train_batch_elapse)
                    logger.info(strs)

                if train_batch_id > 0 and \
                        train_batch_id % eval_batch_step == 0:
                    model_average = train_info_dict['model_average']
                    if model_average is not None:
                        model_average.apply(exe)
                    metrics = eval_rec_run(exe, config, eval_info_dict, "eval")
                    eval_acc = metrics['avg_acc']
                    eval_sample_num = metrics['total_sample_num']
                    if eval_acc > best_eval_acc:
                        best_eval_acc = eval_acc
                        best_batch_id = train_batch_id
                        best_epoch = epoch
                        save_path = save_model_dir + "/best_accuracy"
                        save_model(train_info_dict['train_program'], save_path)
                    strs = 'Test iter: {}, acc:{:.6f}, best_acc:{:.6f}, best_epoch:{}, best_batch_id:{}, eval_sample_num:{}'.format(
                        train_batch_id, eval_acc, best_eval_acc, best_epoch,
                        best_batch_id, eval_sample_num)
                    logger.info(strs)
                train_batch_id += 1

        except fluid.core.EOFException:
            train_loader.reset()
        if epoch == 0 and save_epoch_step == 1:
            save_path = save_model_dir + "/iter_epoch_0"
            save_model(train_info_dict['train_program'], save_path)
        if epoch > 0 and epoch % save_epoch_step == 0:
            save_path = save_model_dir + "/iter_epoch_%d" % (epoch)
            save_model(train_info_dict['train_program'], save_path)
    return

def preprocess():
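    """
    Parse command-line arguments, load and merge the yml configs, verify the
    GPU setting, and return the programs, place and task type for train.py.
    """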
    FLAGS = ArgsParser().parse_args()
    config = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    logger.info(config)

    # check if set use_gpu=True in paddlepaddle cpu version
    use_gpu = config['Global']['use_gpu']
    check_gpu(use_gpu)

    alg = config['Global']['algorithm']
    assert alg in ['EAST', 'DB', 'Rosetta', 'CRNN', 'STARNet', 'RARE']
    if alg in ['Rosetta', 'CRNN', 'STARNet', 'RARE']:
        config['Global']['char_ops'] = CharacterOps(config['Global'])

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    startup_program = fluid.Program()
    train_program = fluid.Program()

    if alg in ['EAST', 'DB']:
        train_alg_type = 'det'
    else:
        train_alg_type = 'rec'

    return startup_program, train_program, place, config, train_alg_type
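

# Hedged sketch of how train.py in this repo typically consumes preprocess():
#
#   startup_prog, train_prog, place, config, train_alg_type = preprocess()
#   exe = fluid.Executor(place)
#   exe.run(startup_prog)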