Unverified Commit 8bae1e40 authored by MissPenguin's avatar MissPenguin Committed by GitHub
Browse files

Merge pull request #5174 from WenmuZhou/fix_vqa

vqa code integrated into ppocr training system
parents 9fa209e3 1cbe4bf2
......@@ -61,7 +61,8 @@ def main():
else:
model_type = None
best_model_dict = load_model(config, model)
best_model_dict = load_model(
config, model, model_type=config['Architecture']["model_type"])
if len(best_model_dict):
logger.info('metric in ckpt ***************')
for k, v in best_model_dict.items():
......
......@@ -85,7 +85,7 @@ def export_single_model(model, arch_config, save_path, logger):
def main():
FLAGS = ArgsParser().parse_args()
config = load_config(FLAGS.config)
merge_config(FLAGS.opt)
config = merge_config(config, FLAGS.opt)
logger = get_logger()
# build post process
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '..')))
os.environ["FLAGS_allocator_strategy"] = 'auto_growth'
import cv2
import json
import paddle
from ppocr.data import create_operators, transform
from ppocr.modeling.architectures import build_model
from ppocr.postprocess import build_post_process
from ppocr.utils.save_load import load_model
from ppocr.utils.visual import draw_ser_results
from ppocr.utils.utility import get_image_file_list, load_vqa_bio_label_maps
import tools.program as program
def to_tensor(data):
import numbers
from collections import defaultdict
data_dict = defaultdict(list)
to_tensor_idxs = []
for idx, v in enumerate(data):
if isinstance(v, (np.ndarray, paddle.Tensor, numbers.Number)):
if idx not in to_tensor_idxs:
to_tensor_idxs.append(idx)
data_dict[idx].append(v)
for idx in to_tensor_idxs:
data_dict[idx] = paddle.to_tensor(data_dict[idx])
return list(data_dict.values())
class SerPredictor(object):
    """Semantic Entity Recognition (SER) predictor for document VQA.

    Builds the SER model, its post-process and the eval-dataset transforms
    from ``config``; text and boxes for inference come from a PaddleOCR
    engine injected into the label transforms.
    """

    def __init__(self, config):
        global_config = config['Global']

        # build post process
        self.post_process_class = build_post_process(config['PostProcess'],
                                                     global_config)

        # build model
        self.model = build_model(config['Architecture'])

        load_model(
            config, self.model, model_type=config['Architecture']["model_type"])

        # local import so paddleocr is only required when a predictor is built
        from paddleocr import PaddleOCR

        self.ocr_engine = PaddleOCR(use_angle_cls=False, show_log=False)

        # create data ops (ops are mutated in place in the config list)
        transforms = []
        for op in config['Eval']['dataset']['transforms']:
            op_name = list(op)[0]
            if 'Label' in op_name:
                # label transforms need the OCR engine to produce tokens/boxes
                op[op_name]['ocr_engine'] = self.ocr_engine
            elif op_name == 'KeepKeys':
                # batch index order used by __call__ below
                op[op_name]['keep_keys'] = [
                    'input_ids', 'labels', 'bbox', 'image', 'attention_mask',
                    'token_type_ids', 'segment_offset_id', 'ocr_info',
                    'entities'
                ]
            transforms.append(op)
        global_config['infer_mode'] = True
        # NOTE(review): `transforms` holds the same (mutated) op dicts as the
        # config list passed here, so both are equivalent sequences.
        self.ops = create_operators(config['Eval']['dataset']['transforms'],
                                    global_config)
        self.model.eval()

    def __call__(self, img_path):
        """Run SER on one image path; returns (post-processed result, batch)."""
        # read raw bytes; decoding happens inside the transform ops
        with open(img_path, 'rb') as f:
            img = f.read()
        data = {'image': img}
        batch = transform(data, self.ops)
        batch = to_tensor(batch)
        preds = self.model(batch)
        # indices follow the KeepKeys order set in __init__
        post_result = self.post_process_class(
            preds,
            attention_masks=batch[4],
            segment_offset_ids=batch[6],
            ocr_infos=batch[7])
        return post_result, batch
if __name__ == '__main__':
    # CLI entry point: run SER inference over a folder/file of images and
    # write one JSON line per image plus a visualization JPEG.
    config, device, logger, vdl_writer = program.preprocess()
    os.makedirs(config['Global']['save_res_path'], exist_ok=True)

    ser_engine = SerPredictor(config)

    infer_imgs = get_image_file_list(config['Global']['infer_img'])
    with open(
            os.path.join(config['Global']['save_res_path'],
                         "infer_results.txt"),
            "w",
            encoding='utf-8') as fout:
        for idx, img_path in enumerate(infer_imgs):
            save_img_path = os.path.join(
                config['Global']['save_res_path'],
                os.path.splitext(os.path.basename(img_path))[0] + "_ser.jpg")
            logger.info("process: [{}/{}], save result to {}".format(
                idx, len(infer_imgs), save_img_path))

            result, _ = ser_engine(img_path)
            result = result[0]
            # one line per image: <path>\t<json with ocr_info>
            fout.write(img_path + "\t" + json.dumps(
                {
                    "ocr_info": result,
                }, ensure_ascii=False) + "\n")
            # draw SER labels on the image next to the text results
            img_res = draw_ser_results(img_path, result)
            cv2.imwrite(save_img_path, img_res)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '..')))
os.environ["FLAGS_allocator_strategy"] = 'auto_growth'
import cv2
import json
import paddle
import paddle.distributed as dist
from ppocr.data import create_operators, transform
from ppocr.modeling.architectures import build_model
from ppocr.postprocess import build_post_process
from ppocr.utils.save_load import load_model
from ppocr.utils.visual import draw_re_results
from ppocr.utils.logging import get_logger
from ppocr.utils.utility import get_image_file_list, load_vqa_bio_label_maps, print_dict
from tools.program import ArgsParser, load_config, merge_config, check_gpu
from tools.infer_vqa_token_ser import SerPredictor
class ReArgsParser(ArgsParser):
    """Argument parser for RE inference that additionally requires a SER
    config (``-c_ser``) and accepts SER option overrides (``-o_ser``)."""

    def __init__(self):
        super().__init__()
        self.add_argument(
            "-c_ser", "--config_ser", help="ser configuration file to use")
        self.add_argument(
            "-o_ser",
            "--opt_ser",
            nargs='+',
            help="set ser configuration options ")

    def parse_args(self, argv=None):
        """Parse argv, enforce the SER config and parse its overrides."""
        args = super().parse_args(argv)
        assert args.config_ser is not None, \
            "Please specify --config_ser=ser_configure_file_path."
        args.opt_ser = self._parse_opt(args.opt_ser)
        return args
def make_input(ser_inputs, ser_results):
    """Build the RE-model input from SER outputs.

    Keeps only entities whose SER prediction is not 'O', pairs every
    QUESTION entity with every ANSWER entity as a candidate relation, and
    rewrites ``ser_inputs`` in place (dropping label, segment_offset_id and
    ocr_info). Returns the RE input list and, per batch sample, the mapping
    from filtered-entity index back to the original entity index.
    """
    entities_labels = {'HEADER': 0, 'QUESTION': 1, 'ANSWER': 2}

    entities = ser_inputs[8][0]
    ser_results = ser_results[0]
    assert len(entities) == len(ser_results)

    # entities: drop 'O' predictions, remembering original positions
    start, end, label = [], [], []
    entity_idx_dict = {}
    for orig_idx, (res, entity) in enumerate(zip(ser_results, entities)):
        if res['pred'] == 'O':
            continue
        entity_idx_dict[len(start)] = orig_idx
        start.append(entity['start'])
        end.append(entity['end'])
        label.append(entities_labels[res['pred']])
    entities = dict(start=start, end=end, label=label)

    # relations: every QUESTION (1) is a candidate head for every ANSWER (2)
    labels = entities["label"]
    pairs = [(i, j)
             for i in range(len(labels)) if labels[i] == 1
             for j in range(len(labels)) if labels[j] == 2]
    relations = dict(head=[i for i, _ in pairs], tail=[j for _, j in pairs])

    # every sample in the batch shares the same entity/relation candidates
    batch_size = ser_inputs[0].shape[0]
    entities_batch = [entities] * batch_size
    relations_batch = [relations] * batch_size
    entity_idx_dict_batch = [entity_idx_dict] * batch_size

    ser_inputs[8] = entities_batch
    ser_inputs.append(relations_batch)
    # remove ocr_info, segment_offset_id and label from the ser input
    for pos in (7, 6, 1):
        ser_inputs.pop(pos)
    return ser_inputs, entity_idx_dict_batch
class SerRePredictor(object):
    """Two-stage predictor: run SER on an image, then the RE model on top
    of the detected entities to link questions to answers."""

    def __init__(self, config, ser_config):
        # the SER stage produces the entities the RE model links together
        self.ser_engine = SerPredictor(ser_config)

        # init re model
        global_config = config['Global']

        # build post process
        self.post_process_class = build_post_process(config['PostProcess'],
                                                     global_config)

        # build model
        self.model = build_model(config['Architecture'])
        load_model(
            config, self.model, model_type=config['Architecture']["model_type"])
        self.model.eval()

    def __call__(self, img_path):
        """Run SER + RE on one image path and return the post-processed result."""
        ser_results, ser_inputs = self.ser_engine(img_path)
        # (debug dumps removed: the original unconditionally wrote
        # 'ser_inputs.npy'/'ser_results.npy' pickle files into the CWD
        # on every inference call)
        re_input, entity_idx_dict_batch = make_input(ser_inputs, ser_results)
        preds = self.model(re_input)
        post_result = self.post_process_class(
            preds,
            ser_results=ser_results,
            entity_idx_dict_batch=entity_idx_dict_batch)
        return post_result
def preprocess():
    """Parse CLI args, load the RE and SER configs, select the device and
    set up logging. Returns (config, ser_config, device, logger)."""
    flags = ReArgsParser().parse_args()

    config = merge_config(load_config(flags.config), flags.opt)
    ser_config = merge_config(load_config(flags.config_ser), flags.opt_ser)

    logger = get_logger(name='root')

    # check if set use_gpu=True in paddlepaddle cpu version
    use_gpu = config['Global']['use_gpu']
    check_gpu(use_gpu)

    device = 'gpu:{}'.format(dist.ParallelEnv().dev_id) if use_gpu else 'cpu'
    device = paddle.set_device(device)

    logger.info('{} re config {}'.format('*' * 10, '*' * 10))
    print_dict(config, logger)
    logger.info('\n')
    logger.info('{} ser config {}'.format('*' * 10, '*' * 10))
    print_dict(ser_config, logger)

    logger.info('train with paddle {} and device {}'.format(paddle.__version__,
                                                            device))
    return config, ser_config, device, logger
if __name__ == '__main__':
    # CLI entry point: run SER+RE inference over images and write one JSON
    # line per image plus a relation visualization JPEG.
    config, ser_config, device, logger = preprocess()
    os.makedirs(config['Global']['save_res_path'], exist_ok=True)

    ser_re_engine = SerRePredictor(config, ser_config)

    infer_imgs = get_image_file_list(config['Global']['infer_img'])
    with open(
            os.path.join(config['Global']['save_res_path'],
                         "infer_results.txt"),
            "w",
            encoding='utf-8') as fout:
        for idx, img_path in enumerate(infer_imgs):
            # NOTE(review): RE results reuse the "_ser.jpg" suffix here
            save_img_path = os.path.join(
                config['Global']['save_res_path'],
                os.path.splitext(os.path.basename(img_path))[0] + "_ser.jpg")
            logger.info("process: [{}/{}], save result to {}".format(
                idx, len(infer_imgs), save_img_path))

            result = ser_re_engine(img_path)
            result = result[0]
            # NOTE(review): "ser_resule" looks like a typo for "ser_result";
            # kept as-is since downstream parsers may rely on this key.
            fout.write(img_path + "\t" + json.dumps(
                {
                    "ser_resule": result,
                }, ensure_ascii=False) + "\n")
            # draw predicted question→answer links on the image
            img_res = draw_re_results(img_path, result)
            cv2.imwrite(save_img_path, img_res)
......@@ -69,24 +69,6 @@ class ArgsParser(ArgumentParser):
return config
class AttrDict(dict):
    """Single level attribute dict, NOT recursive"""

    def __init__(self, **kwargs):
        super(AttrDict, self).__init__()
        super(AttrDict, self).update(kwargs)

    def __getattr__(self, key):
        # EAFP: attribute access falls through to item lookup
        try:
            return self[key]
        except KeyError:
            raise AttributeError("object has no attribute '{}'".format(key))
global_config = AttrDict()
default_config = {'Global': {'debug': False, }}
def load_config(file_path):
    """
    Load config from yml/yaml file.
    Args:
        file_path (str): Path of the config file to be loaded.
    Returns: config dict
    """
    _, ext = os.path.splitext(file_path)
    assert ext in ['.yml', '.yaml'], "only support yaml files for now"
    # context manager closes the handle deterministically (the original
    # left the file object open)
    with open(file_path, 'rb') as f:
        config = yaml.load(f, Loader=yaml.Loader)
    return config
def merge_config(config, opts):
    """
    Merge option overrides into a config dict (modified in place).
    Args:
        config (dict): Base config to be merged into.
        opts (dict): Overrides; keys may be dotted paths like 'Global.use_gpu'.
    Returns: the merged config
    """
    for key, value in opts.items():
        if "." not in key:
            # top-level key: dict values are merged, others replace
            if isinstance(value, dict) and key in config:
                config[key].update(value)
            else:
                config[key] = value
        else:
            # dotted key: walk down to the parent of the final sub-key
            sub_keys = key.split('.')
            assert (
                sub_keys[0] in config
            ), "the sub_keys can only be one of global_config: {}, but get: {}, please check your running command".format(
                config.keys(), sub_keys[0])
            cur = config[sub_keys[0]]
            for idx, sub_key in enumerate(sub_keys[1:]):
                if idx == len(sub_keys) - 2:
                    cur[sub_key] = value
                else:
                    cur = cur[sub_key]
    return config
def check_gpu(use_gpu):
......@@ -204,20 +186,24 @@ def train(config,
model_type = None
algorithm = config['Architecture']['algorithm']
if 'start_epoch' in best_model_dict:
start_epoch = best_model_dict['start_epoch']
else:
start_epoch = 1
start_epoch = best_model_dict[
'start_epoch'] if 'start_epoch' in best_model_dict else 1
train_reader_cost = 0.0
train_run_cost = 0.0
total_samples = 0
reader_start = time.time()
max_iter = len(train_dataloader) - 1 if platform.system(
) == "Windows" else len(train_dataloader)
for epoch in range(start_epoch, epoch_num + 1):
train_dataloader = build_dataloader(
config, 'Train', device, logger, seed=epoch)
train_reader_cost = 0.0
train_run_cost = 0.0
total_samples = 0
reader_start = time.time()
max_iter = len(train_dataloader) - 1 if platform.system(
) == "Windows" else len(train_dataloader)
if train_dataloader.dataset.need_reset:
train_dataloader = build_dataloader(
config, 'Train', device, logger, seed=epoch)
max_iter = len(train_dataloader) - 1 if platform.system(
) == "Windows" else len(train_dataloader)
for idx, batch in enumerate(train_dataloader):
profiler.add_profiler_step(profiler_options)
train_reader_cost += time.time() - reader_start
......@@ -239,10 +225,11 @@ def train(config,
else:
if model_type == 'table' or extra_input:
preds = model(images, data=batch[1:])
elif model_type == "kie":
elif model_type in ["kie", 'vqa']:
preds = model(batch)
else:
preds = model(images)
loss = loss_class(preds, batch)
avg_loss = loss['loss']
......@@ -256,6 +243,7 @@ def train(config,
optimizer.clear_grad()
train_run_cost += time.time() - train_start
global_step += 1
total_samples += len(images)
if not isinstance(lr_scheduler, float):
......@@ -285,12 +273,13 @@ def train(config,
(global_step > 0 and global_step % print_batch_step == 0) or
(idx >= len(train_dataloader) - 1)):
logs = train_stats.log()
strs = 'epoch: [{}/{}], iter: {}, {}, reader_cost: {:.5f} s, batch_cost: {:.5f} s, samples: {}, ips: {:.5f}'.format(
strs = 'epoch: [{}/{}], global_step: {}, {}, avg_reader_cost: {:.5f} s, avg_batch_cost: {:.5f} s, avg_samples: {}, ips: {:.5f}'.format(
epoch, epoch_num, global_step, logs, train_reader_cost /
print_batch_step, (train_reader_cost + train_run_cost) /
print_batch_step, total_samples,
print_batch_step, total_samples / print_batch_step,
total_samples / (train_reader_cost + train_run_cost))
logger.info(strs)
train_reader_cost = 0.0
train_run_cost = 0.0
total_samples = 0
......@@ -330,6 +319,7 @@ def train(config,
optimizer,
save_model_dir,
logger,
config,
is_best=True,
prefix='best_accuracy',
best_model_dict=best_model_dict,
......@@ -344,8 +334,7 @@ def train(config,
vdl_writer.add_scalar('EVAL/best_{}'.format(main_indicator),
best_model_dict[main_indicator],
global_step)
global_step += 1
optimizer.clear_grad()
reader_start = time.time()
if dist.get_rank() == 0:
save_model(
......@@ -353,6 +342,7 @@ def train(config,
optimizer,
save_model_dir,
logger,
config,
is_best=False,
prefix='latest',
best_model_dict=best_model_dict,
......@@ -364,6 +354,7 @@ def train(config,
optimizer,
save_model_dir,
logger,
config,
is_best=False,
prefix='iter_epoch_{}'.format(epoch),
best_model_dict=best_model_dict,
......@@ -401,19 +392,28 @@ def eval(model,
start = time.time()
if model_type == 'table' or extra_input:
preds = model(images, data=batch[1:])
elif model_type == "kie":
elif model_type in ["kie", 'vqa']:
preds = model(batch)
else:
preds = model(images)
batch = [item.numpy() for item in batch]
batch_numpy = []
for item in batch:
if isinstance(item, paddle.Tensor):
batch_numpy.append(item.numpy())
else:
batch_numpy.append(item)
# Obtain usable results from post-processing methods
total_time += time.time() - start
# Evaluate the results of the current batch
if model_type in ['table', 'kie']:
eval_class(preds, batch)
eval_class(preds, batch_numpy)
elif model_type in ['vqa']:
post_result = post_process_class(preds, batch_numpy)
eval_class(post_result, batch_numpy)
else:
post_result = post_process_class(preds, batch[1])
eval_class(post_result, batch)
post_result = post_process_class(preds, batch_numpy[1])
eval_class(post_result, batch_numpy)
pbar.update(1)
total_frame += len(images)
......@@ -479,9 +479,9 @@ def preprocess(is_train=False):
FLAGS = ArgsParser().parse_args()
profiler_options = FLAGS.profiler_options
config = load_config(FLAGS.config)
merge_config(FLAGS.opt)
config = merge_config(config, FLAGS.opt)
profile_dic = {"profiler_options": FLAGS.profiler_options}
merge_config(profile_dic)
config = merge_config(config, profile_dic)
if is_train:
# save_config
......@@ -503,13 +503,8 @@ def preprocess(is_train=False):
assert alg in [
'EAST', 'DB', 'SAST', 'Rosetta', 'CRNN', 'STARNet', 'RARE', 'SRN',
'CLS', 'PGNet', 'Distillation', 'NRTR', 'TableAttn', 'SAR', 'PSE',
'SEED', 'SDMGR'
'SEED', 'SDMGR', 'LayoutXLM', 'LayoutLM'
]
windows_not_support_list = ['PSE']
if platform.system() == "Windows" and alg in windows_not_support_list:
logger.warning('{} is not support in Windows now'.format(
windows_not_support_list))
sys.exit()
device = 'gpu:{}'.format(dist.ParallelEnv().dev_id) if use_gpu else 'cpu'
device = paddle.set_device(device)
......
......@@ -27,8 +27,6 @@ import yaml
import paddle
import paddle.distributed as dist
paddle.seed(2)
from ppocr.data import build_dataloader
from ppocr.modeling.architectures import build_model
from ppocr.losses import build_loss
......@@ -36,6 +34,7 @@ from ppocr.optimizer import build_optimizer
from ppocr.postprocess import build_post_process
from ppocr.metrics import build_metric
from ppocr.utils.save_load import load_model
from ppocr.utils.utility import set_seed
import tools.program as program
dist.get_world_size()
......@@ -97,7 +96,8 @@ def main(config, device, logger, vdl_writer):
# build metric
eval_class = build_metric(config['Metric'])
# load pretrain model
pre_best_model_dict = load_model(config, model, optimizer)
pre_best_model_dict = load_model(config, model, optimizer,
config['Architecture']["model_type"])
logger.info('train dataloader has {} iters'.format(len(train_dataloader)))
if valid_dataloader is not None:
logger.info('valid dataloader has {} iters'.format(
......@@ -145,5 +145,7 @@ def test_reader(config, device, logger):
if __name__ == '__main__':
    # training entry point: seed defaults to 1024 when not set in the config
    config, device, logger, vdl_writer = program.preprocess(is_train=True)
    seed = config['Global']['seed'] if 'seed' in config['Global'] else 1024
    set_seed(seed)
    main(config, device, logger, vdl_writer)
    # test_reader(config, device, logger)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment