Commit f9b1a89a authored by HHL's avatar HHL
Browse files

v

parent 60e27226
import os
import torch
from .comm import get_rank, synchronize
def save_checkpoint(checkpoint, model, optimizer=None, best_metric=None, epoch=None):
    """Save model/optimizer state to `checkpoint` from the rank-0 process only.

    Args:
        checkpoint: destination file path.
        model: model to save; unwrapped first if it is a DistributedDataParallel.
        optimizer: optional optimizer whose state is stored under 'opt_param'.
        best_metric: optional best metric value stored under 'best_metric'.
        epoch: optional epoch number stored under 'epoch'.
    """
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        model = model.module
    if get_rank() == 0:
        ckpt_dir = os.path.dirname(checkpoint)
        # Original did makedirs(dirname) unconditionally: it crashed with
        # makedirs('') for a bare filename, and the exists()/makedirs() pair
        # raced when several jobs created the directory concurrently.
        if ckpt_dir:
            os.makedirs(ckpt_dir, exist_ok=True)
        infos = dict()
        infos['model_param'] = model.state_dict()
        if optimizer is not None:
            infos['opt_param'] = optimizer.state_dict()
        if best_metric is not None:
            infos['best_metric'] = best_metric
        if epoch is not None:
            infos['epoch'] = epoch
        torch.save(infos, checkpoint)
    # all ranks wait until rank 0 has finished writing
    synchronize()
def load_checkpoint(checkpoint, model, optimizer=None):
    """Load a checkpoint produced by save_checkpoint into model/optimizer.

    Args:
        checkpoint: path to the checkpoint file.
        model: model to restore; unwrapped first if DistributedDataParallel.
        optimizer: optional optimizer restored from 'opt_param' when present.

    Returns:
        (best_metric, epoch): stored values, or None for each missing key.
    """
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        model = model.module
    # load on CPU so checkpoints restore regardless of the saving device;
    # `state` avoids shadowing the `checkpoint` path argument
    state = torch.load(checkpoint, map_location='cpu')
    model.load_state_dict(state['model_param'])
    if optimizer is not None and 'opt_param' in state:
        optimizer.load_state_dict(state['opt_param'])
    # dict.get replaces the verbose key-presence branches of the original
    return state.get('best_metric'), state.get('epoch')
\ No newline at end of file
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import os
import pickle
import torch
import torch.distributed as dist
def distributed():
    """Return True when launched in multi-process mode (WORLD_SIZE > 1)."""
    world_size = int(os.environ.get("WORLD_SIZE", "1"))
    return world_size > 1
def get_world_size():
    """World size of the active process group, or 1 outside distributed mode."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def get_rank():
    """Global rank of this process, or 0 outside distributed mode."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
def get_local_rank():
    """Rank within the local node: LOCAL_RANK env var, else the global rank."""
    try:
        return int(os.environ['LOCAL_RANK'])
    except KeyError:
        return get_rank()
def is_main_process():
    """Whether this process is the global master (rank 0)."""
    rank = get_rank()
    return rank == 0
def synchronize():
    """
    Helper function to synchronize (barrier) among all processes when
    using distributed training
    """
    # outside an initialized process group there is nothing to wait for
    if not (dist.is_available() and dist.is_initialized()):
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        # single process: nothing to gather
        return [data]
    # serialized to a Tensor
    # NOTE(review): every tensor below is moved to "cuda", so this assumes a
    # CUDA-capable backend (e.g. NCCL) — confirm before using with gloo/CPU.
    # NOTE(review): torch.ByteStorage.from_buffer is deprecated in newer
    # PyTorch releases — verify against the pinned torch version.
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")
    # obtain Tensor size of each rank
    local_size = torch.LongTensor([tensor.numel()]).to("cuda")
    size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
    if local_size != max_size:
        padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        # trim each rank's padding before unpickling its payload
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that process with rank
    0 has the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        # single process: nothing to reduce
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        # NOTE(review): torch.stack requires all values to be tensors of the
        # same shape — confirm callers only pass scalar (e.g. loss) tensors
        values = torch.stack(values, dim=0)
        dist.reduce(values, dst=0)
        if dist.get_rank() == 0 and average:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            values /= world_size
        # NOTE: dist.reduce delivers the result only to dst=0; on other ranks
        # the returned values are not the reduced results
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
import torch
from collections import defaultdict
from .comm import distributed, all_gather
def format_dict(res_dict):
    """Join a metrics dict into a single 'key: value, key: value' string."""
    return ', '.join('%s: %s' % (key, val) for key, val in res_dict.items())
class Counter:
    """Accumulates streaming metric values and reports their running means.

    At most `cache_nums` most-recent values are kept per metric name
    (None keeps everything).
    """

    def __init__(self, cache_nums=1000):
        self.cache_nums = cache_nums
        self.reset()

    def reset(self):
        """Drop all accumulated values."""
        self.metric_dict = defaultdict(list)

    def update(self, metric):
        """Append one value per metric name; tensors are converted via .item()."""
        for name, value in metric.items():
            if isinstance(value, torch.Tensor):
                value = value.item()
            history = self.metric_dict[name]
            history.append(value)
            if self.cache_nums is not None:
                # keep only the newest cache_nums entries
                self.metric_dict[name] = history[-1 * self.cache_nums:]

    def _sync(self):
        """Merge the per-rank value histories across all workers."""
        merged = defaultdict(list)
        for rank_dict in all_gather(self.metric_dict):
            for name, values in rank_dict.items():
                merged[name].extend(values)
        return merged

    def dict_mean(self, sync=True):
        """Mean of each metric rounded to 4 decimals, optionally synced."""
        metrics = self._sync() if (sync and distributed()) else self.metric_dict
        return {name: round(sum(vals) / len(vals), 4) for name, vals in metrics.items()}

    def format_mean(self, sync=True):
        """Formatted 'name: mean' string with 4-decimal means."""
        metrics = self._sync() if (sync and distributed()) else self.metric_dict
        means = {name: '%.4f' % (sum(vals) / len(vals)) for name, vals in metrics.items()}
        return format_dict(means)
\ No newline at end of file
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import os
import sys
from .comm import get_rank
# Shared module-level logger; stays None on non-master ranks so the logging
# helpers in this module can cheaply no-op there.
_default_logger = None


def __init_logger():
    """Create the fallback 'default' stdout logger on the rank-0 process."""
    global _default_logger
    if get_rank() == 0:
        logger = logging.getLogger('default')
        logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
        # attach a stdout handler only once, even if this runs again
        if not any([isinstance(item, logging.StreamHandler) for item in logger.handlers]):
            ch = logging.StreamHandler(stream=sys.stdout)
            ch.setLevel(logging.DEBUG)
            ch.setFormatter(formatter)
            logger.addHandler(ch)
        _default_logger = logger


# initialize the fallback logger at import time
__init_logger()
def setup_logger(name, save_dir, filename="log.txt"):
    """Configure logger `name` on the rank-0 process and make it the module default.

    Args:
        name: logger name passed to logging.getLogger.
        save_dir: directory for the log file; falsy disables file logging.
        filename: log file name inside save_dir.
    """
    global _default_logger
    # don't log results for the non-master process
    if get_rank() == 0:
        logger = logging.getLogger(name)
        logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
        # attach a stdout handler only once per logger
        if not any(isinstance(item, logging.StreamHandler) for item in logger.handlers):
            ch = logging.StreamHandler(stream=sys.stdout)
            ch.setLevel(logging.DEBUG)
            ch.setFormatter(formatter)
            logger.addHandler(ch)
        # close stale file handlers before discarding them — the original
        # dropped them without close(), leaking the open file descriptors
        for handler in logger.handlers:
            if isinstance(handler, logging.FileHandler):
                handler.close()
        logger.handlers = [item for item in logger.handlers
                           if not isinstance(item, logging.FileHandler)]
        if save_dir:
            log_path = os.path.join(save_dir, filename)
            # exist_ok avoids the check-then-create race of the original
            os.makedirs(os.path.dirname(log_path), exist_ok=True)
            fh = logging.FileHandler(log_path)
            fh.setLevel(logging.DEBUG)
            fh.setFormatter(formatter)
            logger.addHandler(fh)
        _default_logger = logger
def info(*args, **kwargs):
    """Log at INFO level via the shared default logger; silent off rank 0."""
    if get_rank() != 0:
        return
    _default_logger.info(*args, **kwargs)
def error(*args, **kwargs):
    """Log at ERROR level via the shared default logger; silent off rank 0."""
    if get_rank() != 0:
        return
    _default_logger.error(*args, **kwargs)
import torch
class AccMetric:
    """Element-wise accuracy, ignoring masked positions (labels_mask == 0)
    and ignore-index labels (-1). Returns (correct, total) counts; total is
    floored at 1e-6 so callers can divide safely."""

    def __call__(self, preds, labels, labels_mask):
        valid = (labels_mask != 0) & (labels != -1)
        hits = (preds == labels) & valid
        correct = float(hits.sum().detach().cpu().item())
        total = float(valid.sum().detach().cpu().item())
        return correct, max(total, 1e-6)
class AccMulMetric:
    """Multi-dimension accuracy: a sample counts as correct only when every
    component along dim 1 matches (min over dim 1) and it is not masked out.
    Returns (correct, total); total is floored at 1e-6."""

    def __call__(self, preds, labels, labels_mask):
        valid = labels_mask != 0
        all_match = (preds == labels).min(1)[0]
        correct = float((all_match & valid).sum().detach().cpu().item())
        total = float(valid.sum().detach().cpu().item())
        return correct, max(total, 1e-6)
\ No newline at end of file
import torch
from .comm import get_world_size
import torch.distributed as dist
class ModelSynchronizer:
    """Block-momentum model averaging for distributed training.

    Every `sync_rate` calls, worker parameters are averaged across ranks and a
    global momentum update is applied on top of the averaged weights.
    NOTE(review): the update math looks like blockwise model-update filtering
    with a Nesterov-style lookahead — confirm against the training recipe.
    """

    # default block momentum keyed by world size
    bm_map = {
        2: 0.65,
        4: 0.75,
        8: 0.875,
        12: 0.8875,
        16: 0.9,
        32: 0.9
    }

    def __init__(self, model, sync_rate, bm=None, blr=1.0, rescale_grad=1.0):
        """
        Args:
            model: model whose parameters/buffers are synchronized in-place.
            sync_rate: number of __call__ invocations between syncs.
            bm: block momentum; defaults to bm_map[world_size] (raises
                KeyError for world sizes not in the table).
            blr: block learning rate applied to the block "gradient".
            rescale_grad: scale applied to local weights in the update.
        """
        if bm is None:
            self.bm = self.bm_map[get_world_size()]
        else:
            self.bm = bm
        self.blr = blr
        self.model = model
        self.sync_rate = sync_rate
        self.rescale_grad = rescale_grad
        self.count = 0
        self.param_align()
        self.momentums = dict()
        self.global_params = dict()
        for k, v in self.model.named_parameters():
            temp = torch.zeros_like(v, requires_grad=False)
            temp.copy_(v.data)
            # BUG FIX: store the detached snapshot, not the live parameter.
            # The original stored `v`, aliasing global_params to the model
            # weights: the momentum update then mutated the live weights and
            # the block gradient (v.data - global_param) collapsed to zero.
            self.global_params[k] = temp
            self.momentums[k] = torch.zeros_like(v, requires_grad=False)

    def param_align(self):
        """Broadcast rank-0 parameters and buffers so all workers start equal."""
        for v in self.model.parameters():
            dist.broadcast_multigpu([v.data], src=0)
        for k, v in self.model.named_buffers():
            # batch counters are rank-local bookkeeping; skip them
            if 'num_batches_tracked' in k:
                continue
            dist.broadcast_multigpu([v.data], src=0)

    def sync_params(self):
        """Average parameters and (non-counter) buffers across all workers."""
        size = float(get_world_size())
        for v in self.model.parameters():
            dist.all_reduce(v.data, op=dist.ReduceOp.SUM)
            v.data /= size
        for k, v in self.model.named_buffers():
            if 'num_batches_tracked' in k:
                continue
            dist.all_reduce(v.data, op=dist.ReduceOp.SUM)
            v.data /= size

    def __call__(self, final_align=False):
        """Advance one step; every sync_rate steps apply the block update.

        Args:
            final_align: when True, re-broadcast rank-0 weights instead of
                averaging (used at the end of training).
        """
        self.count += 1
        if (self.count % self.sync_rate == 0) or final_align:
            with torch.no_grad():
                if final_align:
                    self.param_align()
                else:
                    self.sync_params()
                for k, v in self.model.named_parameters():
                    global_param = self.global_params[k]
                    momentum = self.momentums[k]
                    # block "gradient": averaged weights minus global snapshot
                    grad = v.data * self.rescale_grad - global_param
                    momentum *= self.bm
                    # undo the previous lookahead term, refresh the momentum,
                    # then re-apply it with a new lookahead
                    global_param -= momentum
                    momentum += self.blr * grad
                    global_param += (1.0 + self.bm) * momentum
                    v.detach().copy_(global_param.detach())
from collections import defaultdict
import time
import datetime
class TimeCounter:
    """Estimates remaining training time from elapsed wall-clock time."""

    def __init__(self, start_epoch, num_epochs, epoch_iters):
        """
        Args:
            start_epoch: epoch index training (re)starts from.
            num_epochs: total number of epochs to run.
            epoch_iters: number of batches per epoch.
        """
        self.start_epoch = start_epoch
        self.num_epochs = num_epochs
        self.epoch_iters = epoch_iters
        self.start_time = None

    def reset(self):
        """Start (or restart) the clock; call once before the first step()."""
        self.start_time = time.time()

    def step(self, epoch, batch):
        """Return the estimated remaining time as a 'H:MM:SS…' string.

        Args:
            epoch: current epoch index.
            batch: current batch index within the epoch.
        """
        used = time.time() - self.start_time
        finished = (epoch - self.start_epoch) * self.epoch_iters + batch
        # guard against ZeroDivisionError on the very first batch of the
        # first epoch (the original crashed for finished == 0)
        per_batch = used / max(finished, 1)
        total = (self.num_epochs - self.start_epoch) * self.epoch_iters * per_batch
        return str(datetime.timedelta(seconds=total - used))
def format_table(table, padding=1):
    """Render a 2-D list as a text table with box-drawing borders.

    Args:
        table: list of rows; cells are stringified. Short rows are padded
            with empty cells. Cell text is centered in its column.
        padding: blank columns added on each side of every cell.

    Returns:
        The rendered table as a single newline-terminated string.
    """
    rows = [[str(cell) for cell in row] for row in table]
    num_cols = max(len(row) for row in rows)
    widths = [0] * num_cols
    for row in rows:
        for idx, cell in enumerate(row):
            widths[idx] = max(widths[idx], len(cell))

    def rule(left, mid, right):
        # one horizontal border line, e.g. '├────┼───┤'
        segments = ['─' * (2 * padding + w) for w in widths]
        return left + mid.join(segments) + right + '\n'

    out = rule('┌', '┬', '┐')
    last = len(rows) - 1
    for r_idx, row in enumerate(rows):
        line = '│'
        for c_idx in range(num_cols):
            cell = row[c_idx] if c_idx < len(row) else ''
            width = widths[c_idx]
            lpad = (width - len(cell)) // 2
            rpad = width - len(cell) - lpad
            line += ' ' * (padding + lpad) + cell + ' ' * (padding + rpad) + '│'
        out += line + '\n'
        out += rule('├', '┼', '┤') if r_idx < last else rule('└', '┴', '┘')
    return out
class TicTocCounter:
    """Accumulates named tic/toc wall-clock intervals for lightweight profiling."""

    def __init__(self):
        self.tics = dict()
        self.seps = defaultdict(list)

    def tic(self, name):
        """Record the start time for `name`."""
        self.tics[name] = time.time()

    def toc(self, name):
        """Append the elapsed time since tic(name); ignored without a prior tic."""
        now = time.time()
        start = self.tics.get(name)
        if start is not None:
            self.seps[name].append(now - start)

    def __repr__(self):
        rows = [['Name', 'Mean Time', 'Total Time']]
        for name, durations in self.seps.items():
            total = sum(durations)
            rows.append([name, '%0.4f' % (total / len(durations)), '%0.4f' % total])
        return 'TicTocCount Result:\n' + format_table(rows)

    def reset(self):
        """Drop all recorded start times and intervals."""
        self.tics.clear()
        self.seps.clear()


# module-wide shared counter instance
global_tictoc_counter = TicTocCounter()
import os
def get_filepaths(file_dir, ext='.pdf'):
    """Recursively collect files under `file_dir` with the given extension.

    Args:
        file_dir: directory to scan (searched recursively).
        ext: extension to match, including the dot; the comparison is
            case-insensitive on the file side.

    Returns:
        List of matching file paths.
    """
    matched = []
    for root, _dirs, files in os.walk(file_dir):
        for fname in files:
            if os.path.splitext(fname)[-1].lower() == ext:
                # os.path.join is portable, unlike the original's "/" concat
                matched.append(os.path.join(root, fname))
    return matched
def get_image_file_list(img_file):
    """Return a sorted list of image files from a path (single file or directory).

    The original called `imghdr.what` without importing `imghdr` anywhere, so
    it raised NameError at runtime; the mixed-case entries in the original
    extension set ('gif' and 'GIF') indicate extension matching was intended,
    so files are now filtered by extension instead.

    Args:
        img_file: path to an image file or a directory containing images.

    Returns:
        Sorted list of image file paths.

    Raises:
        Exception: when the path is missing or contains no image files.
    """
    imgs_lists = []
    if img_file is None or not os.path.exists(img_file):
        raise Exception("not found any img file in {}".format(img_file))
    img_end = {'jpg', 'bmp', 'png', 'jpeg', 'rgb', 'tif', 'tiff', 'gif'}

    def _is_image(path):
        # extension without the leading dot, compared case-insensitively
        return os.path.splitext(path)[-1][1:].lower() in img_end

    if os.path.isfile(img_file) and _is_image(img_file):
        imgs_lists.append(img_file)
    elif os.path.isdir(img_file):
        for single_file in os.listdir(img_file):
            file_path = os.path.join(img_file, single_file)
            if os.path.isfile(file_path) and _is_image(file_path):
                imgs_lists.append(file_path)
    if len(imgs_lists) == 0:
        raise Exception("not found any img file in {}".format(img_file))
    return sorted(imgs_lists)
import numpy as np
class DocTypeVocab:
    """Bidirectional word <-> id vocabulary over a fixed key-word list.

    Subclasses override `key_words` to define their own label set; ids are
    assigned by position in the list.
    """
    key_words = [
        'letter', 'form', 'email', 'handwritten', 'advertisement', 'scientific report', \
        'scientific publication', 'specification', 'file folder', 'news article', \
        'budget', 'invoice', 'presentation', 'questionnaire', 'resume', 'memo', 'docbank' ]

    def __init__(self):
        self._words_ids_map = {word: idx for idx, word in enumerate(self.key_words)}
        self._ids_words_map = {idx: word for idx, word in enumerate(self.key_words)}

    def __len__(self):
        return len(self._words_ids_map)

    def word_to_id(self, word):
        """Id of a single word (KeyError when unknown)."""
        return self._words_ids_map[word]

    def words_to_ids(self, words):
        """Ids for a sequence of words."""
        return [self.word_to_id(word) for word in words]

    def id_to_word(self, word_id):
        """Word for a single id (KeyError when out of range)."""
        return self._ids_words_map[word_id]

    def ids_to_words(self, words_id):
        """Words for a sequence of ids."""
        return [self.id_to_word(word_id) for word_id in words_id]
class FunsdTokenTypeVocab(DocTypeVocab):
    """BIO-style token tags (B-/I- prefixes), presumably for the FUNSD dataset."""
    key_words = ["O", "B-HEADER", "I-HEADER", "B-QUESTION", "I-QUESTION", "B-ANSWER", "I-ANSWER"]


class FunsdEntityVocab(DocTypeVocab):
    """Entity-level labels, presumably for the FUNSD dataset."""
    key_words = ["OTHER", "HEADER", "QUESTION", "ANSWER"]


class HuaweikieEntityVocab(DocTypeVocab):
    """Receipt-field entity labels for a 'Huawei KIE' dataset."""
    key_words = ["DATE", "COMPANY", "TAX", "TOTAL", "NAME", "CNT", "PRICE", "OTHER", "PRICE&CNT", "CNT&NAME"]


class CordEntityVocab(DocTypeVocab):
    """Entity labels, presumably for the CORD receipt dataset."""
    key_words = ['OTHER', 'MENU_CNT', 'MENU_UNITPRICE', 'MENU_NM', 'MENU_NUM', 'MENU_PRICE', 'MENU_DISCOUNTPRICE', \
        'MENU_ITEMSUBTOTAL', 'MENU_ETC', 'MENU_SUB_CNT', 'MENU_SUB_ETC', 'MENU_SUB_NM', 'MENU_SUB_PRICE', 'MENU_SUB_UNITPRICE', \
        'MENU_VATYN', 'SUB_TOTAL_DISCOUNT_PRICE', 'SUB_TOTAL_ETC', 'SUB_TOTAL_OTHERSVC_PRICE', 'SUB_TOTAL_SERVICE_PRICE', 'SUB_TOTAL_SUBTOTAL_PRICE', \
        'SUB_TOTAL_TAX_PRICE', 'TOTAL_CASHPRICE', 'TOTAL_CHANGEPRICE', 'TOTAL_CREDITCARDPRICE', 'TOTAL_EMONEYPRICE', 'TOTAL_MENUQTY_CNT', 'TOTAL_MENUTYPE_CNT', \
        'TOTAL_TOTAL_ETC', 'TOTAL_TOTAL_PRICE', 'VOID_MENU_NM', 'VOID_MENU_PRICE']


class SroieEntityVocab(DocTypeVocab):
    """Entity labels, presumably for the SROIE receipt dataset."""
    key_words = ['O', 'COMPANY', 'ADDRESS', 'DATE', 'TOTAL']
\ No newline at end of file
import cv2
import json
import torch
import numpy as np
import sys
sys.path.append('./')
from layoutlmft.models.graphdoc.configuration_graphdoc import GraphDocConfig
from layoutlmft.models.graphdoc.modeling_graphdoc import GraphDocForEncode
from transformers import AutoModel, AutoTokenizer
def read_ocr(json_path):
    """Load OCR results from a JSON file.

    Args:
        json_path: path to a JSON list of {'label': str, 'points': [[x, y], ...]}.

    Returns:
        (polys, contents): polygon point lists and their text labels, in order.
    """
    # context manager closes the handle — the original `json.load(open(...))`
    # leaked the open file
    with open(json_path, 'r') as f:
        ocr_info = json.load(f)
    polys = []
    contents = []
    for info in ocr_info:
        contents.append(info['label'])
        polys.append(info['points'])
    return polys, contents
def polys2bboxes(polys):
    """Convert polygons to axis-aligned [x1, y1, x2, y2] boxes (int64 array)."""
    boxes = []
    for poly in polys:
        pts = np.asarray(poly).reshape(-1, 2)
        xs = pts[:, 0]
        ys = pts[:, 1]
        boxes.append([xs.min(), ys.min(), xs.max(), ys.max()])
    return np.array(boxes).astype('int64')
def mean_pooling(model_output, attention_mask):
    """Average token embeddings over the valid (non-masked) positions."""
    embeddings = model_output[0]  # first element holds all token embeddings
    mask = attention_mask.unsqueeze(-1).expand(embeddings.size()).float()
    summed = torch.sum(embeddings * mask, 1)
    counts = torch.clamp(mask.sum(1), min=1e-9)  # avoid division by zero on empty rows
    return summed / counts
def extract_sentence_embeddings(contents, tokenizer, sentence_bert):
    """Encode a list of strings into mean-pooled sentence embeddings (numpy)."""
    encoded = tokenizer(contents, padding=True, truncation=True, return_tensors='pt')
    encoded = encoded.to(sentence_bert.device)
    with torch.no_grad():
        outputs = sentence_bert(**encoded)
    pooled = mean_pooling(outputs, encoded['attention_mask'])
    return pooled.cpu().numpy()
def merge2d(tensors, pad_id):
    """Stack variable-size 2-D tensors into one padded (N, max_d1, max_d2) batch."""
    rows = max(t.shape[0] for t in tensors)
    cols = max(t.shape[1] for t in tensors)
    out = tensors[0].new(len(tensors), rows, cols).fill_(pad_id)
    for idx, t in enumerate(tensors):
        out[idx, :t.shape[0], :t.shape[1]] = t
    return out
def merge3d(tensors, pad_id):
    """Stack variable-size 3-D tensors into one padded (N, d1, d2, d3) batch."""
    d1 = max(t.shape[0] for t in tensors)
    d2 = max(t.shape[1] for t in tensors)
    d3 = max(t.shape[2] for t in tensors)
    out = tensors[0].new(len(tensors), d1, d2, d3).fill_(pad_id)
    for idx, t in enumerate(tensors):
        out[idx, :t.shape[0], :t.shape[1], :t.shape[2]] = t
    return out
def mask1d(tensors, pad_id):
    """Build an (N, max_len) mask: 1 over each tensor's length, pad_id elsewhere."""
    lengths = [len(t) for t in tensors]
    out = tensors[0].new(len(tensors), max(lengths)).fill_(pad_id)
    for idx, n in enumerate(lengths):
        out[idx, :n] = 1
    return out
# --- demo: encode one document image + its OCR results with GraphDoc ---
model_name_or_path = 'pretrained_model/graphdoc'
sentence_model_path = 'pretrained_model/sentence-bert'
image_path = 'samples/001.jpg'
ocr_path = 'samples/001.json'
# init model
config = GraphDocConfig.from_pretrained(model_name_or_path)
graphdoc = GraphDocForEncode.from_pretrained(model_name_or_path, config=config)
graphdoc = graphdoc.cuda().eval()
tokenizer = AutoTokenizer.from_pretrained(sentence_model_path)
sentence_bert = AutoModel.from_pretrained(sentence_model_path)
sentence_bert = sentence_bert.cuda().eval()
# prepare input data
input_H = 512; input_W = 512
image = cv2.imread(image_path)
H, W = image.shape[:2]
ratio_H = input_H / H; ratio_W = input_W / W
image = cv2.resize(image, dsize=(input_W, input_H))
polys, contents = read_ocr(ocr_path)
bboxes = polys2bboxes(polys)
# rescale boxes from the original image into the resized 512x512 space
bboxes[:, 0::2] = bboxes[:, 0::2] * ratio_W
bboxes[:, 1::2] = bboxes[:, 1::2] * ratio_H
sentence_embeddings = extract_sentence_embeddings(contents, tokenizer, sentence_bert)
# append global node: a whole-page box with a zero embedding at index 0
global_bbox = np.array([0, 0, 512,512]).astype('int64')
bboxes = np.concatenate([global_bbox[None, :], bboxes], axis=0)
global_embed = np.zeros_like(sentence_embeddings[0])
sentence_embeddings = np.concatenate([global_embed[None, :], sentence_embeddings], axis=0)
# batchify (batch of one) and move everything to the GPU
input_images = merge3d([torch.from_numpy(image.transpose(2,0,1).astype(np.float32))], 0).cuda()
input_embeds = merge2d([torch.from_numpy(sentence_embeddings)], 0).cuda()
attention_mask = mask1d([torch.from_numpy(sentence_embeddings)], 0).cuda()
input_bboxes = merge2d([torch.from_numpy(bboxes)], 0).cuda()
input_data=dict(image=input_images, inputs_embeds=input_embeds, attention_mask=attention_mask, bbox=input_bboxes, return_dict=True)
output = graphdoc(**input_data)
print(output)
\ No newline at end of file
[{"label": "Nusr-Et", "points": [[420.24811887381634, 452.6062301028318], [633.7386395381061, 448.8499058420508], [631.85766578776, 505.1947697537657], [421.1886057489895, 506.1338508189608]]}, {"label": "Miami", "points": [[645.9649689153563, 445.09358158126975], [819.9550408223764, 438.52001412490307], [821.8360145727224, 488.5236964274483], [643.143508289837, 500.4993644277893]]}, {"label": "Miami,", "points": [[489.83292529225054, 556.1694901534349], [590.3390895938634, 555.2571635091425], [591.2527819966053, 589.0132493479612], [490.7466176949925, 592.6625559251307]]}, {"label": "FL", "points": [[611.4007253023769, 556.0079043231648], [651.1022920938441, 554.7690848810543], [652.3429660560774, 585.1201612127597], [612.6413992646103, 586.3589806548703]]}, {"label": "33131", "points": [[659.7870098294775, 553.530265438944], [744.7731762424618, 547.9555779494469], [745.3935132235785, 580.7842931653732], [659.1666728483608, 584.5007514917046]]}, {"label": "40", "points": [[359.40178177889965, 676.5885137190734], [395.7437348829449, 678.6425305287446], [396.429432111323, 706.714093594249], [357.34469009376505, 707.3987658641394]]}, {"label": "David", "points": [[405.34349608023985, 676.5885137190734], [491.7413468558943, 675.2191691792929], [491.7413468558943, 707.3987658641394], [405.3434960802398, 708.76811040392]]}, {"label": "P", "points": [[501.3411080531893, 676.5885137190734], [523.9691165896702, 677.2731859889639], [522.5977221329138, 704.6600767845779], [499.9697135964328, 706.714093594249]]}, {"label": "--------------------------------------", "points": [[326.4883148167455, 724.515572611398], [881.2173725747178, 708.0834381340296], [881.2173725747178, 721.7768835318367], [325.80261758836735, 734.100984389863]]}, {"label": "11’26’21", "points": [[475.28461337481724, 752.5871356769024], [592.5388394274912, 751.9024634070121], [592.5388394274912, 782.0280432821876], [475.97031060319546, 783.3973878219682]]}, {"label": "09:09PM", "points": 
[[645.1196471489627, 749.6499937124789], [775.256639870901, 745.1470368982017], [773.9681547944459, 776.6677345981418], [644.4754046107349, 779.2408527777287]]}, {"label": "--------------------------------------", "points": [[328.54540650188017, 802.568211378898], [879.8459781179614, 788.8747659810911], [880.5316753463394, 801.1988668391173], [327.85970927350195, 814.8923122369243]]}, {"label": "1", "points": [[345.00213998295726, 859.3960097797973], [361.45887346403435, 858.0266652400164], [360.77317623565597, 886.0982283055208], [347.74492889647, 886.0982283055208]]}, {"label": "Shrimp", "points": [[417.68604619104764, 835.4324803336349], [524.6548138180484, 836.8018248734155], [524.6548138180484, 871.7201106378236], [419.7431378761822, 866.927404748591]]}, {"label": "with", "points": [[530.8260888734521, 834.7478080637446], [591.853142199113, 833.3784635239638], [591.1674449707349, 863.5040433991392], [530.8260888734521, 864.87338793892]]}, {"label": "Tender", "points": [[598.024417254517, 830.6397744444023], [700.8790015112484, 830.6397744444023], [702.1296958455293, 860.9679477744381], [600.7672061680299, 862.8193711292488]]}, {"label": "$12", "points": [[759.1446837963579, 845.6270097272072], [818.4693799161158, 846.19116229334], [819.0343770220182, 875.5270957322454], [759.7096809022603, 878.9120111290422]]}, {"label": "Green", "points": [[420.14642025488405, 874.39879059998], [512.2409485169845, 875.5270957322454], [513.9359398346917, 901.478113774354], [421.27641446668895, 904.298876605018]]}, {"label": "1", "points": [[349.17822838229046, 934.7158615419929], [361.72974943403386, 934.7158615419929], [363.07455526100637, 962.0193731700325], [348.72995977329964, 962.0193731700325]]}, {"label": "Pepper", "points": [[421.79774303880566, 913.2311310805845], [529.382209196606, 916.36432093954], [528.9339405876153, 945.4582267726972], [420.4529372118332, 945.9058253239764]]}, {"label": "Salt", "points": [[537.4510441584412, 909.2027441190705], [594.8294261092681, 
908.3075470165121], [594.8294261092681, 937.401452849669], [537.0027755494503, 938.744248503507]]}, {"label": "Fresh", "points": [[601.5534552441306, 908.7551455677913], [685.3796851254168, 906.5171528113945], [685.8279537344076, 936.9538542983895], [601.1051866351398, 936.058657195831]]}, {"label": "Squid", "points": [[422.6942802567874, 949.9342122854904], [509.2101217920186, 949.0390151829316], [509.65839040100946, 981.7137094263235], [421.7977430388057, 985.7420963878375]]}, {"label": "$15", "points": [[762.9301544808312, 921.2879050036127], [817.618924777713, 922.6307006574502], [816.2741189507407, 955.7529934521222], [762.0336172628495, 954.4101977982839]]}, {"label": "Subtotal", "points": [[457.44312831451543, 1105.174196543061], [581.2204949484362, 1095.8464721544822], [580.4420209444493, 1132.380059343082], [459.00007632248924, 1137.8212319030865]]}, {"label": "$27", "points": [[702.6624395703964, 1098.1784032516268], [757.9340938534677, 1101.2876447144865], [754.8201978375203, 1133.934680074512], [701.8839655664093, 1133.934680074512]]}, {"label": "Tax", "points": [[485.7520744928464, 1142.4179942629785], [537.0299072699112, 1141.70686670329], [539.8786757575258, 1170.151969090833], [485.7520744928464, 1169.4408415311445]]}, {"label": "$0.3", "points": [[704.3950559172756, 1137.4401013451586], [767.0679626447993, 1138.8623564645359], [767.7801547667029, 1170.8630966505216], [705.1072480391794, 1170.8630966505216]]}, {"label": "Total", "points": [[445.1571235443366, 1182.2411376055388], [517.8007199785118, 1178.685499807096], [519.9372963442229, 1207.8417297543276], [450.85466051956615, 1210.6862399930817]]}, {"label": "Due", "points": [[550.5615575860811, 1172.9964793295874], [608.9613115821828, 1175.1298620086532], [608.249119460279, 1200.0193265977532], [550.5615575860811, 1204.9972195155733]]}, {"label": "$27.3", "points": [[703.6828637953721, 1175.1298620086532], [784.1605735704876, 1178.685499807096], [786.2971499361984, 1211.3973675527704], 
[703.682863795372, 1210.6862399930817]]}, {"label": "Thank", "points": [[402.1162651965782, 1309.1170092001926], [498.12339271866495, 1304.0042828476462], [498.12339271866495, 1330.2070054044466], [408.5167403647171, 1338.5151857273347]]}, {"label": "you", "points": [[505.1639154036181, 1305.2824644357831], [558.287859299173, 1301.447919671373], [558.9279068159868, 1328.9288238163101], [505.803962920432, 1335.9588225510613]]}, {"label": "for", "points": [[565.9684295009399, 1294.4179209366218], [605.0113280265887, 1294.4179209366218], [606.2914230602164, 1324.4551882578319], [566.6084770177538, 1326.3724606400367]]}, {"label": "dining", "points": [[611.4118031947278, 1296.3351933188267], [693.3378853469085, 1301.447919671373], [691.417742796467, 1340.432458109539], [611.4118031947278, 1332.1242777866514]]}, {"label": "with", "points": [[699.0983129982337, 1305.2824644357831], [759.2627795787421, 1307.1997368179877], [757.3426370283, 1339.1542765214028], [700.3784080318617, 1337.237004139198]]}, {"label": "us!", "points": [[764.3831597132529, 1311.0342815823979], [811.7466759574827, 1311.0342815823979], [811.1066284406688, 1342.9888212858125], [766.3033022636948, 1341.071548903608]]}, {"label": "A", "points": [[391.8755049275553, 1347.4624568442907], [416.1973105664841, 1346.8233660502226], [418.1174531169258, 1373.026088607023], [392.51555244436935, 1373.665179401091]]}, {"label": "Service", "points": [[425.15797580187893, 1344.2670028739492], [532.6859586266164, 1336.5979133451297], [533.3260061434303, 1365.9960898722716], [423.23783325143705, 1371.7479070188863]]}, {"label": "Charge", "points": [[542.926718895639, 1334.6806409629246], [643.4141790354234, 1335.9588225510613], [643.4141790354234, 1372.3869978129542], [541.0065763451972, 1367.9133622544764]]}, {"label": "has", "points": [[651.7347967540043, 1340.432458109539], [700.3784080318616, 1344.2670028739492], [699.7383605150478, 1374.3042701951595], [651.7347967540043, 1370.4697254307496]]}, {"label": 
"been", "points": [[708.0589782336287, 1346.1842752561545], [781.0243951504146, 1353.2142739909054], [780.3843476336009, 1381.973359723979], [708.6990257504426, 1376.2215425773643]]}, {"label": "added", "points": [[789.3450128689957, 1353.2142739909054], [885.9921879078964, 1360.883363519725], [884.072045357455, 1392.8379032231398], [788.0649178353677, 1386.4469952824568]]}, {"label": "for", "points": [[405.9565502974615, 1380.6951781358425], [448.83973392399366, 1379.4169965477058], [448.1996864071798, 1406.2588098985746], [407.23664533108933, 1410.7324454570526]]}, {"label": "your", "points": [[454.60016157531896, 1381.973359723979], [519.8850082903383, 1375.5824517832964], [519.8850082903383, 1404.34153751637], [454.60016157531896, 1411.371536251121]]}, {"label": "convenience.", "points": [[528.8456735257329, 1371.1088162248177], [709.9791207840702, 1383.2515413121153], [707.4189307168148, 1419.6797165740093], [525.0053884248495, 1401.1460835460277]]}]
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment