Commit dad42587 authored by myhloli

Merge remote-tracking branch 'origin/master'

parents 4d6dcb00 351078f1
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
name: mineru
on:
  push:
    branches:
      - "master"
    paths-ignore:
      - "cmds/**"
      - "**.md"
  pull_request:
    branches:
      - "master"
    paths-ignore:
      - "cmds/**"
      - "**.md"
  workflow_dispatch:
jobs:
  pdf-test:
    runs-on: ubuntu-latest
    timeout-minutes: 180
    strategy:
      fail-fast: true
    steps:
      - name: PDF benchmark
        uses: actions/checkout@v3
        with:
          fetch-depth: 2
      - name: check-requirements
        run: |
          pip install -r requirements.txt
          pip install -r requirements-qa.txt
      - name: get-benchmark-result
        run: |
          echo "start test"
          echo $GITHUB_WORKSPACE
          tree
          cd $GITHUB_WORKSPACE && python tests/benchmark/benchmark.py
@@ -33,6 +33,7 @@ jobs:
        run: |
          pip install -r requirements.txt
          pip install -r requirements-qa.txt
          pip install magic-pdf
      - name: test_cli
        run: |
          cp magic-pdf.template.json ~/magic-pdf.json
@@ -40,3 +41,6 @@ jobs:
          cd $GITHUB_WORKSPACE && export PYTHONPATH=. && pytest -s -v tests/test_unit.py
          cd $GITHUB_WORKSPACE && pytest -s -v tests/test_cli/test_cli.py
      - name: benchmark
        run: |
          cd $GITHUB_WORKSPACE && pytest -s -v tests/test_cli/test_bench.py
{
"temp-output-dir": "/tmp/"
}
@@ -2,6 +2,6 @@ import os
conf = {
    "code_path": os.environ.get('GITHUB_WORKSPACE'),
    "pdf_dev_path": os.environ.get('GITHUB_WORKSPACE') + "/tests/test_cli/pdf_dev",
    "pdf_res_path": "/tmp/magic-pdf"
}
"""
calculate_score
"""
import os
import re
import json
from Levenshtein import distance
from lib import scoring
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
from nltk.tokenize import word_tokenize
import nltk
nltk.download('punkt')
class Scoring:
"""
calculate_score
"""
def __init__(self, result_path):
"""
init
"""
self.edit_distances = []
self.bleu_scores = []
self.sim_scores = []
self.filenames = []
self.score_dict = {}
self.anntion_cnt = 0
self.fw = open(result_path, "w+", encoding='utf-8')
def simple_bleu_score(self, candidate, reference):
"""
get bleu score
"""
candidate_tokens = word_tokenize(candidate)
reference_tokens = word_tokenize(reference)
return sentence_bleu([reference_tokens], candidate_tokens, smoothing_function=SmoothingFunction().method1)
def preprocess_string(self, s):
"""
preprocess_string
"""
sub_enter = re.sub(r'\n+', '\n', s)
return re.sub(r' ', ' ', sub_enter)
def calculate_similarity(self, annotion, actual, tool_type):
"""
calculate_similarity
"""
class_dict = {}
edit_distances = []
bleu_scores = []
sim_scores = list()
total_file = 0
for filename in os.listdir(annotion):
if filename.endswith('.md') and not filename.startswith('.'):
total_file = total_file + 1
with open(os.path.join(annotion, filename), 'r', encoding='utf-8') as file_a:
content_a = file_a.read()
self.anntion_cnt = self.anntion_cnt + 1
filepath_b = os.path.join(actual, filename)
if os.path.exists(filepath_b):
with open(filepath_b, 'r', encoding='utf-8') as file_b:
content_b = file_b.read()
self.filenames.append(filename)
edit_dist = distance(self.preprocess_string(content_b),self.preprocess_string(content_a)) / max(len(content_a), len(content_b))
self.edit_distances.append(edit_dist)
edit_distances.append(edit_dist)
bleu_score = self.simple_bleu_score(content_b, content_a)
bleu_scores.append(bleu_score)
self.bleu_scores.append(bleu_score)
score = scoring.score_text(content_b, content_a)
sim_scores.append(score)
self.sim_scores.append(score)
class_dict[filename] = {"edit_dist": edit_dist, "bleu_score": bleu_score, "sim_score": score}
self.score_dict[filename] = {"edit_dist": edit_dist, "bleu_score": bleu_score, "sim_score": score}
else:
print(f"File {filename} not found in actual directory.")
class_average_edit_distance = sum(edit_distances) / len(edit_distances) if edit_distances else 0
class_average_bleu_score = sum(bleu_scores) / len(bleu_scores) if bleu_scores else 0
class_average_sim_score = sum(sim_scores) / len(sim_scores) if sim_scores else 0
self.fw.write(json.dumps(class_dict, ensure_ascii=False) + "\n")
ratio = len(class_dict)/total_file
self.fw.write(f"{tool_type} extract ratio: {ratio}" + "\n")
self.fw.write(f"{tool_type} Average Levenshtein Distance: {class_average_edit_distance}" + "\n")
self.fw.write(f"{tool_type} Average BLEU Score: {class_average_bleu_score}" + "\n")
self.fw.write(f"{tool_type} Average Sim Score: {class_average_sim_score}" + "\n")
print (f"{tool_type} extract ratio: {ratio}")
print (f"{tool_type} Average Levenshtein Distance: {class_average_edit_distance}")
print (f"{tool_type} Average BLEU Score: {class_average_bleu_score}")
print (f"{tool_type} Average Sim Score: {class_average_sim_score}")
return self.score_dict
def summary_scores(self):
"""
calculate the average of edit distance, bleu score and sim score
"""
over_all_dict = dict()
average_edit_distance = sum(self.edit_distances) / len(self.edit_distances) if self.edit_distances else 0
average_bleu_score = sum(self.bleu_scores) / len(self.bleu_scores) if self.bleu_scores else 0
average_sim_score = sum(self.sim_scores) / len(self.sim_scores) if self.sim_scores else 0
over_all_dict["average_edit_distance"] = average_edit_distance
over_all_dict["average_bleu_score"] = average_bleu_score
over_all_dict["average_sim_score"] = average_sim_score
self.fw.write(json.dumps(over_all_dict, ensure_ascii=False) + "\n")
return over_all_dict
def calculate_similarity_total(self, tool_type, download_dir):
"""
calculate the average of edit distance, bleu score and sim score
"""
annotion = os.path.join(download_dir, "annotations", "cleaned")
actual = os.path.join(download_dir, tool_type, "cleaned")
score = self.calculate_similarity(annotion, actual, tool_type)
return score
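A minimal usage sketch of the class above; the result path and download directory are hypothetical, and "magic-pdf" is a tool_type whose cleaned output would live under <download_dir>/magic-pdf/cleaned:

scorer = Scoring("/tmp/benchmark_result.txt")
# compares <download_dir>/annotations/cleaned against <download_dir>/magic-pdf/cleaned
scorer.calculate_similarity_total("magic-pdf", "/tmp/benchmark_data")
print(scorer.summary_scores())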
import subprocess
import os


def check_shell(cmd):
    """
    shell successful
    """
    res = os.system(cmd)
    assert res == 0


def count_folders_and_check_contents(file_path):
    """
    get the file size
    """
    if os.path.exists(file_path):
        folder_count = os.path.getsize(file_path)
        assert folder_count > 0


if __name__ == "__main__":
......
"""
clean data
"""
import argparse
import os
import re
import htmltabletomd # type: ignore
import pypandoc
import argparse
parser = argparse.ArgumentParser(description="get tool type")
parser.add_argument(
"--tool_name",
type=str,
required=True,
help="input tool name",
)
parser.add_argument(
"--download_dir",
type=str,
required=True,
help="input download dir",
)
args = parser.parse_args()
def clean_markdown_images(content):
"""
clean markdown images
"""
pattern = re.compile(r'!\[[^\]]*\]\([^)]*\)', re.IGNORECASE)
cleaned_content = pattern.sub('', content)
return cleaned_content
def clean_ocrmath_photo(content):
"""
clean ocrmath photo
"""
pattern = re.compile(r'\\includegraphics\[.*?\]\{.*?\}', re.IGNORECASE)
cleaned_content = pattern.sub('', content)
return cleaned_content
def convert_html_table_to_md(html_table):
"""
convert html table to markdown table
"""
lines = html_table.strip().split('\n')
md_table = ''
if lines and '<tr>' in lines[0]:
in_thead = True
for line in lines:
if '<th>' in line:
cells = re.findall(r'<th>(.*?)</th>', line)
md_table += '| ' + ' | '.join(cells) + ' |\n'
in_thead = False
elif '<td>' in line and not in_thead:
cells = re.findall(r'<td>(.*?)</td>', line)
md_table += '| ' + ' | '.join(cells) + ' |\n'
md_table = md_table.rstrip() + '\n'
return md_table
def convert_latext_to_md(content):
"""
convert latex table to markdown table
"""
tables = re.findall(r'\\begin\{tabular\}(.*?)\\end\{tabular\}', content, re.DOTALL)
placeholders = []
for table in tables:
placeholder = f"<!-- TABLE_PLACEHOLDER_{len(placeholders)} -->"
replace_str = f"\\begin{{tabular}}{table}cl\\end{{tabular}}"
content = content.replace(replace_str, placeholder)
try:
pypandoc.convert_text(replace_str, format="latex", to="md", outputfile="output.md", encoding="utf-8")
except:
markdown_string = replace_str
else:
markdown_string = open('output.md', 'r', encoding='utf-8').read()
placeholders.append((placeholder, markdown_string))
new_content = content
for placeholder, md_table in placeholders:
new_content = new_content.replace(placeholder, md_table)
# 写入文件
return new_content
def convert_htmltale_to_md(content):
"""
convert html table to markdown table
"""
tables = re.findall(r'<table>(.*?)</table>', content, re.DOTALL)
placeholders = []
for table in tables:
placeholder = f"<!-- TABLE_PLACEHOLDER_{len(placeholders)} -->"
content = content.replace(f"<table>{table}</table>", placeholder)
try:
convert_table = htmltabletomd.convert_table(table)
except:
convert_table = table
placeholders.append((placeholder,convert_table))
new_content = content
for placeholder, md_table in placeholders:
new_content = new_content.replace(placeholder, md_table)
# 写入文件
return new_content
def clean_data(prod_type, download_dir):
"""
clean data
"""
tgt_dir = os.path.join(download_dir, prod_type, "cleaned")
if not os.path.exists(tgt_dir):
os.makedirs(tgt_dir)
source_dir = os.path.join(download_dir, prod_type)
filenames = os.listdir(source_dir)
for filename in filenames:
if filename.endswith('.md'):
input_file = os.path.join(source_dir, filename)
output_file = os.path.join(tgt_dir, "cleaned_" + filename)
with open(input_file, 'r', encoding='utf-8') as fr:
content = fr.read()
new_content = clean_markdown_images(content)
with open(output_file, 'w', encoding='utf-8') as fw:
fw.write(new_content)
if __name__ == '__main__':
tool_type = args.tool_name
download_dir = args.download_dir
clean_data(tool_type, download_dir)
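For reference, a minimal sketch of what the image-stripping regex in clean_markdown_images removes; the sample string is invented:

import re

sample = "Intro text ![figure 1](images/fig1.png) and more text."
pattern = re.compile(r'!\[[^\]]*\]\([^)]*\)', re.IGNORECASE)
print(pattern.sub('', sample))  # -> "Intro text  and more text."

The script itself would be invoked along the lines of python clean.py --tool_name magic-pdf --download_dir /tmp/benchmark_data (the file name clean.py is an assumption).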
"""
Calculate simscore, refer to (https://github.com/VikParuchuri/marker?tab=readme-ov-file)
"""
import math
from rapidfuzz import fuzz
import re
import regex
from statistics import mean
CHUNK_MIN_CHARS = 25
def chunk_text(text, chunk_len=500):
chunks = [text[i:i+chunk_len] for i in range(0, len(text), chunk_len)]
chunks = [c for c in chunks if c.strip() and len(c) > CHUNK_MIN_CHARS]
return chunks
def overlap_score(hypothesis_chunks, reference_chunks):
if len(reference_chunks) > 0:
length_modifier = len(hypothesis_chunks) / len(reference_chunks)
else:
length_modifier = 0
search_distance = max(len(reference_chunks) // 5, 10)
chunk_scores = []
for i, hyp_chunk in enumerate(hypothesis_chunks):
max_score = 0
total_len = 0
i_offset = int(i * length_modifier)
chunk_range = range(max(0, i_offset-search_distance), min(len(reference_chunks), i_offset+search_distance))
for j in chunk_range:
ref_chunk = reference_chunks[j]
score = fuzz.ratio(hyp_chunk, ref_chunk, score_cutoff=30) / 100
if score > max_score:
max_score = score
total_len = len(ref_chunk)
chunk_scores.append(max_score)
return chunk_scores
def score_text(hypothesis, reference):
# Returns a 0-1 alignment score
hypothesis_chunks = chunk_text(hypothesis)
reference_chunks = chunk_text(reference)
chunk_scores = overlap_score(hypothesis_chunks, reference_chunks)
if len(chunk_scores) > 0:
mean_score = mean(chunk_scores)
return mean_score
else:
return 0
#return mean(chunk_scores)
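A minimal sanity check for score_text; both strings are invented, and the score lands near 1.0 because corresponding chunks differ by only one word per sentence:

if __name__ == "__main__":
    hyp = "The quick brown fox jumps over the lazy dog. " * 20
    ref = "The quick brown fox jumped over the lazy dog. " * 20
    print(score_text(hyp, ref))  # close to 1.0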
{
    "bucket_info": {
        "bucket-name-1": ["ak", "sk", "endpoint"],
        "bucket-name-2": ["ak", "sk", "endpoint"]
    },
    "temp-output-dir": "/tmp",
    "models-dir": "/tmp/models",
    "device-mode": "cpu"
}
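Assuming this is the magic-pdf.template.json that the test_cli step copies to ~/magic-pdf.json, a minimal sketch of reading it back after that copy:

import json
import os

# the CI step does: cp magic-pdf.template.json ~/magic-pdf.json
with open(os.path.expanduser("~/magic-pdf.json"), encoding="utf-8") as f:
    cfg = json.load(f)
print(cfg["device-mode"])      # "cpu"
print(cfg["temp-output-dir"])  # "/tmp"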
[{"layout_dets": [{"category_id": 2, "poly": [87.20602416992188, 74.71974182128906, 199.11016845703125, 74.71974182128906, 199.11016845703125, 186.71868896484375, 87.20602416992188, 186.71868896484375], "score": 0.999997615814209}, {"category_id": 1, "poly": [88.9511489868164, 2102.3251953125, 239.1959686279297, 2102.3251953125, 239.1959686279297, 2129.5693359375, 88.9511489868164, 2129.5693359375], "score": 0.9999799728393555}, {"category_id": 1, "poly": [116.55731964111328, 1038.072509765625, 404.0063781738281, 1038.072509765625, 404.0063781738281, 1999.6717529296875, 116.55731964111328, 1999.6717529296875], "score": 0.9999767541885376}, {"category_id": 1, "poly": [118.98762512207031, 409.9966735839844, 241.70457458496094, 409.9966735839844, 241.70457458496094, 497.07415771484375, 118.98762512207031, 497.07415771484375], "score": 0.999961256980896}, {"category_id": 1, "poly": [530.2610473632812, 991.6522827148438, 656.4911499023438, 991.6522827148438, 656.4911499023438, 1020.6025390625, 530.2610473632812, 1020.6025390625], "score": 0.9999524354934692}, {"category_id": 1, "poly": [85.32998657226562, 2012.22607421875, 629.1658325195312, 2012.22607421875, 629.1658325195312, 2086.741943359375, 85.32998657226562, 2086.741943359375], "score": 0.9961878657341003}, {"category_id": 3, "poly": [88.28943634033203, 515.3642578125, 1571.0567626953125, 515.3642578125, 1571.0567626953125, 770.622314453125, 88.28943634033203, 770.622314453125], "score": 0.9950484037399292}, {"category_id": 1, "poly": [89.6951675415039, 355.63616943359375, 375.6934814453125, 355.63616943359375, 375.6934814453125, 385.2062072753906, 89.6951675415039, 385.2062072753906], "score": 0.968115508556366}, {"category_id": 4, "poly": [89.67754364013672, 769.5524291992188, 604.6519165039062, 769.5524291992188, 604.6519165039062, 797.0578002929688, 89.67754364013672, 797.0578002929688], "score": 0.9242470264434814}, {"category_id": 1, "poly": [88.6801986694336, 2160.639892578125, 1512.985595703125, 2160.639892578125, 1512.985595703125, 2237.59375, 88.6801986694336, 2237.59375], "score": 0.7926853895187378}, {"category_id": 2, "poly": [88.54387664794922, 2160.384521484375, 1513.3607177734375, 2160.384521484375, 1513.3607177734375, 2237.283935546875, 88.54387664794922, 2237.283935546875], "score": 0.31747689843177795}, {"category_id": 2, "poly": [89.85008239746094, 355.71478271484375, 374.5365295410156, 355.71478271484375, 374.5365295410156, 385.2820129394531, 89.85008239746094, 385.2820129394531], "score": 0.2773781418800354}, {"category_id": 15, "poly": [349.0, 319.0, 376.0, 319.0, 376.0, 339.0, 349.0, 339.0], "score": 0.99, "text": "Go"}, {"category_id": 15, "poly": [143.0, 412.0, 207.0, 412.0, 207.0, 439.0, 143.0, 439.0], "score": 1.0, "text": "Home"}, {"category_id": 15, "poly": [140.0, 436.0, 229.0, 436.0, 229.0, 470.0, 140.0, 470.0], "score": 0.88, "text": " Journals"}, {"category_id": 15, "poly": [138.0, 465.0, 239.0, 465.0, 239.0, 499.0, 138.0, 499.0], "score": 1.0, "text": "About Us"}, {"category_id": 15, "poly": [529.0, 994.0, 654.0, 994.0, 654.0, 1021.0, 529.0, 1021.0], "score": 1.0, "text": "Journal Menu"}, {"category_id": 15, "poly": [140.0, 1045.0, 320.0, 1045.0, 320.0, 1072.0, 140.0, 1072.0], "score": 0.96, "text": " About this Journal \u00b7"}, {"category_id": 15, "poly": [138.0, 1069.0, 382.0, 1075.0, 381.0, 1109.0, 137.0, 1103.0], "score": 0.98, "text": "Abstracting and Indexing \u00b7"}, {"category_id": 15, "poly": [143.0, 1106.0, 310.0, 1106.0, 310.0, 1133.0, 143.0, 1133.0], "score": 0.99, "text": "Advance Access 
\u00b7"}, {"category_id": 15, "poly": [139.0, 1125.0, 308.0, 1131.0, 307.0, 1165.0, 137.0, 1159.0], "score": 0.98, "text": "Aims and Scope \u00b7"}, {"category_id": 15, "poly": [143.0, 1162.0, 285.0, 1162.0, 285.0, 1189.0, 143.0, 1189.0], "score": 0.99, "text": "Annual Issues \u00b7"}, {"category_id": 15, "poly": [119.0, 1175.0, 132.0, 1166.0, 140.0, 1178.0, 127.0, 1186.0], "score": 0.52, "text": "\u00b7"}, {"category_id": 15, "poly": [140.0, 1186.0, 399.0, 1189.0, 398.0, 1223.0, 140.0, 1221.0], "score": 0.95, "text": "Article Processing Charges :"}, {"category_id": 15, "poly": [145.0, 1221.0, 298.0, 1221.0, 298.0, 1247.0, 145.0, 1247.0], "score": 0.99, "text": "Articles in Press"}, {"category_id": 15, "poly": [141.0, 1240.0, 313.0, 1245.0, 312.0, 1280.0, 140.0, 1274.0], "score": 0.99, "text": "Author Guidelines"}, {"category_id": 15, "poly": [140.0, 1274.0, 379.0, 1274.0, 379.0, 1308.0, 140.0, 1308.0], "score": 0.98, "text": "Bibliographic Information "}, {"category_id": 15, "poly": [140.0, 1304.0, 369.0, 1304.0, 369.0, 1338.0, 140.0, 1338.0], "score": 0.97, "text": "Citations to this Jounal ."}, {"category_id": 15, "poly": [141.0, 1330.0, 340.0, 1333.0, 339.0, 1367.0, 140.0, 1364.0], "score": 0.95, "text": " Contact Information \u00b7"}, {"category_id": 15, "poly": [143.0, 1364.0, 298.0, 1364.0, 298.0, 1391.0, 143.0, 1391.0], "score": 0.96, "text": "Editorial Board \u00b7"}, {"category_id": 15, "poly": [138.0, 1389.0, 330.0, 1391.0, 329.0, 1426.0, 138.0, 1423.0], "score": 0.96, "text": " Editorial Workflow \u00b7"}, {"category_id": 15, "poly": [138.0, 1415.0, 323.0, 1421.0, 322.0, 1455.0, 137.0, 1449.0], "score": 0.95, "text": "Free eTOC Alerts \u00b7"}, {"category_id": 15, "poly": [143.0, 1452.0, 312.0, 1452.0, 312.0, 1479.0, 143.0, 1479.0], "score": 0.99, "text": "Publication Ethics"}, {"category_id": 15, "poly": [140.0, 1479.0, 408.0, 1479.0, 408.0, 1511.0, 140.0, 1511.0], "score": 0.99, "text": "Reviewers Acknowledgment "}, {"category_id": 15, "poly": [138.0, 1503.0, 345.0, 1508.0, 344.0, 1543.0, 137.0, 1537.0], "score": 0.98, "text": "Submit a Manuscript -"}, {"category_id": 15, "poly": [138.0, 1532.0, 376.0, 1535.0, 376.0, 1569.0, 138.0, 1567.0], "score": 0.99, "text": "Subscription Information \u00b7"}, {"category_id": 15, "poly": [138.0, 1559.0, 313.0, 1565.0, 312.0, 1599.0, 137.0, 1593.0], "score": 0.99, "text": "Table ofContents"}, {"category_id": 15, "poly": [138.0, 1618.0, 342.0, 1618.0, 342.0, 1649.0, 138.0, 1649.0], "score": 0.96, "text": " Open Special Issues \u00b7"}, {"category_id": 15, "poly": [140.0, 1645.0, 374.0, 1645.0, 374.0, 1676.0, 140.0, 1676.0], "score": 0.96, "text": " Published Special Issues :"}, {"category_id": 15, "poly": [140.0, 1676.0, 367.0, 1676.0, 367.0, 1708.0, 140.0, 1708.0], "score": 0.98, "text": "Special Issue Guidelines"}, {"category_id": 15, "poly": [139.0, 1722.0, 234.0, 1728.0, 232.0, 1762.0, 137.0, 1756.0], "score": 1.0, "text": "Abstract"}, {"category_id": 15, "poly": [143.0, 1759.0, 283.0, 1759.0, 283.0, 1786.0, 143.0, 1786.0], "score": 0.98, "text": "Full- Text PDF"}, {"category_id": 15, "poly": [143.0, 1788.0, 298.0, 1788.0, 298.0, 1815.0, 143.0, 1815.0], "score": 0.98, "text": "Full- Text HTML"}, {"category_id": 15, "poly": [143.0, 1818.0, 293.0, 1818.0, 293.0, 1844.0, 143.0, 1844.0], "score": 0.97, "text": "Full- Text ePUB"}, {"category_id": 15, "poly": [143.0, 1847.0, 288.0, 1847.0, 288.0, 1874.0, 143.0, 1874.0], "score": 0.94, "text": " Full- Text XML"}, {"category_id": 15, "poly": [145.0, 1876.0, 320.0, 1876.0, 320.0, 1903.0, 145.0, 
1903.0], "score": 0.99, "text": "Linked References"}, {"category_id": 15, "poly": [138.0, 1898.0, 357.0, 1903.0, 356.0, 1937.0, 137.0, 1932.0], "score": 0.97, "text": "Citations to this Article"}, {"category_id": 15, "poly": [140.0, 1932.0, 362.0, 1932.0, 362.0, 1964.0, 140.0, 1964.0], "score": 0.97, "text": "How to Cite this Article"}, {"category_id": 15, "poly": [140.0, 1961.0, 362.0, 1961.0, 362.0, 1993.0, 140.0, 1993.0], "score": 0.99, "text": "Complete Special Issue"}, {"category_id": 15, "poly": [86.0, 2010.0, 438.0, 2010.0, 438.0, 2042.0, 86.0, 2042.0], "score": 0.99, "text": "Abstract and Applied Analysis"}, {"category_id": 15, "poly": [86.0, 2030.0, 627.0, 2032.0, 627.0, 2066.0, 86.0, 2064.0], "score": 0.98, "text": "Volume 2013 (2013), Article ID 259470, 7 pages"}, {"category_id": 15, "poly": [81.0, 2056.0, 526.0, 2054.0, 526.0, 2088.0, 81.0, 2091.0], "score": 0.96, "text": "http: //dx.doi. org/10.1155/2013/259470"}, {"category_id": 15, "poly": [89.0, 2103.0, 241.0, 2103.0, 241.0, 2129.0, 89.0, 2129.0], "score": 1.0, "text": "Research Article"}], "page_info": {"page_no": 0, "height": 2339, "width": 1653}}, {"layout_dets": [{"category_id": 1, "poly": [88.79041290283203, 223.1548614501953, 596.5608520507812, 223.1548614501953, 596.5608520507812, 253.82266235351562, 88.79041290283203, 253.82266235351562], "score": 0.9999997019767761}, {"category_id": 1, "poly": [91.42396545410156, 476.6983947753906, 760.12841796875, 476.6983947753906, 760.12841796875, 507.5973815917969, 91.42396545410156, 507.5973815917969], "score": 0.9999997019767761}, {"category_id": 1, "poly": [90.55570220947266, 277.474609375, 378.6082458496094, 277.474609375, 378.6082458496094, 307.3374938964844, 90.55570220947266, 307.3374938964844], "score": 0.9999987483024597}, {"category_id": 1, "poly": [89.56867218017578, 327.1041259765625, 1510.0347900390625, 327.1041259765625, 1510.0347900390625, 389.6576232910156, 89.56867218017578, 389.6576232910156], "score": 0.9999944567680359}, {"category_id": 1, "poly": [119.38919067382812, 526.0288696289062, 1547.9219970703125, 526.0288696289062, 1547.9219970703125, 704.6207275390625, 119.38919067382812, 704.6207275390625], "score": 0.9999933242797852}, {"category_id": 1, "poly": [90.37564849853516, 139.7214813232422, 819.9425048828125, 139.7214813232422, 819.9425048828125, 201.7053985595703, 90.37564849853516, 201.7053985595703], "score": 0.9999151229858398}, {"category_id": 2, "poly": [89.54802703857422, 83.24105834960938, 424.4089050292969, 83.24105834960938, 424.4089050292969, 114.5113525390625, 89.54802703857422, 114.5113525390625], "score": 0.9983628988265991}, {"category_id": 1, "poly": [90.10054779052734, 417.296142578125, 437.4447021484375, 417.296142578125, 437.4447021484375, 447.23638916015625, 90.10054779052734, 447.23638916015625], "score": 0.8091702461242676}, {"category_id": 0, "poly": [89.9825210571289, 417.32232666015625, 437.4376525878906, 417.32232666015625, 437.4376525878906, 447.24578857421875, 89.9825210571289, 447.24578857421875], "score": 0.23477844893932343}, {"category_id": 13, "poly": [181, 332, 204, 332, 204, 355, 181, 355], "score": 0.73, "latex": "\\copyright"}, {"category_id": 13, "poly": [1167, 530, 1275, 530, 1275, 559, 1167, 559], "score": 0.68, "latex": "\\S\\S\\mathfrak{p h i}\\S\\S\\phi"}, {"category_id": 13, "poly": [803, 624, 817, 624, 817, 640, 803, 640], "score": 0.37, "latex": "\\cdot"}, {"category_id": 13, "poly": [824, 566, 838, 566, 838, 582, 824, 582], "score": 0.35, "latex": "\\cdot"}, {"category_id": 13, "poly": [1228, 530, 1274, 
530, 1274, 559, 1228, 559], "score": 0.32, "latex": "\\S\\S\\phi"}, {"category_id": 13, "poly": [705, 681, 718, 681, 718, 698, 705, 698], "score": 0.32, "latex": "\\cdot"}, {"category_id": 15, "poly": [89.0, 136.0, 824.0, 141.0, 824.0, 176.0, 88.0, 170.0], "score": 0.99, "text": "Department of Mathematics, Zhejang Normal University, Zhejiang 321004, China"}, {"category_id": 15, "poly": [86.0, 168.0, 740.0, 171.0, 740.0, 212.0, 86.0, 209.0], "score": 0.98, "text": "2Department of Mathematics, Nanjing University, Nanjing 210093, China"}, {"category_id": 15, "poly": [89.0, 227.0, 595.0, 227.0, 595.0, 261.0, 89.0, 261.0], "score": 0.99, "text": "Received 15 January 2013; Accepted 18 February 2013"}, {"category_id": 15, "poly": [89.0, 275.0, 377.0, 280.0, 376.0, 315.0, 88.0, 309.0], "score": 1.0, "text": "Academic Editor: Yisheng Song"}, {"category_id": 15, "poly": [91.0, 358.0, 1018.0, 358.0, 1018.0, 392.0, 91.0, 392.0], "score": 0.98, "text": "unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited."}, {"category_id": 15, "poly": [89.0, 422.0, 440.0, 422.0, 440.0, 453.0, 89.0, 453.0], "score": 0.99, "text": "Citations to this Article [3 citations]"}, {"category_id": 15, "poly": [89.0, 480.0, 765.0, 480.0, 765.0, 512.0, 89.0, 512.0], "score": 0.98, "text": "The following is the list of published articles that have cited the current article."}, {"category_id": 15, "poly": [140.0, 587.0, 1550.0, 587.0, 1550.0, 621.0, 140.0, 621.0], "score": 0.99, "text": "Yuanheng Wang, and Humin Shi, \u201cA Modified Mixed Ishikawa Iteration for Common Fixed Points of Two Asymptotically Quasi Pseudocontractive Type Non-"}, {"category_id": 15, "poly": [140.0, 646.0, 1525.0, 646.0, 1525.0, 680.0, 140.0, 680.0], "score": 0.98, "text": "Yuanheng Wang, \u201cStrong Convergence Theorems for Common Fixed Points of an Infinite Family of Asymptotically Nonexpansive Mappings,\u201d Abstract and"}, {"category_id": 15, "poly": [89.0, 329.0, 180.0, 329.0, 180.0, 363.0, 89.0, 363.0], "score": 1.0, "text": "Copyright"}, {"category_id": 15, "poly": [205.0, 329.0, 1500.0, 329.0, 1500.0, 363.0, 205.0, 363.0], "score": 0.99, "text": "2013 Yuanheng Wang and Weifeng Xuan. This is an open access article distributed under the Creative Commons Attribution License, which permits"}, {"category_id": 15, "poly": [118.0, 531.0, 1166.0, 531.0, 1166.0, 565.0, 118.0, 565.0], "score": 0.97, "text": "\u25cf Pham Ky Anh, and Dang Van Hieu, Parallel and sequential hybrid methods for a finite family of asymptotically quasi"}, {"category_id": 15, "poly": [1276.0, 531.0, 1508.0, 531.0, 1508.0, 565.0, 1276.0, 565.0], "score": 0.94, "text": "-nonexpansive mappings,*\""}, {"category_id": 15, "poly": [145.0, 619.0, 802.0, 619.0, 802.0, 653.0, 145.0, 653.0], "score": 0.98, "text": "Self- Mappings,\" Abstract and Applied Analysis, 2014. View at Publisher"}, {"category_id": 15, "poly": [818.0, 619.0, 1036.0, 619.0, 1036.0, 653.0, 818.0, 653.0], "score": 0.98, "text": "View at Google Scholar"}, {"category_id": 15, "poly": [145.0, 560.0, 823.0, 560.0, 823.0, 592.0, 145.0, 592.0], "score": 0.98, "text": "Journal of AppliedMathematics and Computing, 2014.View at Publisher"}, {"category_id": 15, "poly": [839.0, 560.0, 1058.0, 560.0, 1058.0, 592.0, 839.0, 592.0], "score": 0.99, "text": "View at Google Scholar"}, {"category_id": 15, "poly": [145.0, 677.0, 704.0, 672.0, 704.0, 707.0, 145.0, 712.0], "score": 0.97, "text": "Applied Analysis, vol. 2014, pp. 1-6, 2014. 
View at Publisher"}, {"category_id": 15, "poly": [719.0, 677.0, 935.0, 672.0, 935.0, 707.0, 719.0, 712.0], "score": 1.0, "text": "View at Google Scholar"}], "page_info": {"page_no": 1, "height": 2339, "width": 1653}}]