"git@developer.sourcefind.cn:OpenDAS/megatron-lm.git" did not exist on "d078e54ab6142f9bab04c4edb7804d5e417f3746"
Unverified commit 6d571e2e authored by Kaiwen Liu, committed by GitHub

Merge pull request #7 from opendatalab/dev

Dev
parents a3358878 37c335ae
*.tar
*.tar.gz
*.zip
venv*/
envs/
slurm_logs/
sync1.sh
data_preprocess_pj1
data-preparation1
__pycache__
*.log
*.pyc
.vscode
debug/
*.ipynb
.idea
# vscode history
.history
.DS_Store
.env
bad_words/
bak/
app/tests/*
temp/
tmp/
tmp
.vscode
.vscode/
ocr_demo
.coveragerc
/app/common/__init__.py
/magic_pdf/config/__init__.py
source.dev.env
tmp
projects/web/node_modules
projects/web/dist
projects/web_demo/web_demo/static/
cli_debug/
debug_utils/
# sphinx docs
_build/
......@@ -3,7 +3,7 @@ repos:
rev: 5.0.4
hooks:
- id: flake8
args: ["--max-line-length=120", "--ignore=E131,E125,W503,W504,E203"]
args: ["--max-line-length=150", "--ignore=E131,E125,W503,W504,E203"]
- repo: https://github.com/PyCQA/isort
rev: 5.11.5
hooks:
......@@ -12,11 +12,12 @@ repos:
rev: v0.32.0
hooks:
- id: yapf
args: ["--style={based_on_style: google, column_limit: 120, indent_width: 4}"]
args: ["--style={based_on_style: google, column_limit: 150, indent_width: 4}"]
- repo: https://github.com/codespell-project/codespell
rev: v2.2.1
hooks:
- id: codespell
args: ['--skip', '*.json']
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.3.0
hooks:
......
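The hunk above raises flake8's `--max-line-length` and yapf's `column_limit` from 120 to 150. As a rough local pre-check before committing (a sketch only; the file path is a placeholder, and the pre-commit hooks themselves remain the authoritative check):

```python
# Flag lines longer than the 150-character limit configured above (path is a placeholder).
LIMIT = 150
with open("magic_pdf/pipe/UNIPipe.py", encoding="utf-8") as f:
    for lineno, line in enumerate(f, start=1):
        stripped = line.rstrip("\n")
        if len(stripped) > LIMIT:
            print(f"line {lineno}: {len(stripped)} chars")
```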
version: 2
build:
  os: ubuntu-22.04
  tools:
    python: "3.10"
formats:
  - epub
python:
  install:
    - requirements: docs/zh_cn/requirements.txt
sphinx:
  configuration: docs/zh_cn/conf.py
import os
import json
from loguru import logger
from magic_pdf.pipe.UNIPipe import UNIPipe
from magic_pdf.rw.DiskReaderWriter import DiskReaderWriter
import magic_pdf.model as model_config
model_config.__use_inside_model__ = True
try:
    current_script_dir = os.path.dirname(os.path.abspath(__file__))
    demo_name = "demo1"
    pdf_path = os.path.join(current_script_dir, f"{demo_name}.pdf")
    model_path = os.path.join(current_script_dir, f"{demo_name}.json")
    pdf_bytes = open(pdf_path, "rb").read()
    # model_json = json.loads(open(model_path, "r", encoding="utf-8").read())
    model_json = []  # passing an empty model_json list makes the pipeline use the built-in models
    jso_useful_key = {"_pdf_type": "", "model_list": model_json}
    local_image_dir = os.path.join(current_script_dir, 'images')
    image_dir = str(os.path.basename(local_image_dir))
    image_writer = DiskReaderWriter(local_image_dir)
    pipe = UNIPipe(pdf_bytes, jso_useful_key, image_writer)
    pipe.pipe_classify()
    pipe.pipe_analyze()  # analyze with the built-in models
    pipe.pipe_parse()
    md_content = pipe.pipe_mk_markdown(image_dir, drop_mode="none")
    with open(f"{demo_name}.md", "w", encoding="utf-8") as f:
......
......@@ -4,13 +4,12 @@ import copy
from loguru import logger
from magic_pdf.libs.draw_bbox import draw_layout_bbox, draw_span_bbox
from magic_pdf.pipe.UNIPipe import UNIPipe
from magic_pdf.pipe.OCRPipe import OCRPipe
from magic_pdf.pipe.TXTPipe import TXTPipe
from magic_pdf.rw.DiskReaderWriter import DiskReaderWriter
import magic_pdf.model as model_config
model_config.__use_inside_model__ = True
# todo: device type selection (?)
......@@ -47,11 +46,20 @@ def json_md_dump(
)
# visualization
def draw_visualization_bbox(pdf_info, pdf_bytes, local_md_dir, pdf_file_name):
    # draw layout boxes, together with the reading-order result
    draw_layout_bbox(pdf_info, pdf_bytes, local_md_dir, pdf_file_name)
    # draw span boxes
    draw_span_bbox(pdf_info, pdf_bytes, local_md_dir, pdf_file_name)
def pdf_parse_main(
        pdf_path: str,
        parse_method: str = 'auto',
        model_json_path: str = None,
        is_json_md_dump: bool = True,
        is_draw_visualization_bbox: bool = True,
        output_dir: str = None
):
    """
......@@ -108,11 +116,7 @@ def pdf_parse_main(
        # if no model data is provided, parse with the built-in models
        pipe.pipe_analyze()  # analyze

        # run parsing
        pipe.pipe_parse()
......@@ -121,10 +125,11 @@ def pdf_parse_main(
        content_list = pipe.pipe_mk_uni_format(image_path_parent, drop_mode="none")
        md_content = pipe.pipe_mk_markdown(image_path_parent, drop_mode="none")

        if is_json_md_dump:
            json_md_dump(pipe, md_writer, pdf_name, content_list, md_content)

        if is_draw_visualization_bbox:
            draw_visualization_bbox(pipe.pdf_mid_data['pdf_info'], pdf_bytes, output_path, pdf_name)

    except Exception as e:
        logger.exception(e)
......@@ -132,5 +137,5 @@ def pdf_parse_main(
# test
if __name__ == '__main__':
    pdf_path = r"D:\project\20240617magicpdf\Magic-PDF\demo\demo1.pdf"
    pdf_parse_main(pdf_path)
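
# Usage sketch (added for illustration; the path and output directory below are placeholders):
# the same entry point can force OCR parsing and write results to a custom directory.
# pdf_parse_main(r"D:\project\20240617magicpdf\Magic-PDF\demo\demo1.pdf", parse_method="ocr", output_dir="./output")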
......@@ -11,7 +11,7 @@ pip install magic-pdf[full]
### 2. Encountering the error `pickle.UnpicklingError: invalid load key, 'v'.` during use
This might be due to an incomplete download of the model file. You can try re-downloading the model file and then try again.
Reference: https://github.com/opendatalab/MinerU/issues/143
### 3. Where should the model files be downloaded and how should the `/models-dir` configuration be set?
......@@ -24,7 +24,7 @@ The path for the model files is configured in "magic-pdf.json". just like:
}
```
This path is an absolute path, not a relative path. You can obtain the absolute path in the models directory using the "pwd" command.
Reference: https://github.com/opendatalab/MinerU/issues/155#issuecomment-2230216874
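To double-check that the configured value is an absolute path to an existing directory, a small Python check can be used (a sketch; the filename and key are the ones described above):

```python
import json
import os

models_dir = json.load(open(os.path.expanduser("~/magic-pdf.json"), encoding="utf-8"))["models-dir"]
print(models_dir, "absolute:", os.path.isabs(models_dir), "exists:", os.path.isdir(models_dir))
```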
### 4. Encountered the error `ImportError: libGL.so.1: cannot open shared object file: No such file or directory` in Ubuntu 22.04 on WSL2
......@@ -38,17 +38,22 @@ sudo apt-get install libgl1-mesa-glx
Reference: https://github.com/opendatalab/MinerU/issues/388
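The missing `libGL.so.1` typically surfaces when OpenCV is imported; after installing the package, a quick import check confirms the fix (treating `cv2` as the trigger is an assumption, not something stated above):

```python
import cv2  # fails with the libGL ImportError until libgl1-mesa-glx is installed
print(cv2.__version__)
```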
### 5. Encountered error `ModuleNotFoundError: No module named 'fairscale'`
You need to uninstall the module and reinstall it:
```bash
pip uninstall fairscale
pip install fairscale
```
Reference: https://github.com/opendatalab/MinerU/issues/411
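After reinstalling, a plain import is enough to confirm the module resolves again (a minimal sanity check, not part of the original answer):

```python
import fairscale  # raises ModuleNotFoundError if the reinstall did not take effect
print("fairscale imported OK")
```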
### 6. On some newer devices like the H100, the text parsed during OCR using CUDA acceleration is garbled.
CUDA 11 has poor compatibility with newer graphics cards, so the CUDA version used by Paddle needs to be upgraded.
```bash
pip install paddlepaddle-gpu==3.0.0b1 -i https://www.paddlepaddle.org.cn/packages/stable/cu123/
```
Reference: https://github.com/opendatalab/MinerU/issues/558
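To confirm the upgraded Paddle build actually sees the GPU, Paddle's own check can be run (a quick sketch; this verification step is not part of the original answer):

```python
import paddle

print(paddle.device.is_compiled_with_cuda())  # should print True for the GPU build
paddle.utils.run_check()  # runs Paddle's built-in installation check
```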
# Frequently Asked Questions
### 1. Installing with pip install magic-pdf\[full\] on newer macOS versions fails with zsh: no matches found: magic-pdf\[full\]
On macOS the default shell has switched from Bash to Z shell, and Z shell applies special matching rules to certain patterns, which can trigger the "no matches found" error.
You can disable globbing on the command line and then rerun the install command:
```bash
setopt no_nomatch
pip install magic-pdf[full]
......@@ -11,41 +12,50 @@ pip install magic-pdf[full]
### 2. Encountering `_pickle.UnpicklingError: invalid load key, 'v'.` during use
This may be caused by an incomplete model download; try re-downloading the model files and then try again.
Reference: https://github.com/opendatalab/MinerU/issues/143
### 3. Where should the model files be downloaded, and how should the models-dir configuration be filled in?
The model file path is configured in "magic-pdf.json" via
```json
{
  "models-dir": "/tmp/models"
}
```
This path is an absolute path, not a relative path; you can obtain the absolute path by running "pwd" inside the models directory.
Reference: https://github.com/opendatalab/MinerU/issues/155#issuecomment-2230216874
### 4. Encountering `ImportError: libGL.so.1: cannot open shared object file: No such file or directory` in Ubuntu 22.04 on WSL2
Ubuntu 22.04 on WSL2 is missing the `libgl` library; install it with the following command:
```bash
sudo apt-get install libgl1-mesa-glx
```
Reference: https://github.com/opendatalab/MinerU/issues/388
### 5. Encountering `ModuleNotFoundError: No module named 'fairscale'`
Uninstall the module and reinstall it:
```bash
pip uninstall fairscale
pip install fairscale
```
Reference: https://github.com/opendatalab/MinerU/issues/411
### 6. On some newer devices such as the H100, text parsed with CUDA-accelerated OCR is garbled
CUDA 11 has poor compatibility with newer graphics cards, so the CUDA version used by Paddle needs to be upgraded:
```bash
pip install paddlepaddle-gpu==3.0.0b1 -i https://www.paddlepaddle.org.cn/packages/stable/cu123/
```
Reference: https://github.com/opendatalab/MinerU/issues/558
# Ubuntu 22.04 LTS
### 1. Check if NVIDIA Drivers Are Installed
```sh
nvidia-smi
```
If you see information similar to the following, it means that the NVIDIA drivers are already installed, and you can skip Step 2.
Notice: `CUDA Version` should be >= 12.1. If the displayed version number is lower than 12.1, please upgrade the driver.
```plaintext
+---------------------------------------------------------------------------------------+
| NVIDIA-SMI 537.34 Driver Version: 537.34 CUDA Version: 12.2 |
|-----------------------------------------+----------------------+----------------------+
| GPU Name TCC/WDDM | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|=========================================+======================+======================|
| 0 NVIDIA GeForce RTX 3060 Ti WDDM | 00000000:01:00.0 On | N/A |
| 0% 51C P8 12W / 200W | 1489MiB / 8192MiB | 5% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
```
### 2. Install the Driver
If no driver is installed, use the following command:
```sh
sudo apt-get update
sudo apt-get install nvidia-driver-545
```
Install the proprietary driver and restart your computer after installation.
```sh
reboot
```
### 3. Install Anaconda
If Anaconda is already installed, skip this step.
```sh
wget https://repo.anaconda.com/archive/Anaconda3-2024.06-1-Linux-x86_64.sh
bash Anaconda3-2024.06-1-Linux-x86_64.sh
```
In the final step, enter `yes`, close the terminal, and reopen it.
### 4. Create an Environment Using Conda
Specify Python version 3.10.
```sh
conda create -n MinerU python=3.10
conda activate MinerU
```
### 5. Install Applications
```sh
pip install -U magic-pdf[full] --extra-index-url https://wheels.myhloli.com
```
❗ After installation, make sure to check the version of `magic-pdf` using the following command:
```sh
magic-pdf --version
```
If the version number is less than 0.7.0, please report the issue.
### 6. Download Models
Refer to detailed instructions on [how to download model files](how_to_download_models_en.md).
### 7. Understand the Location of the Configuration File
After completing the [6. Download Models](#6-download-models) step, the script will automatically generate a `magic-pdf.json` file in the user directory and configure the default model path.
You can find the `magic-pdf.json` file in your user directory.
> The user directory for Linux is "/home/username".
### 8. First Run
Download a sample file from the repository and test it.
```sh
wget https://github.com/opendatalab/MinerU/raw/master/demo/small_ocr.pdf
magic-pdf -p small_ocr.pdf
```
### 9. Test CUDA Acceleration
If your graphics card has at least **8GB** of VRAM, follow these steps to test CUDA acceleration:
1. Modify the value of `"device-mode"` in the `magic-pdf.json` configuration file located in your home directory.
```json
......@@ -105,8 +110,6 @@ If your graphics card has at least 8GB of VRAM, follow these steps to test CUDA
### 10. Enable CUDA Acceleration for OCR
❗ The following operations require a graphics card with at least 16GB of VRAM; otherwise, the program may crash or experience reduced performance.
1. Download `paddlepaddle-gpu`. Installation will automatically enable OCR acceleration.
```sh
python -m pip install paddlepaddle-gpu==3.0.0b1 -i https://www.paddlepaddle.org.cn/packages/stable/cu118/
......
# Ubuntu 22.04 LTS
## 1. Check whether the NVIDIA driver is installed
```bash
nvidia-smi
```
If you see output similar to the following, the NVIDIA driver is already installed and you can skip step 2.
Note: the `CUDA Version` shown should be >= 12.1; if the displayed version is lower than 12.1, please upgrade the driver.
```plaintext
+---------------------------------------------------------------------------------------+
| NVIDIA-SMI 537.34 Driver Version: 537.34 CUDA Version: 12.2 |
......@@ -18,96 +24,108 @@ nvidia-smi
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
```
## 2. Install the driver
If no driver is installed, run the following commands
```bash
sudo apt-get update
sudo apt-get install nvidia-driver-545
```
to install the proprietary driver, then reboot the machine once the installation finishes.
```bash
reboot
```
## 3. Install Anaconda
If conda is already installed, skip this step.
```bash
wget -U NoSuchBrowser/1.0 https://mirrors.tuna.tsinghua.edu.cn/anaconda/archive/Anaconda3-2024.06-1-Linux-x86_64.sh
bash Anaconda3-2024.06-1-Linux-x86_64.sh
```
In the final step, enter yes, then close the terminal and reopen it.
## 4. Create an environment with conda
Specify Python version 3.10.
```bash
conda create -n MinerU python=3.10
conda activate MinerU
```
## 5. Install the application
```bash
pip install -U magic-pdf[full] --extra-index-url https://wheels.myhloli.com -i https://pypi.tuna.tsinghua.edu.cn/simple
```
> ❗️After the installation finishes, make sure to check the magic-pdf version with the following command
>
> ```bash
> magic-pdf --version
> ```
>
> If the version number is lower than 0.7.0, please report it to us in an issue.
## 6. Download the models
See [how to download model files](how_to_download_models_zh_cn.md) for details.
## 7. Know where the configuration file is stored
After completing step [6. Download the models](#6-download-the-models), the script automatically generates a magic-pdf.json file in the user directory and configures the default model path.
You can find the magic-pdf.json file in your user directory.
> The user directory on Linux is "/home/username".
## 8. First run
Download a sample file from the repository and test it.
```bash
wget https://gitee.com/myhloli/MinerU/raw/master/demo/small_ocr.pdf
magic-pdf -p small_ocr.pdf
```
## 9. Test CUDA acceleration
If your graphics card has at least **8GB** of VRAM, follow the steps below to test CUDA-accelerated parsing.
**1. Modify the value of "device-mode" in the magic-pdf.json configuration file in your user directory**
```json
{
  "device-mode": "cuda"
}
```
**2. Run the following command to test CUDA acceleration**
```bash
magic-pdf -p small_ocr.pdf
```
> Tip: you can roughly judge whether CUDA acceleration is working from the per-stage timings in the log; normally `layout detection cost` and `mfr time` should be at least 10x faster.
## 10. Enable CUDA acceleration for OCR
> ❗️The following steps require a graphics card with at least 16GB of VRAM; otherwise the program may crash or slow down due to insufficient VRAM.
**1. Download paddlepaddle-gpu; OCR acceleration is enabled automatically after installation**
```bash
python -m pip install paddlepaddle-gpu==3.0.0b1 -i https://www.paddlepaddle.org.cn/packages/stable/cu118/
```
**2. Run the following command to test OCR acceleration**
```bash
magic-pdf -p small_ocr.pdf
```
> Tip: you can roughly judge whether CUDA acceleration is working from the per-stage timings in the log; normally `ocr cost` should be at least 10x faster.
# Windows 10/11
### 1. Install CUDA and cuDNN
Required versions: CUDA 11.8 + cuDNN 8.7.0
- CUDA 11.8: https://developer.nvidia.com/cuda-11-8-0-download-archive
- cuDNN v8.7.0 (November 28th, 2022), for CUDA 11.x: https://developer.nvidia.com/rdp/cudnn-archive
### 2. Install Anaconda
If Anaconda is already installed, you can skip this step.
Download link: https://repo.anaconda.com/archive/Anaconda3-2024.06-1-Windows-x86_64.exe
### 3. Create an Environment Using Conda
Python version must be 3.10.
```
conda create -n MinerU python=3.10
conda activate MinerU
```
### 4. Install Applications
```
pip install -U magic-pdf[full] --extra-index-url https://wheels.myhloli.com
```
> ❗️After installation, verify the version of `magic-pdf`:
>
> ```bash
> magic-pdf --version
> ```
>
> If the version number is less than 0.7.0, please report it in the issues section.
### 5. Download Models
Refer to detailed instructions on [how to download model files](how_to_download_models_en.md).
### 6. Understand the Location of the Configuration File
After completing the [5. Download Models](#5-download-models) step, the script will automatically generate a `magic-pdf.json` file in the user directory and configure the default model path.
You can find the `magic-pdf.json` file in your user directory.
> The user directory for Windows is "C:/Users/username".
### 7. First Run
Download a sample file from the repository and test it.
```powershell
wget https://github.com/opendatalab/MinerU/raw/master/demo/small_ocr.pdf -O small_ocr.pdf
magic-pdf -p small_ocr.pdf
```
### 8. Test CUDA Acceleration
If your graphics card has at least 8GB of VRAM, follow these steps to test CUDA-accelerated parsing performance.
1. **Overwrite the installation of torch and torchvision** supporting CUDA.
```
pip install --force-reinstall torch==2.3.1 torchvision==0.18.1 --index-url https://download.pytorch.org/whl/cu118
```
> ❗️Ensure the following versions are specified in the command:
>
> ```
> torch==2.3.1 torchvision==0.18.1
> ```
>
> These are the highest versions we support. Installing higher versions without specifying them will cause the program to fail.
2. **Modify the value of `"device-mode"`** in the `magic-pdf.json` configuration file located in your user directory.
```json
{
"models-dir": "/tmp/models"
"device-mode": "cuda"
}
```
3. **Run the following command to test CUDA acceleration**:
```
magic-pdf -p small_ocr.pdf
```
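To confirm that the CUDA-enabled torch build installed in step 1 is the one actually in use, a quick interpreter check helps (a small sketch; this verification is not part of the original steps):

```python
import torch

print(torch.__version__)          # expected: 2.3.1+cu118
print(torch.cuda.is_available())  # expected: True
```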
### 9. Enable CUDA Acceleration for OCR
>❗️This operation requires at least 16GB of VRAM on your graphics card, otherwise it will cause the program to crash or slow down.
1. **Download paddlepaddle-gpu**, which will automatically enable OCR acceleration upon installation.
```
pip install paddlepaddle-gpu==2.6.1
```
2. **Run the following command to test OCR acceleration**:
```
magic-pdf -p small_ocr.pdf
```
......@@ -3,103 +3,106 @@
## 1. Install CUDA and cuDNN
Required versions: CUDA 11.8 + cuDNN 8.7.0
- CUDA 11.8 https://developer.nvidia.com/cuda-11-8-0-download-archive
- cuDNN v8.7.0 (November 28th, 2022), for CUDA 11.x https://developer.nvidia.com/rdp/cudnn-archive
## 2. Install Anaconda
If conda is already installed, skip this step.
Download link:
https://mirrors.tuna.tsinghua.edu.cn/anaconda/archive/Anaconda3-2024.06-1-Windows-x86_64.exe
## 3. Create an environment with conda
Specify Python version 3.10.
```bash
conda create -n MinerU python=3.10
conda activate MinerU
```
## 4. Install the application
```bash
pip install -U magic-pdf[full] --extra-index-url https://wheels.myhloli.com -i https://pypi.tuna.tsinghua.edu.cn/simple
```
> ❗️After the installation finishes, make sure to check the magic-pdf version with the following command
>
> ```bash
> magic-pdf --version
> ```
>
> If the version number is lower than 0.7.0, please report it to us in an issue.
## 5. Download the models
See [how to download model files](how_to_download_models_zh_cn.md) for details.
## 6. Know where the configuration file is stored
After completing step [5. Download the models](#5-download-the-models), the script automatically generates a magic-pdf.json file in the user directory and configures the default model path.
You can find the magic-pdf.json file in your user directory.
> The user directory on Windows is "C:/Users/username".
## 7. First run
Download a sample file from the repository and test it.
```powershell
wget https://github.com/opendatalab/MinerU/raw/master/demo/small_ocr.pdf -O small_ocr.pdf
magic-pdf -p small_ocr.pdf
```
## 8. Test CUDA acceleration
If your graphics card has at least **8GB** of VRAM, follow the steps below to test CUDA-accelerated parsing.
**1. Overwrite-install the CUDA-enabled torch and torchvision**
```bash
pip install --force-reinstall torch==2.3.1 torchvision==0.18.1 --index-url https://download.pytorch.org/whl/cu118
```
> ❗️Make sure to specify the following versions in the command
>
> ```bash
> torch==2.3.1 torchvision==0.18.1
> ```
>
> These are the highest versions we support; without pinning them, newer versions are installed automatically and the program will not run.
**2. Modify the value of "device-mode" in the magic-pdf.json configuration file in your user directory**
```json
{
  "device-mode": "cuda"
}
```
**3. Run the following command to test CUDA acceleration**
```bash
magic-pdf -p small_ocr.pdf
```
> Tip: you can roughly judge whether CUDA acceleration is working from the per-stage timings in the log; normally `layout detection time` and `mfr time` should be at least 10x faster.
## 9. Enable CUDA acceleration for OCR
> ❗️The following steps require a graphics card with at least 16GB of VRAM; otherwise the program may crash or slow down due to insufficient VRAM.
**1. Download paddlepaddle-gpu; OCR acceleration is enabled automatically after installation**
```bash
pip install paddlepaddle-gpu==2.6.1
```
**2. Run the following command to test OCR acceleration**
```bash
magic-pdf -p small_ocr.pdf
```
> Tip: you can roughly judge whether CUDA acceleration is working from the per-stage timings in the log; normally `ocr time` should be at least 10x faster.
# use modelscope sdk download models
import json
import os

import requests
from modelscope import snapshot_download


def download_json(url):
    # download the JSON file
    response = requests.get(url)
    response.raise_for_status()  # check that the request succeeded
    return response.json()


def download_and_modify_json(url, local_filename, modifications):
    if os.path.exists(local_filename):
        data = json.load(open(local_filename))
        config_version = data.get('config_version', '0.0.0')
        if config_version < '1.0.0':
            data = download_json(url)
    else:
        data = download_json(url)

    # apply the modifications
    for key, value in modifications.items():
        data[key] = value

    # save the modified content
    with open(local_filename, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=4)


if __name__ == '__main__':
    mineru_patterns = [
        "models/Layout/LayoutLMv3/*",
        "models/Layout/YOLO/*",
        "models/MFD/YOLO/*",
        "models/MFR/unimernet_small/*",
        "models/TabRec/TableMaster/*",
        "models/TabRec/StructEqTable/*",
    ]
    model_dir = snapshot_download('opendatalab/PDF-Extract-Kit-1.0', allow_patterns=mineru_patterns)
    layoutreader_model_dir = snapshot_download('ppaanngggg/layoutreader')
    model_dir = model_dir + '/models'
    print(f'model_dir is: {model_dir}')
    print(f'layoutreader_model_dir is: {layoutreader_model_dir}')

    json_url = 'https://gitee.com/myhloli/MinerU/raw/dev/magic-pdf.template.json'
    config_file_name = 'magic-pdf.json'
    home_dir = os.path.expanduser('~')
    config_file = os.path.join(home_dir, config_file_name)

    json_mods = {
        'models-dir': model_dir,
        'layoutreader-model-dir': layoutreader_model_dir,
    }

    download_and_modify_json(json_url, config_file, json_mods)
    print(f'The configuration file has been configured successfully, the path is: {config_file}')
import json
import os

import requests
from huggingface_hub import snapshot_download


def download_json(url):
    # download the JSON file
    response = requests.get(url)
    response.raise_for_status()  # check that the request succeeded
    return response.json()


def download_and_modify_json(url, local_filename, modifications):
    if os.path.exists(local_filename):
        data = json.load(open(local_filename))
        config_version = data.get('config_version', '0.0.0')
        if config_version < '1.0.0':
            data = download_json(url)
    else:
        data = download_json(url)

    # apply the modifications
    for key, value in modifications.items():
        data[key] = value

    # save the modified content
    with open(local_filename, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=4)


if __name__ == '__main__':
    mineru_patterns = [
        "models/Layout/LayoutLMv3/*",
        "models/Layout/YOLO/*",
        "models/MFD/YOLO/*",
        "models/MFR/unimernet_small/*",
        "models/TabRec/TableMaster/*",
        "models/TabRec/StructEqTable/*",
    ]
    model_dir = snapshot_download('opendatalab/PDF-Extract-Kit-1.0', allow_patterns=mineru_patterns)

    layoutreader_pattern = [
        "*.json",
        "*.safetensors",
    ]
    layoutreader_model_dir = snapshot_download('hantian/layoutreader', allow_patterns=layoutreader_pattern)

    model_dir = model_dir + '/models'
    print(f'model_dir is: {model_dir}')
    print(f'layoutreader_model_dir is: {layoutreader_model_dir}')

    json_url = 'https://github.com/opendatalab/MinerU/raw/dev/magic-pdf.template.json'
    config_file_name = 'magic-pdf.json'
    home_dir = os.path.expanduser('~')
    config_file = os.path.join(home_dir, config_file_name)

    json_mods = {
        'models-dir': model_dir,
        'layoutreader-model-dir': layoutreader_model_dir,
    }

    download_and_modify_json(json_url, config_file, json_mods)
    print(f'The configuration file has been configured successfully, the path is: {config_file}')
Model downloads are divided into initial downloads and updates to the model directory. Please refer to the corresponding documentation for instructions on how to proceed.
# Initial download of model files
### 1. Download the Model from Hugging Face
Use a Python Script to Download Model Files from Hugging Face
```bash
pip install huggingface_hub
wget https://github.com/opendatalab/MinerU/raw/master/docs/download_models_hf.py -O download_models_hf.py
python download_models_hf.py
```
After the Python script finishes executing, it will output the directory where the models are downloaded.
### 2. Modifying the model path in the configuration file
The Python script will automatically download the model files and configure the model directory in the configuration file.
The configuration file can be found in the user directory, with the filename `magic-pdf.json`.
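If you want to verify the result, a short check against the generated configuration can be run (a sketch; the key names match the download script and template shown in this repository):

```python
import json
import os

config_path = os.path.join(os.path.expanduser("~"), "magic-pdf.json")
config = json.load(open(config_path, encoding="utf-8"))
for key in ("models-dir", "layoutreader-model-dir"):
    path = config.get(key)
    print(f"{key}: {path} -> {'OK' if path and os.path.isdir(path) else 'MISSING'}")
```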
# How to update models previously downloaded
## 1. Models downloaded via Git LFS
> Due to feedback from some users that downloading model files using git lfs was incomplete or resulted in corrupted model files, this method is no longer recommended.
When magic-pdf <= 0.8.1, if you have previously downloaded the model files via git lfs, you can navigate to the previous download directory and update the models using the `git pull` command.
> For versions 0.9.x and later, due to the repository change and the addition of the layout sorting model in PDF-Extract-Kit 1.0, the models cannot be updated using the `git pull` command. Instead, a Python script must be used for one-click updates.
## 2. Models downloaded via Hugging Face or Model Scope
......
......@@ -8,9 +8,8 @@
<summary>Method 1: Download the models from Hugging Face</summary>
<p>Use a Python script to download the model files from Hugging Face</p>
<pre><code>pip install huggingface_hub
wget https://gitee.com/myhloli/MinerU/raw/master/docs/download_models_hf.py -O download_models_hf.py
python download_models_hf.py</code></pre>
<p>After the Python script finishes, it prints the model download directory</p>
</details>
## Method 2: Download the models from ModelScope
......@@ -19,24 +18,26 @@ python download_models_hf.py</code></pre>
```bash
pip install modelscope
wget https://gitee.com/myhloli/MinerU/raw/master/docs/download_models.py -O download_models.py
python download_models.py
```
The Python script automatically downloads the model files and configures the model directory in the configuration file.
## Post-download step: the model path in magic-pdf.json
The configuration file can be found in the user directory, with the filename `magic-pdf.json`.
> The user directory is "C:\\Users\\username" on Windows, "/home/username" on Linux, and "/Users/username" on macOS.
# How to update previously downloaded models
## 1. Models downloaded via git lfs
> Because some users reported incomplete downloads and corrupted model files when downloading via git lfs, this method is no longer recommended.
When magic-pdf <= 0.8.1, if you previously downloaded the model files via git lfs, you can go to the original download directory and update the models with the `git pull` command.
> For 0.9.x and later versions, because PDF-Extract-Kit 1.0 moved to a new repository and added a layout sorting model, the models cannot be updated with `git pull`; a Python script must be used for a one-click update.
## 2. Models downloaded via Hugging Face or ModelScope
......
......@@ -4,10 +4,20 @@
"bucket-name-2":["ak", "sk", "endpoint"]
},
"models-dir":"/tmp/models",
"layoutreader-model-dir":"/tmp/layoutreader",
"device-mode":"cpu",
"layout-config": {
"model": "layoutlmv3"
},
"formula-config": {
"mfd_model": "yolo_v8_mfd",
"mfr_model": "unimernet_small",
"enable": true
},
"table-config": {
"model": "TableMaster",
"is_table_recog_enable": false,
"model": "tablemaster",
"enable": false,
"max_time": 400
}
},
"config_version": "1.0.0"
}
\ No newline at end of file
import enum
class SupportedPdfParseMethod(enum.Enum):
    OCR = 'ocr'
    TXT = 'txt'
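
# Usage sketch (not part of the file above): the enum values mirror the parse_method
# strings ('ocr', 'txt') used by the demo scripts earlier in this diff.
assert SupportedPdfParseMethod('ocr') is SupportedPdfParseMethod.OCR
assert SupportedPdfParseMethod.TXT.value == 'txt'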