Unverified commit 38dfe835, authored by Xiaomeng Zhao and committed by GitHub

Merge pull request #2691 from zjx20/lazily-import

chore: speed up "mineru --help"
parents 80b5e4fe 5b26a387
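The speedup comes from deferring heavy imports: `torch` and the CLI's `.common` helpers are now imported inside the functions that use them instead of at module scope, so `mineru --help` only has to load click and print the option list. A minimal sketch of the pattern, using illustrative names rather than MinerU's actual layout:

```python
# Lazy-import sketch: the heavy dependency is imported inside the command body,
# so importing this module (e.g. to render --help) stays cheap.
# Names below are illustrative, not MinerU's real CLI.
import click


@click.command()
@click.option("--device-mode", default=None, help="cuda / cpu; autodetected if omitted")
def main(device_mode):
    import torch  # paid only when the command actually runs, not for --help

    if device_mode is None:
        device_mode = "cuda" if torch.cuda.is_available() else "cpu"
    click.echo(f"running on {device_mode}")


if __name__ == "__main__":
    main()
```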
@@ -7,7 +7,6 @@ from loguru import logger
 from mineru.utils.config_reader import get_device
 from mineru.utils.model_utils import get_vram
 from ..version import __version__
-from .common import do_parse, read_fn, pdf_suffixes, image_suffixes
 @click.command()
@@ -139,6 +138,8 @@ from .common import do_parse, read_fn, pdf_suffixes, image_suffixes
 def main(input_path, output_dir, method, backend, lang, server_url, start_page_id, end_page_id, formula_enable, table_enable, device_mode, virtual_vram, model_source):
+    from .common import do_parse, read_fn, pdf_suffixes, image_suffixes
+
     def get_device_mode() -> str:
         if device_mode is not None:
             return device_mode
mineru/utils/config_reader.py
@@ -2,7 +2,6 @@
 import json
 import os
-import torch
 from loguru import logger
 # Define the config file name constant
@@ -72,6 +71,8 @@ def get_device():
     if device_mode is not None:
         return device_mode
     else:
+        import torch
+
         if torch.cuda.is_available():
             return "cuda"
         elif torch.backends.mps.is_available():
mineru/utils/model_utils.py
 import time
-import torch
 import gc
 from PIL import Image
 from loguru import logger
@@ -298,6 +297,8 @@ def get_res_list_from_layout_res(layout_res, iou_threshold=0.7, overlap_threshold
 def clean_memory(device='cuda'):
+    import torch
+
     if device == 'cuda':
         if torch.cuda.is_available():
             torch.cuda.empty_cache()
@@ -321,6 +322,8 @@ def clean_vram(device, vram_threshold=8):
 def get_vram(device):
+    import torch
+
     if torch.cuda.is_available() and str(device).startswith("cuda"):
         total_memory = torch.cuda.get_device_properties(device).total_memory / (1024 ** 3)  # Convert bytes to GB
         return total_memory
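A quick way to confirm the effect is to check that importing one of the now-lazy modules no longer drags `torch` into the process; a small sketch, assuming mineru is installed in the current environment:

```python
# Importing the config reader should no longer pull torch into sys.modules,
# since torch is now imported inside get_device() only when it is called.
import sys

import mineru.utils.config_reader  # noqa: F401

print("torch eagerly imported?", "torch" in sys.modules)  # expected: False
```

For a fuller startup breakdown, `python -X importtime -c "import mineru"` prints the import cost of each module.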