Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
wangsen
MinerU
Commits
cf6ffc6b
Unverified
Commit
cf6ffc6b
authored
Apr 07, 2025
by
Xiaomeng Zhao
Committed by
GitHub
Apr 07, 2025
Browse files
Merge pull request #2128 from myhloli/dev
fix(model): improve VRAM detection and handling
parents
dfb3cbfb
d32a63ca
Changes
2
Show whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
8 additions
and
3 deletions
+8
-3
magic_pdf/model/doc_analyze_by_custom_model.py
magic_pdf/model/doc_analyze_by_custom_model.py
+7
-2
magic_pdf/model/sub_modules/model_utils.py
magic_pdf/model/sub_modules/model_utils.py
+1
-1
No files found.
magic_pdf/model/doc_analyze_by_custom_model.py
View file @
cf6ffc6b
...
@@ -255,8 +255,9 @@ def may_batch_image_analyze(
             torch.npu.set_compile_mode(jit_compile=False)
     if str(device).startswith('npu') or str(device).startswith('cuda'):
-        gpu_memory = int(os.getenv('VIRTUAL_VRAM_SIZE', round(get_vram(device))))
+        vram = get_vram(device)
+        if vram is not None:
+            gpu_memory = int(os.getenv('VIRTUAL_VRAM_SIZE', round(vram)))
         if gpu_memory >= 16:
             batch_ratio = 16
         elif gpu_memory >= 12:
...
@@ -268,6 +269,10 @@ def may_batch_image_analyze(
         else:
             batch_ratio = 1
         logger.info(f'gpu_memory: {gpu_memory} GB, batch_ratio: {batch_ratio}')
+        else:
+            # Default batch_ratio when VRAM can't be determined
+            batch_ratio = 1
+            logger.info(f'Could not determine GPU memory, using default batch_ratio: {batch_ratio}')
     # doc_analyze_start = time.time()
...
(Note: the pre-existing `if gpu_memory >= 16:` ladder and the `logger.info` call are re-indented one level under the new `if vram is not None:` guard; the whitespace-only re-indentation is hidden by the "Show whitespace changes" display setting, so those lines appear as unchanged context above.)
...
magic_pdf/model/sub_modules/model_utils.py
View file @
cf6ffc6b
...
@@ -57,7 +57,7 @@ def clean_vram(device, vram_threshold=8):
 def get_vram(device):
-    if torch.cuda.is_available() and device != 'cpu':
+    if torch.cuda.is_available() and str(device).startswith("cuda"):
         total_memory = torch.cuda.get_device_properties(device).total_memory / (1024 ** 3)  # 将字节转换为 GB (bytes -> GB)
         return total_memory
     elif str(device).startswith("npu"):
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment