wangsen / MinerU

Commit 5d4263d4, authored Jul 02, 2025 by myhloli
fix: standardize variable naming and improve interface visibility logic in gradio_app.py
Parent: 67715001
Showing 1 changed file with 3 additions and 5 deletions.
mineru/cli/gradio_app.py
```diff
@@ -180,7 +180,7 @@ def to_pdf(file_path):
 def update_interface(backend_choice):
     if backend_choice in ["vlm-transformers", "vlm-sglang-engine"]:
         return gr.update(visible=False), gr.update(visible=False)
-    elif backend_choice in ["vlm-sglang-client"]:  # pipeline
+    elif backend_choice in ["vlm-sglang-client"]:
         return gr.update(visible=True), gr.update(visible=False)
     elif backend_choice in ["pipeline"]:
         return gr.update(visible=False), gr.update(visible=True)
```
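The two `gr.update(...)` values returned by `update_interface` are applied positionally to whichever components are listed as `outputs` when the function is bound to an event; that binding is not part of this hunk. A minimal, self-contained sketch of the mechanism, with illustrative component names that do not come from gradio_app.py:

```python
import gradio as gr

def toggle(choice):
    # The first returned update is applied to the first output component,
    # the second to the second, matching the pattern in update_interface.
    return gr.update(visible=(choice == "A")), gr.update(visible=(choice == "B"))

with gr.Blocks() as demo:
    choice = gr.Radio(["A", "B"], value="A", label="Panel")
    panel_a = gr.Markdown("Panel A", visible=True)
    panel_b = gr.Markdown("Panel B", visible=False)
    choice.change(toggle, inputs=choice, outputs=[panel_a, panel_b])

if __name__ == "__main__":
    demo.launch()
```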
```diff
@@ -230,7 +230,7 @@ def main(example_enable, sglang_engine_enable, mem_fraction_static, torch_compil
         try:
             print("Start init SgLang engine...")
             from mineru.backend.vlm.vlm_analyze import ModelSingleton
-            modelsingleton = ModelSingleton()
+            model_singleton = ModelSingleton()
             model_params = {
                 "enable_torch_compile": torch_compile_enable
@@ -239,7 +239,7 @@ def main(example_enable, sglang_engine_enable, mem_fraction_static, torch_compil
             if mem_fraction_static is not None:
                 model_params["mem_fraction_static"] = mem_fraction_static
-            predictor = modelsingleton.get_model(
+            predictor = model_singleton.get_model(
                 "sglang-engine",
                 None,
                 None,
```
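The only change in these two hunks is the rename from `modelsingleton` to `model_singleton` (snake_case); behaviour is untouched. `ModelSingleton` itself comes from `mineru.backend.vlm.vlm_analyze` and its internals are not shown on this page. As a rough, generic sketch of the pattern such a class typically implements — this is not MinerU's implementation, and the `get_model` signature here is an assumption — the idea is to cache one loaded model per backend so the expensive engine initialization runs only once per process:

```python
class ModelCache:
    """Generic sketch of a model-singleton: a single shared cache of loaded
    models keyed by backend name, so repeated get_model calls reuse the
    instance instead of re-initializing the engine."""

    _instance = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance._models = {}
        return cls._instance

    def get_model(self, backend, *args, **params):
        # Build lazily on first request, then reuse the cached instance.
        if backend not in self._models:
            self._models[backend] = self._build(backend, *args, **params)
        return self._models[backend]

    def _build(self, backend, *args, **params):
        # Placeholder for real engine construction, e.g. starting an SGLang
        # engine with options such as enable_torch_compile or
        # mem_fraction_static passed through **params.
        return {"backend": backend, "params": params}


# Usage mirroring the shape of the calls in the diff (arguments illustrative).
model_singleton = ModelCache()
predictor = model_singleton.get_model("sglang-engine", None, None,
                                      enable_torch_compile=False)
```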
```diff
@@ -266,8 +266,6 @@ def main(example_enable, sglang_engine_enable, mem_fraction_static, torch_compil
             drop_list = ["pipeline", "vlm-transformers", "vlm-sglang-client"]
             preferred_option = "pipeline"
         backend = gr.Dropdown(drop_list, label="Backend", value=preferred_option)
-        # with gr.Row(visible=False) as lang_options:
         with gr.Row(visible=False) as client_options:
             url = gr.Textbox(label='Server URL', value='http://localhost:30000', placeholder='http://localhost:30000')
         with gr.Row(equal_height=True):
```
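Putting the hunks together: the `backend` dropdown created here drives `update_interface` from the first hunk, which shows the `client_options` row (and its Server URL textbox) only when the `vlm-sglang-client` backend is selected. A hedged, self-contained approximation; the second output row and the exact `change` wiring are assumptions, since the event binding falls outside the lines shown in this commit:

```python
import gradio as gr

def update_interface(backend_choice):
    if backend_choice in ["vlm-transformers", "vlm-sglang-engine"]:
        return gr.update(visible=False), gr.update(visible=False)
    elif backend_choice in ["vlm-sglang-client"]:
        return gr.update(visible=True), gr.update(visible=False)
    elif backend_choice in ["pipeline"]:
        return gr.update(visible=False), gr.update(visible=True)

with gr.Blocks() as demo:
    drop_list = ["pipeline", "vlm-transformers", "vlm-sglang-client"]
    preferred_option = "pipeline"
    backend = gr.Dropdown(drop_list, label="Backend", value=preferred_option)

    with gr.Row(visible=False) as client_options:
        url = gr.Textbox(label='Server URL', value='http://localhost:30000',
                         placeholder='http://localhost:30000')

    # Assumed second output: pipeline-specific options, visible by default.
    with gr.Row(visible=True) as pipeline_options:
        gr.Checkbox(label="Pipeline option (placeholder)")

    backend.change(update_interface, inputs=backend,
                   outputs=[client_options, pipeline_options])

if __name__ == "__main__":
    demo.launch()
```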