Commit 5d4263d4 authored by myhloli's avatar myhloli
Browse files

fix: standardize variable naming and improve interface visibility logic in gradio_app.py

parent 67715001
...@@ -180,7 +180,7 @@ def to_pdf(file_path): ...@@ -180,7 +180,7 @@ def to_pdf(file_path):
def update_interface(backend_choice): def update_interface(backend_choice):
if backend_choice in ["vlm-transformers", "vlm-sglang-engine"]: if backend_choice in ["vlm-transformers", "vlm-sglang-engine"]:
return gr.update(visible=False), gr.update(visible=False) return gr.update(visible=False), gr.update(visible=False)
elif backend_choice in ["vlm-sglang-client"]: # pipeline elif backend_choice in ["vlm-sglang-client"]:
return gr.update(visible=True), gr.update(visible=False) return gr.update(visible=True), gr.update(visible=False)
elif backend_choice in ["pipeline"]: elif backend_choice in ["pipeline"]:
return gr.update(visible=False), gr.update(visible=True) return gr.update(visible=False), gr.update(visible=True)
...@@ -230,7 +230,7 @@ def main(example_enable, sglang_engine_enable, mem_fraction_static, torch_compil ...@@ -230,7 +230,7 @@ def main(example_enable, sglang_engine_enable, mem_fraction_static, torch_compil
try: try:
print("Start init SgLang engine...") print("Start init SgLang engine...")
from mineru.backend.vlm.vlm_analyze import ModelSingleton from mineru.backend.vlm.vlm_analyze import ModelSingleton
modelsingleton = ModelSingleton() model_singleton = ModelSingleton()
model_params = { model_params = {
"enable_torch_compile": torch_compile_enable "enable_torch_compile": torch_compile_enable
...@@ -239,7 +239,7 @@ def main(example_enable, sglang_engine_enable, mem_fraction_static, torch_compil ...@@ -239,7 +239,7 @@ def main(example_enable, sglang_engine_enable, mem_fraction_static, torch_compil
if mem_fraction_static is not None: if mem_fraction_static is not None:
model_params["mem_fraction_static"] = mem_fraction_static model_params["mem_fraction_static"] = mem_fraction_static
predictor = modelsingleton.get_model( predictor = model_singleton.get_model(
"sglang-engine", "sglang-engine",
None, None,
None, None,
...@@ -266,8 +266,6 @@ def main(example_enable, sglang_engine_enable, mem_fraction_static, torch_compil ...@@ -266,8 +266,6 @@ def main(example_enable, sglang_engine_enable, mem_fraction_static, torch_compil
drop_list = ["pipeline", "vlm-transformers", "vlm-sglang-client"] drop_list = ["pipeline", "vlm-transformers", "vlm-sglang-client"]
preferred_option = "pipeline" preferred_option = "pipeline"
backend = gr.Dropdown(drop_list, label="Backend", value=preferred_option) backend = gr.Dropdown(drop_list, label="Backend", value=preferred_option)
# with gr.Row(visible=False) as lang_options:
with gr.Row(visible=False) as client_options: with gr.Row(visible=False) as client_options:
url = gr.Textbox(label='Server URL', value='http://localhost:30000', placeholder='http://localhost:30000') url = gr.Textbox(label='Server URL', value='http://localhost:30000', placeholder='http://localhost:30000')
with gr.Row(equal_height=True): with gr.Row(equal_height=True):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment