Unverified Commit 38000a5f authored by Xinyuan Tong, committed by GitHub

Fix gemma3n with hybrid swa (#8240)


Signed-off-by: Xinyuan Tong <xinyuantong.cs@gmail.com>
Co-authored-by: Xinyuan Tong <xinyuantong.cs@gmail.com>
parent 70251e93
@@ -276,6 +276,7 @@ class ModelRunner:
         self.sampler = Sampler()
         self.load_model()
+        # Check if the model is using hybrid SWA
         if (
             not self.server_args.disable_hybrid_swa_memory
             and self.sliding_window_size is not None
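
The new comment annotates the gate that decides whether the hybrid SWA memory pool is used at all: the feature must not be disabled via the server flag and the loaded model must actually define a sliding window. A minimal sketch of that gating logic, using an illustrative helper name (should_enable_hybrid_swa is not part of sglang):

    # Illustrative sketch, not sglang code: mirrors the two conditions visible
    # in the hunk above.
    def should_enable_hybrid_swa(disable_hybrid_swa_memory, sliding_window_size):
        return (not disable_hybrid_swa_memory) and (sliding_window_size is not None)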
@@ -1008,8 +1009,11 @@ class ModelRunner:
         try:
             layers = self.model.language_model.model.layers
         except:
-            self.is_hybrid = False
-            return
+            try:
+                layers = self.model.language_model.layers
+            except:
+                self.is_hybrid = False
+                return
         for layer in layers:
             if (
......
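
This hunk is the heart of the fix: gemma-3n evidently exposes its decoder blocks at language_model.layers rather than language_model.model.layers, so the lookup that drives hybrid-SWA detection now falls back to the shallower path instead of immediately marking the model as non-hybrid. A minimal sketch of that fallback pattern, with a hypothetical helper (find_decoder_layers is not part of the sglang codebase):

    # Illustrative sketch, not sglang code: resolve the first attribute path
    # that exists on the model; None means the layout is unknown and
    # hybrid-SWA detection should be skipped.
    def find_decoder_layers(model):
        candidate_paths = (
            "language_model.model.layers",  # layout used by most wrappers
            "language_model.layers",        # shallower gemma-3n style layout
        )
        for path in candidate_paths:
            obj = model
            try:
                for attr in path.split("."):
                    obj = getattr(obj, attr)
            except AttributeError:
                continue
            return obj
        return None

With a helper like this, the caller could set self.is_hybrid = False once when the result is None instead of repeating the except branch for every candidate path.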
@@ -105,7 +105,7 @@ suites = {
         TestFile("test_vision_chunked_prefill.py", 175),
         TestFile("test_vlm_input_format.py", 300),
         TestFile("test_vision_openai_server_a.py", 584),
-        TestFile("test_vision_openai_server_b.py", 556),
+        TestFile("test_vision_openai_server_b.py", 620),
         TestFile("test_w8a8_quantization.py", 46),
         TestFile("test_reasoning_parser.py", 5),
     ],
......
@@ -151,6 +151,27 @@ class TestGemma3itServer(TestOpenAIVisionServer):
     pass
 
 
+class TestGemma3nServer(TestOpenAIVisionServer):
+    @classmethod
+    def setUpClass(cls):
+        cls.model = "google/gemma-3n-E2B-it"
+        cls.base_url = DEFAULT_URL_FOR_TEST
+        cls.api_key = "sk-123456"
+        cls.process = popen_launch_server(
+            cls.model,
+            cls.base_url,
+            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
+            other_args=[
+                "--trust-remote-code",
+                "--mem-fraction-static",
+                "0.70",
+                "--cuda-graph-max-bs",
+                "1",
+            ],
+        )
+        cls.base_url += "/v1"
+
+
 class TestKimiVLServer(TestOpenAIVisionServer):
     @classmethod
     def setUpClass(cls):
......