sglang commit 45b3a6a2 (Unverified)
Authored Sep 08, 2025 by Yineng Zhang; committed via GitHub on Sep 08, 2025
Parent: 9a18aa54

Revert "[ModelOpt] Fix Weight Loading for DSR1-FP4 Quantization (#9712)" (#10176)
Changes: 2 changed files with 2 additions and 10 deletions

  +2 -3   python/sglang/srt/layers/linear.py
  +0 -7   python/sglang/srt/layers/quantization/modelopt_quant.py
python/sglang/srt/layers/linear.py (view file @ 45b3a6a2)

@@ -235,9 +235,8 @@ class ReplicatedLinear(LinearBase):
                 loaded_weight = loaded_weight[:1]
             else:
                 raise ValueError(f"{loaded_weight} are not all equal")
-        assert (
-            param.size() == loaded_weight.size()
-        ), f"Loading weight error: param: {param.size()}, loaded_weight: {loaded_weight.size()}"
+
+        assert param.size() == loaded_weight.size()
         param.data.copy_(loaded_weight)
 
     def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
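For context, the post-revert weight-loading path in ReplicatedLinear reduces to roughly the sketch below. This is a hedged reconstruction from the visible hunk only: the standalone replicated_weight_loader function and the param.numel() == 1 guard are illustrative assumptions, not the exact sglang signature or condition.

import torch

def replicated_weight_loader(param: torch.nn.Parameter, loaded_weight: torch.Tensor) -> None:
    # Scalar values on disk (e.g. per-tensor quantization scales) arrive as
    # 0-dim tensors; give them a shape so size comparisons work.
    if loaded_weight.dim() == 0:
        loaded_weight = loaded_weight.reshape(1)
    # A per-tensor scale may arrive replicated (one copy per shard). Collapse
    # it to a single element, but only if all copies agree.
    if param.numel() == 1 and loaded_weight.numel() > 1:  # illustrative guard
        if torch.all(loaded_weight == loaded_weight[0]):
            loaded_weight = loaded_weight[:1]
        else:
            raise ValueError(f"{loaded_weight} are not all equal")
    # Post-revert, the size check is a bare assert again, without the
    # formatted error message that #9712 had introduced.
    assert param.size() == loaded_weight.size()
    param.data.copy_(loaded_weight)

# Example: three identical replicated scales collapse to a single element.
scale = torch.nn.Parameter(torch.zeros(1))
replicated_weight_loader(scale, torch.tensor([0.5, 0.5, 0.5]))
print(scale.data)  # tensor([0.5000])

The diff itself only swaps the three-line assert-with-message for the original bare assert; the replicated-value collapse above it is untouched context.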
python/sglang/srt/layers/quantization/modelopt_quant.py (view file @ 45b3a6a2)

@@ -646,13 +646,6 @@ class ModelOptFp4Config(QuantizationConfig):
             regex_str = pattern.replace(".", r"\.").replace("*", r".*")
             if re.fullmatch(regex_str, prefix):
                 return True
-
-            # Check if the last part of the excluded pattern is contained in the last part of the prefix
-            # This handles fused modules like fused_qkv_a_proj_with_mqa that contain q_a_proj and kv_a_proj_with_mqa
-            pattern_last_part = pattern.split(".")[-1]
-            prefix_last_part = prefix.split(".")[-1]
-            if pattern_last_part in prefix_last_part:
-                return True
         return False
 
     def get_quant_method(
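The net effect of this hunk is to restore strict pattern matching when deciding whether a layer is excluded from FP4 quantization. Below is a minimal sketch of the post-revert behavior; the standalone is_layer_excluded helper is illustrative, not the exact method signature in ModelOptFp4Config.

import re

def is_layer_excluded(prefix: str, exclude_patterns: list) -> bool:
    # Post-revert: a layer is excluded only when its full dotted prefix
    # matches an exclusion pattern, with "*" acting as a wildcard.
    for pattern in exclude_patterns:
        regex_str = pattern.replace(".", r"\.").replace("*", r".*")
        if re.fullmatch(regex_str, prefix):
            return True
    return False

# The reverted code additionally matched when the pattern's last dotted
# component was a substring of the prefix's last component, so a fused module
# such as fused_qkv_a_proj_with_mqa was excluded by a kv_a_proj_with_mqa
# pattern. After this revert that substring fallback is gone:
print(is_layer_excluded(
    "model.layers.0.self_attn.fused_qkv_a_proj_with_mqa",
    ["*.kv_a_proj_with_mqa"],
))  # False (was True with the #9712 fallback)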