Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
text-generation-inference
Commits
bd6e8b3c
Unverified
Commit
bd6e8b3c
authored
Nov 19, 2024
by
drbh
Committed by
GitHub
Nov 19, 2024
Browse files
fix: adjust llama MLP name from dense to mlp to correctly apply lora (#2760)
parent
5489406c
Changes
1
Show whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
3 additions
and
3 deletions
+3
-3
server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
...ion_server/models/custom_modeling/flash_llama_modeling.py
+3
-3
No files found.
server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
View file @
bd6e8b3c
...
@@ -422,7 +422,7 @@ class FlashLlamaLayer(nn.Module):
...
@@ -422,7 +422,7 @@ class FlashLlamaLayer(nn.Module):
if
SparseMoELayer
.
is_supported
(
weights
)
if
SparseMoELayer
.
is_supported
(
weights
)
else
DenseMoELayer
else
DenseMoELayer
)
)
self
.
dense
=
Phi3MoE
(
self
.
mlp
=
Phi3MoE
(
f
"
{
prefix
}
.block_sparse_moe"
,
config
,
moe_layer_cls
,
weights
f
"
{
prefix
}
.block_sparse_moe"
,
config
,
moe_layer_cls
,
weights
)
)
# with moe the layernorms are not rmsnorms and they have bias
# with moe the layernorms are not rmsnorms and they have bias
...
@@ -437,7 +437,7 @@ class FlashLlamaLayer(nn.Module):
...
@@ -437,7 +437,7 @@ class FlashLlamaLayer(nn.Module):
eps
=
config
.
rms_norm_eps
,
eps
=
config
.
rms_norm_eps
,
)
)
else
:
else
:
self
.
dense
=
LlamaMLP
(
self
.
mlp
=
LlamaMLP
(
prefix
=
f
"
{
prefix
}
.mlp"
,
config
=
config
,
weights
=
weights
,
index
=
index
prefix
=
f
"
{
prefix
}
.mlp"
,
config
=
config
,
weights
=
weights
,
index
=
index
)
)
self
.
input_layernorm
=
FastRMSNorm
.
load
(
self
.
input_layernorm
=
FastRMSNorm
.
load
(
...
@@ -493,7 +493,7 @@ class FlashLlamaLayer(nn.Module):
...
@@ -493,7 +493,7 @@ class FlashLlamaLayer(nn.Module):
attn_output
,
res
attn_output
,
res
)
)
mlp_output
=
self
.
dense
(
normed_attn_res_output
,
adapter_data
)
mlp_output
=
self
.
mlp
(
normed_attn_res_output
,
adapter_data
)
if
self
.
residual_multiplier
is
not
None
:
if
self
.
residual_multiplier
is
not
None
:
mlp_output
*=
self
.
residual_multiplier
mlp_output
*=
self
.
residual_multiplier
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment