change / sglang · Commits · 66318ffe

Commit 66318ffe (Unverified)
Authored Nov 18, 2024 by Jani Monoses; committed by GitHub, Nov 18, 2024
Parent: 76619261

Rename layer_idx to layer_id for consistency (#2078)
Showing 2 changed files with 11 additions and 11 deletions:

  python/sglang/srt/models/gemma2.py   +8  -8
  python/sglang/srt/models/olmo.py     +3  -3
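The diff below is mechanical, but the point of the rename is easiest to see in isolation. The following is a minimal, hypothetical sketch (not the actual sglang classes) of the naming pattern the commit standardizes on: the layer index is called layer_id at every level, so constructor parameters, attributes, and call sites all share one name.

# Hypothetical minimal sketch (not the actual sglang classes) of the naming
# pattern this commit standardizes on: the layer index is `layer_id` at every
# level, so parameters, attributes, and call sites all share one name.

class Attention:
    def __init__(self, layer_id: int):
        # Previously some models stored this as self.layer_idx while others
        # used self.layer_id; after the rename only layer_id remains.
        self.layer_id = layer_id


class DecoderLayer:
    def __init__(self, layer_id: int):
        # The keyword argument now matches the parameter name one level down.
        self.self_attn = Attention(layer_id=layer_id)


# Building the layer stack reads uniformly as well.
layers = [DecoderLayer(layer_id) for layer_id in range(4)]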
python/sglang/srt/models/gemma2.py  (view file @ 66318ffe)

@@ -97,7 +97,7 @@ class Gemma2MLP(nn.Module):
 class Gemma2Attention(nn.Module):
     def __init__(
         self,
-        layer_idx: int,
+        layer_id: int,
         config: PretrainedConfig,
         hidden_size: int,
         num_heads: int,
@@ -109,7 +109,7 @@ class Gemma2Attention(nn.Module):
         quant_config: Optional[QuantizationConfig] = None,
     ) -> None:
         super().__init__()
-        self.layer_idx = layer_idx
+        self.layer_id = layer_id
         self.config = config
         self.hidden_size = hidden_size
         tp_size = get_tensor_model_parallel_world_size()
@@ -156,13 +156,13 @@ class Gemma2Attention(nn.Module):
             dtype=torch.get_default_dtype(),
         )
-        use_sliding_window = layer_idx % 2 == 0 and hasattr(config, "sliding_window")
+        use_sliding_window = layer_id % 2 == 0 and hasattr(config, "sliding_window")
         self.attn = RadixAttention(
             self.num_heads,
             self.head_dim,
             self.scaling,
             num_kv_heads=self.num_kv_heads,
-            layer_id=layer_idx,
+            layer_id=layer_id,
             logit_cap=self.config.attn_logit_softcapping,
             sliding_window_size=(
                 get_attention_sliding_window_size(config)
@@ -188,7 +188,7 @@ class Gemma2Attention(nn.Module):
 class Gemma2DecoderLayer(nn.Module):
     def __init__(
         self,
-        layer_idx: int,
+        layer_id: int,
         config: PretrainedConfig,
         cache_config=None,
         quant_config: Optional[QuantizationConfig] = None,
@@ -196,7 +196,7 @@ class Gemma2DecoderLayer(nn.Module):
         super().__init__()
         self.hidden_size = config.hidden_size
         self.self_attn = Gemma2Attention(
-            layer_idx=layer_idx,
+            layer_id=layer_id,
             config=config,
             hidden_size=self.hidden_size,
             num_heads=config.num_attention_heads,
@@ -269,8 +269,8 @@ class Gemma2Model(nn.Module):
         )
         self.layers = nn.ModuleList(
             [
-                Gemma2DecoderLayer(layer_idx, config, cache_config, quant_config)
-                for layer_idx in range(config.num_hidden_layers)
+                Gemma2DecoderLayer(layer_id, config, cache_config, quant_config)
+                for layer_id in range(config.num_hidden_layers)
             ]
         )
         self.norm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
python/sglang/srt/models/olmo.py  (view file @ 66318ffe)

@@ -223,8 +223,8 @@ class OlmoModel(nn.Module):
         )
         self.layers = nn.ModuleList(
             [
-                OlmoDecoderLayer(config, layer_idx, quant_config)
-                for layer_idx in range(config.num_hidden_layers)
+                OlmoDecoderLayer(config, layer_id, quant_config)
+                for layer_id in range(config.num_hidden_layers)
             ]
         )
         self.norm = nn.LayerNorm(
@@ -250,7 +250,7 @@ class OlmoModel(nn.Module):
         hidden_states = input_embeds

         # Apply blocks one-by-one.
-        for layer_idx, decoder_layer in enumerate(self.layers):
+        for layer_id, decoder_layer in enumerate(self.layers):
             # shape: (batch_size, seq_len, d_model)
             hidden_states = decoder_layer(
                 positions,