OpenDAS / text-generation-inference

Commit abc32537 (unverified)
Fixing mistral nemo. (#2276)
Authored Jul 23, 2024 by Nicolas Patry; committed by GitHub on Jul 23, 2024
Parent: 47004651

Showing 3 changed files with 10 additions and 4 deletions (+10, -4):

  server/text_generation_server/models/__init__.py                                  +0 -2
  server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py    +4 -1
  server/text_generation_server/models/flash_causal_lm.py                           +6 -1
server/text_generation_server/models/__init__.py

@@ -762,8 +762,6 @@ def get_model(
                 default_dtype=torch.bfloat16,
                 trust_remote_code=trust_remote_code,
                 lora_adapter_ids=lora_adapter_ids,
-                # hidden_size / num_attention_heads is wrong in `google/gemma-2-9b-it`
-                head_size=config_dict["head_dim"],
             )
         elif sharded:
             raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Gemma2"))
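With head_dim now resolved generically inside the model code (see the two hunks below), the Gemma 2 branch no longer needs to force head_size from the raw config, so the two lines above can be dropped. A minimal, self-contained sketch of the lookup that replaces them; the numbers are assumptions for illustration (roughly what google/gemma-2-9b-it reports) and do not appear in this diff:

# Hypothetical illustration of the generic head-size lookup; values assumed.
config_dict = {"hidden_size": 3584, "num_attention_heads": 16, "head_dim": 256}

head_size = (
    config_dict["head_dim"]
    if "head_dim" in config_dict
    else config_dict["hidden_size"] // config_dict["num_attention_heads"]
)
print(head_size)  # 256 here; the naive quotient would give 3584 // 16 == 224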
server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py

@@ -117,7 +117,10 @@ class MistralAttention(torch.nn.Module):
         )
         self.num_heads = config.num_attention_heads
         self.hidden_size = config.hidden_size
-        self.head_size = self.hidden_size // self.num_heads
+        if hasattr(config, "head_dim"):
+            self.head_size = config.head_dim
+        else:
+            self.head_size = self.hidden_size // self.num_heads

         self.rotary_emb = PositionRotaryEmbedding.static(
             config=config,
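This hunk is the core of the fix: MistralAttention now prefers an explicit config.head_dim over the derived hidden_size // num_heads. Mistral Nemo is reported to ship head_dim=128 alongside hidden_size=5120 and 32 attention heads, so the old derivation would have produced 160; those exact numbers are an assumption for illustration, not taken from this diff. A stripped-down sketch of just the changed lines, runnable on its own:

import torch
from types import SimpleNamespace

class HeadSizeOnlyAttention(torch.nn.Module):
    # Not the real MistralAttention; only the changed head-size selection,
    # isolated so it can be run and inspected by itself.
    def __init__(self, config):
        super().__init__()
        self.num_heads = config.num_attention_heads
        self.hidden_size = config.hidden_size
        if hasattr(config, "head_dim"):
            # New behaviour: an explicit head_dim from the checkpoint wins.
            self.head_size = config.head_dim
        else:
            # Old behaviour, kept as the fallback.
            self.head_size = self.hidden_size // self.num_heads

# Assumed Nemo-like config values for illustration only.
nemo_like = SimpleNamespace(hidden_size=5120, num_attention_heads=32, head_dim=128)
print(HeadSizeOnlyAttention(nemo_like).head_size)  # 128, not 5120 // 32 == 160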
server/text_generation_server/models/flash_causal_lm.py

@@ -925,7 +925,12 @@ class FlashCausalLM(Model):
         assert self.num_kv_heads > 0

         if head_size is None:
-            self.head_size = config.hidden_size // config.num_attention_heads
+            # Some models use GQA and different sizes for o_proj
+            # and q_proj, that allows for that.
+            if hasattr(config, "head_dim"):
+                self.head_size = config.head_dim
+            else:
+                self.head_size = config.hidden_size // config.num_attention_heads
         else:
             self.head_size = head_size
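FlashCausalLM applies the same precedence on the server side, with an explicitly passed head_size still taking priority over anything in the config. A minimal sketch of that precedence as a free function; pick_head_size is a hypothetical helper written for illustration, not part of the repository:

from types import SimpleNamespace
from typing import Optional

def pick_head_size(config, head_size: Optional[int] = None) -> int:
    # Precedence mirrored from the hunk above:
    # explicit argument > config.head_dim > hidden_size // num_attention_heads.
    if head_size is not None:
        return head_size
    if hasattr(config, "head_dim"):
        return config.head_dim
    return config.hidden_size // config.num_attention_heads

cfg = SimpleNamespace(hidden_size=5120, num_attention_heads=32, head_dim=128)
print(pick_head_size(cfg))         # 128 via config.head_dim
print(pick_head_size(cfg, 160))    # 160: an explicit override still wins
cfg_no_dim = SimpleNamespace(hidden_size=4096, num_attention_heads=32)
print(pick_head_size(cfg_no_dim))  # 128 via the hidden_size // heads fallback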