chenpangpang / transformers / Commits / e694e985

Unverified commit e694e985, authored Jan 29, 2024 by xkszltl, committed by GitHub on Jan 29, 2024

Fix typo of `Block`. (#28727)

parent 9e8f35fa
Showing 1 changed file with 10 additions and 2 deletions:

src/transformers/models/mixtral/modeling_mixtral.py (+10, -2)
@@ -787,7 +787,7 @@ MIXTRAL_ATTENTION_CLASSES = {
 }
 
 
-class MixtralBLockSparseTop2MLP(nn.Module):
+class MixtralBlockSparseTop2MLP(nn.Module):
     def __init__(self, config: MixtralConfig):
         super().__init__()
         self.ffn_dim = config.intermediate_size
@@ -805,6 +805,14 @@ class MixtralBLockSparseTop2MLP(nn.Module):
         return current_hidden_states
 
 
+class MixtralBLockSparseTop2MLP(MixtralBlockSparseTop2MLP):
+    def __init__(self, *args, **kwargs):
+        logger.warning_once(
+            "MixtralBLockSparseTop2MLP is deprecated by MixtralBlockSparseTop2MLP and will be removed in v4.40."
+        )
+        super().__init__(*args, **kwargs)
+
+
 class MixtralSparseMoeBlock(nn.Module):
     """
     This implementation is
@@ -827,7 +835,7 @@ class MixtralSparseMoeBlock(nn.Module):
         # gating
         self.gate = nn.Linear(self.hidden_dim, self.num_experts, bias=False)
 
-        self.experts = nn.ModuleList([MixtralBLockSparseTop2MLP(config) for _ in range(self.num_experts)])
+        self.experts = nn.ModuleList([MixtralBlockSparseTop2MLP(config) for _ in range(self.num_experts)])
 
     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         """ """
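The eight added lines are a backward-compatibility shim: the misspelled `MixtralBLockSparseTop2MLP` is redefined as a thin subclass of the corrected `MixtralBlockSparseTop2MLP`, so existing imports keep working while logging a one-time deprecation warning until the alias is removed in v4.40. A minimal sketch of the resulting behavior, assuming a transformers release that includes this commit but predates v4.40, with PyTorch installed; the config sizes below are arbitrary illustration values, not from the commit:

```python
import torch

from transformers import MixtralConfig
from transformers.models.mixtral.modeling_mixtral import (
    MixtralBLockSparseTop2MLP,  # deprecated misspelling, now a subclass alias
    MixtralBlockSparseTop2MLP,  # corrected name introduced by this commit
)

# Tiny config so the expert MLP is cheap to instantiate (sizes are arbitrary).
config = MixtralConfig(hidden_size=32, intermediate_size=64)

# Constructing via the old name logs the one-time warning from the diff above,
# then behaves exactly like the corrected class.
legacy = MixtralBLockSparseTop2MLP(config)
assert isinstance(legacy, MixtralBlockSparseTop2MLP)

# The forward pass is unchanged: the expert MLP maps
# hidden_size -> intermediate_size -> hidden_size.
x = torch.randn(4, config.hidden_size)
print(legacy(x).shape)  # torch.Size([4, 32])
```

Subclassing, rather than a plain `MixtralBLockSparseTop2MLP = MixtralBlockSparseTop2MLP` assignment, is what lets the shim hook `__init__` to emit the warning once while leaving behavior and `isinstance` checks intact.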