OpenDAS / FastMoE
Commit 85306aa7, authored Jan 03, 2021 by Jiezhong Qiu
CUDA error at moe_cuda_kernel.cu:86 code=13(CUBLAS_STATUS_EXECUTION_FAILED)
parent 20cc924b
Showing 2 changed files with 5 additions and 2 deletions (+5 -2):
pytorch/cuda/cuda_stream_manager.h  +3 -0
pytorch/mem_transformer.py          +2 -2
pytorch/cuda/cuda_stream_manager.h

@@ -5,11 +5,14 @@
 #include <cublas_v2.h>
 #include <helper_cuda.h>
+#include <cstdio>
 class CudaStreamManager {
 public:
     CudaStreamManager(const size_t num_expert_, const int device_): num_expert(num_expert_), device(device_) {
         checkCudaErrors(cudaSetDevice(device));
+        printf("set device %d\n", device);
         streams = new cudaStream_t[num_expert];
         checkCudaErrors(cublasCreate(&handle));
         for (size_t i = 0; i < num_expert; ++i) {
...
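For context: the constructor shown above pins the stream manager to a single GPU (cudaSetDevice), allocates one CUDA stream per expert, and creates a cuBLAS handle; the added #include <cstdio> and printf only log which device was selected while chasing the CUBLAS_STATUS_EXECUTION_FAILED from the commit title. A minimal, hypothetical sketch of the matching check on the Python side follows (the helper name and its use are illustrative, not part of this repository):

import torch

# Hypothetical sanity check (not in this commit): tensors handed to the MoE
# CUDA kernels should live on the same device the stream manager was created
# for; a mismatch is one common way a cuBLAS call ends up failing to execute.
def assert_on_device(tensor: torch.Tensor, device_index: int) -> None:
    assert tensor.is_cuda, "expected a CUDA tensor"
    assert tensor.device.index == device_index, (tensor.device, device_index)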
pytorch/mem_transformer.py

@@ -45,7 +45,7 @@ class CustomizedMoEPositionwiseFF(nn.Module):
         self.d_inner = d_inner
         self.dropout = dropout
-        self.gate = nn.Linear(d_model, d_inner)
+        self.gate = nn.Linear(d_model, num_expert)
         self.moe1 = MOELayer(num_expert=num_expert, in_feat=d_model, out_feat=d_inner)
         self.moe2 = MOELayer(num_expert=num_expert, in_feat=d_inner, out_feat=d_model)
...
@@ -81,7 +81,7 @@ class CustomizedMoEPositionwiseFF(nn.Module):
         for i in range(self.top_k):
             print("top %d" % i)
             gate_idx = gate_top_k_idx[:, i].contiguous()
-            print(inp.size(), gate_idx.size())
+            print(inp.size(), gate_idx.size(), inp.device, gate_idx.device)
             x = self.moe1(inp, gate_idx)
             x = self.dropout(F.relu(x))
             # x = F.pad(x, pad=(0, 1), mode='constant', value=1.0)
...
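The gate change in the first hunk is the substantive fix: the gate scores each token against every expert, and gate_top_k_idx is taken from those scores, so the gate's output dimension has to be num_expert rather than d_inner. With the old nn.Linear(d_model, d_inner), a top-k index could exceed num_expert - 1 and select a non-existent expert inside the CUDA kernel, which is one plausible source of the execution failure in the commit title. A small sketch under assumed sizes (only the variable names come from the diff; the concrete numbers are illustrative):

import torch
import torch.nn as nn

# Illustrative sizes; only the names mirror the diff above.
d_model, num_expert, top_k = 512, 4, 2

gate = nn.Linear(d_model, num_expert)   # one score per expert, per token

inp = torch.randn(10, d_model)                        # 10 tokens
gate_scores = gate(inp)                               # shape (10, num_expert)
_, gate_top_k_idx = gate_scores.topk(top_k, dim=-1)   # shape (10, top_k)

# Every selected index now names a real expert.
assert int(gate_top_k_idx.max()) < num_expert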