OpenDAS / apex / Commits / 8cdcc821

Commit 8cdcc821, authored Sep 02, 2021 by Thor Johnsen

Bug fixes

Parent: 67a0ffcb
Showing 2 changed files with 70 additions and 1 deletion:

  apex/contrib/bottleneck/bottleneck.py        +1  -1
  apex/contrib/csrc/bottleneck/bottleneck.cpp  +69 -0
apex/contrib/bottleneck/bottleneck.py
@@ -354,7 +354,7 @@ class SpatialBottleneckFunction(torch.autograd.Function):
                 btm_halo = all_halos[ctx.local_rank+1][:,:1,:,:]
                 fat_halo[:,:2,:,:].copy_(grad_out2[:,Hs-2:,:,:])
                 fat_halo[:,2:,:,:].copy_(btm_halo)
-                relu_halo[:,:2,:,:].copy_(relu1[:,Hs-2,:,:])
+                relu_halo[:,:2,:,:].copy_(relu1[:,Hs-2:,:,:])
                 relu_halo[:,2:,:,:].zero_()
                 btm_grad_out1_halo = fast_bottleneck.backward_grad_out1_halo(ctx.nhwc, ctx.stride_1x1, t_list, grads, fat_halo, relu_halo)
                 btm_grad_out1_halo = btm_grad_out1_halo[:,1:2,:,:]
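The one-character fix above replaces an integer index with a slice. A minimal sketch of why that matters, with illustrative toy shapes (explicit NHWC layout, so dim 1 is H): the integer index drops the H dimension, and copy_ then broadcasts that single row into both halo rows, silently duplicating row Hs-2 instead of copying rows Hs-2 and Hs-1.

import torch

# Toy stand-ins for the tensors in SpatialBottleneckFunction.backward();
# names and sizes here are illustrative only.
N, Hs, W, C = 1, 8, 4, 3
relu1 = torch.arange(N * Hs * W * C, dtype=torch.float32).reshape(N, Hs, W, C)
relu_halo = torch.empty(N, 3, W, C)

# Old line: the integer index drops the H dimension (shape [N, W, C]); copy_
# broadcasts that single row into both destination rows, so the halo holds
# row Hs-2 twice.
relu_halo[:, :2, :, :].copy_(relu1[:, Hs - 2, :, :])
assert torch.equal(relu_halo[:, 0], relu_halo[:, 1])    # duplicated row: wrong

# Fixed line: the slice keeps the H dimension (shape [N, 2, W, C]), so rows
# Hs-2 and Hs-1 are copied unchanged.
relu_halo[:, :2, :, :].copy_(relu1[:, Hs - 2:, :, :])
assert torch.equal(relu_halo[:, 1], relu1[:, Hs - 1])   # last row preserved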
apex/contrib/csrc/bottleneck/bottleneck.cpp
@@ -2158,6 +2158,73 @@ at::Tensor bottleneck_backward_grad_out2(bool explicit_nhwc, int stride_1X1, std
   return grad_out2;
 }
 
+// compute dgrad of 3x3 convolution without fusing with drelu and dscale
+at::Tensor bottleneck_backward_dgrad1(bool explicit_nhwc, int stride_1X1, std::vector<at::Tensor> inputs, std::vector<at::Tensor> outputs, at::Tensor grad_out2) {
+
+  bool requires_grad = inputs[0].requires_grad();
+
+  std::cout << std::fixed;
+  auto output_format = explicit_nhwc ? at::MemoryFormat::Contiguous : at::MemoryFormat::ChannelsLast;
+
+  // dgrad
+  at::Half* dy2 = grad_out2.data_ptr<at::Half>();
+
+  // dgrad
+  auto dgrad1 = at::empty(backward_state.outdim1, inputs[0].type(), output_format);
+  at::Half* dy1 = dgrad1.data_ptr<at::Half>();
+  at::Half* w = inputs[2].data_ptr<at::Half>();
+  at::Half* z = inputs[4].data_ptr<at::Half>();
+
+  at::Half* relu1 = inputs[12].data_ptr<at::Half>();
+  //printf("relu.shape = [%d,%d,%d,%d]\n",inputs[12].size(0),inputs[12].size(1),inputs[12].size(2),inputs[12].size(3));
+
+  // dgrad
+  run_dconv(backward_state.outdimA1,
+            backward_state.padA1,
+            backward_state.convstrideA,
+            backward_state.dilationA,
+            backward_state.filterdimA2,
+            backward_state.outdimA2,
+            CUDNN_DATA_HALF,
+            dy1,
+            w,
+            dy2,
+            CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_DATA_DESCRIPTOR);
+
+  return dgrad1;
+}
+
+at::Tensor bottleneck_backward_dgrad1_halo(bool explicit_nhwc, int stride_1X1, std::vector<at::Tensor> inputs, std::vector<at::Tensor> outputs, at::Tensor grad_out2_halo) {
+
+  bool requires_grad = inputs[0].requires_grad();
+
+  std::cout << std::fixed;
+  auto output_format = explicit_nhwc ? at::MemoryFormat::Contiguous : at::MemoryFormat::ChannelsLast;
+
+  // dgrad
+  at::Half* dy2h = grad_out2_halo.data_ptr<at::Half>();
+
+  // dgrad
+  auto dgrad1_halo = at::empty(backward_state.outdim1h, inputs[0].type(), output_format);
+  at::Half* dy1h = dgrad1_halo.data_ptr<at::Half>();
+  at::Half* w = inputs[2].data_ptr<at::Half>();
+
+  // dgrad
+  run_dconv(backward_state.outdimA1h,
+            backward_state.padA1,
+            backward_state.convstrideA,
+            backward_state.dilationA,
+            backward_state.filterdimA2,
+            backward_state.outdimA2h,
+            CUDNN_DATA_HALF,
+            dy1h,
+            w,
+            dy2h,
+            CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_DATA_DESCRIPTOR);
+
+  return dgrad1_halo;
+}
 
 at::Tensor bottleneck_backward_grad_out1(bool explicit_nhwc, int stride_1X1, std::vector<at::Tensor> inputs, std::vector<at::Tensor> outputs, at::Tensor grad_out2) {
 
   bool requires_grad = inputs[0].requires_grad();
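The new bottleneck_backward_dgrad1 in the hunk above uses cuDNN to compute the data gradient of the bottleneck's 3x3 convolution on its own, without the dReLU/dscale work that backward_grad_out1 fuses in. As a rough plain-PyTorch reference, under assumed toy shapes and stride/padding (the real kernel runs on half-precision NHWC tensors whose dims come from backward_state), the result corresponds to:

import torch

# Assumed toy NCHW shapes; illustrative only.
N, C, H, W = 2, 64, 16, 16
weight = torch.randn(C, C, 3, 3)       # 3x3 conv weight (the role of inputs[2])
grad_out2 = torch.randn(N, C, H, W)    # gradient w.r.t. the 3x3 conv output

# Reference dgrad: gradient w.r.t. the 3x3 conv input, nothing fused on top.
dgrad1_ref = torch.nn.grad.conv2d_input((N, C, H, W), weight, grad_out2,
                                        stride=1, padding=1)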
@@ -2480,6 +2547,8 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
   m.def("backward_grad_out2", &bottleneck_backward_grad_out2, "Bottleneck block backward");
   m.def("backward_grad_out1", &bottleneck_backward_grad_out1, "Bottleneck block backward");
   m.def("backward_grad_out1_halo", &bottleneck_backward_grad_out1_halo, "Bottleneck block backward");
+  m.def("backward_dgrad1", &bottleneck_backward_dgrad1, "Bottleneck block backward");
+  m.def("backward_dgrad1_halo", &bottleneck_backward_dgrad1_halo, "Bottleneck block backward");
   m.def("backward_wgrad2", &bottleneck_backward_wgrad2, "Bottleneck block backward");
   m.def("backward_wgrad2_halo", &bottleneck_backward_wgrad2_halo, "Bottleneck block backward");
   m.def("backward_rest", &bottleneck_backward_rest, "Bottleneck block backward");
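For the halo variant (bottleneck_backward_dgrad1_halo), the same dconv runs on a thin 3-row slab of gradient rows assembled around a slab boundary, and the Python side keeps only the middle row of the result (cf. btm_grad_out1_halo[:, 1:2, :, :] in the first file). A sketch, under single-process toy assumptions with the neighbouring rank's row taken as zero, of why that middle row matches the corresponding boundary row of the full dgrad:

import torch

# Toy NCHW shapes (the real kernels use half-precision NHWC tensors).
N, C, H, W = 1, 8, 6, 6
weight = torch.randn(C, C, 3, 3)
grad_out2 = torch.randn(N, C, H, W)

# Full dgrad over the whole slab.
full = torch.nn.grad.conv2d_input((N, C, H, W), weight, grad_out2, padding=1)

# 3-row "fat halo" of gradients around the bottom boundary row H-1:
# two local rows plus one remote row (zero here, as at a spatial boundary).
halo = torch.cat([grad_out2[:, :, H - 2:, :], torch.zeros(N, C, 1, W)], dim=2)
halo_dgrad = torch.nn.grad.conv2d_input((N, C, 3, W), weight, halo, padding=1)

# The middle row of the halo dgrad equals the boundary row of the full dgrad.
assert torch.allclose(halo_dgrad[:, :, 1], full[:, :, H - 1], atol=1e-5)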