OpenDAS / TransformerEngine · Commits · Commit 804f1203 (Unverified)

Fix BF16 ONNX export for successful ONNX Runtime Verification (#290)

Authored Jun 20, 2023 by asfiyab-nvidia; committed by GitHub, Jun 20, 2023
Signed-off-by: Asfiya Baig <asfiyab@nvidia.com>
Parent: 0426feb6
Showing 2 changed files with 9 additions and 1 deletion:
- transformer_engine/pytorch/attention.py (+6, -1)
- transformer_engine/pytorch/te_onnx_extensions.py (+3, -0)
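For context, the commit title refers to Transformer Engine's ONNX export path: a TE module is exported under TE's ONNX export mode and the resulting graph is then verified with ONNX Runtime. A hedged sketch of that flow, assuming a CUDA device and that the onnx_export context manager is importable from transformer_engine.pytorch.export as in TE's own ONNX tests; the exact model, shapes, and opset below are illustrative, not taken from this commit:

    import torch
    import transformer_engine.pytorch as te
    from transformer_engine.pytorch.export import onnx_export

    # Illustrative only: export a BF16 TE layer to ONNX, the scenario
    # this commit fixes. Requires a CUDA device.
    model = te.Linear(128, 128, params_dtype=torch.bfloat16).cuda().eval()
    inp = torch.randn(4, 128, dtype=torch.bfloat16, device="cuda")
    with onnx_export(True):  # enables the export-mode branches patched below
        torch.onnx.export(model, (inp,), "te_linear_bf16.onnx", opset_version=17)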
transformer_engine/pytorch/attention.py

@@ -169,14 +169,19 @@ class UnfusedDotProductAttention(torch.nn.Module):
        key_layer = key_layer.reshape(output_size[3],
                                      output_size[0] * output_size[1], -1)

        # preallocting result tensor: [b * np, sq, sk]
+       # WAR to set dtype to FP32 as ONNX lacks BF16 support for ConstantOfShape operator
+       is_bf16 = query_layer.dtype == torch.bfloat16
        matmul_result = torch.empty(
            output_size[0] * output_size[1],
            output_size[2],
            output_size[3],
-           dtype=query_layer.dtype,
+           dtype=torch.float32 if is_in_onnx_export_mode() and is_bf16 else query_layer.dtype,
            device=torch.cuda.current_device(),
        )
+       if is_in_onnx_export_mode() and is_bf16:
+           matmul_result = matmul_result.bfloat16()

        scale = self.norm_factor
        if apply_qk_layer_scaling:
            scale *= self.layer_number
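The hunk above works around the limitation its own comment names: tracing torch.empty produces an ONNX ConstantOfShape node, which at the time had no BF16 variant, while Cast does support BFLOAT16. So during export the buffer is allocated as FP32 and cast back. A minimal standalone sketch of the same pattern, where the hypothetical `exporting` flag and `preallocate_matmul_result` helper stand in for TE's is_in_onnx_export_mode() and the inline code:

    import torch

    def preallocate_matmul_result(query_layer: torch.Tensor, shape, exporting: bool) -> torch.Tensor:
        """Sketch of the BF16 export workaround; not TE's actual code."""
        is_bf16 = query_layer.dtype == torch.bfloat16
        # Under export, torch.empty traces to ConstantOfShape, which (per the
        # in-diff comment) lacks BF16 support, so allocate FP32 instead.
        buf = torch.empty(
            *shape,
            dtype=torch.float32 if exporting and is_bf16 else query_layer.dtype,
            device=query_layer.device,
        )
        if exporting and is_bf16:
            # Cast back to BF16; this traces to an ONNX Cast node,
            # which does accept BFLOAT16.
            buf = buf.bfloat16()
        return buf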
transformer_engine/pytorch/te_onnx_extensions.py

@@ -254,6 +254,7 @@ def onnx_te_gemm(
    """ONNX graph for te_gemm"""
    # pylint: disable=unused-argument
    is_fp16 = is_dtype_fp16(inputs)
+   is_bf16 = is_dtype_bf16(inputs)
    if input_type == int(tex.DType.kFloat8E4M3):
        inputs = dequantize(g, inputs, input_scale_inverse, input_fp8_tensor, out_type)

@@ -277,6 +278,8 @@ def onnx_te_gemm(
    else:
        if is_fp16:
            output = g.op("Cast", output, to_i=_C_onnx.TensorProtoDataType.FLOAT16)
+       elif is_bf16:
+           output = g.op("Cast", output, to_i=_C_onnx.TensorProtoDataType.BFLOAT16)
    return output
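In the TorchScript-based symbolic API used here, g.op("Cast", x, to_i=...) appends an ONNX Cast node with the given target type, so the BF16 GEMM output is cast back to BFLOAT16 exactly as the FP16 branch already did. A minimal sketch of a symbolic function built from the same calls; this is a toy op for illustration, not TE's actual onnx_te_gemm:

    import torch._C._onnx as _C_onnx
    from torch.onnx import symbolic_helper

    @symbolic_helper.parse_args("v")
    def toy_symbolic(g, inp):
        # Upcast to FP32 for an op that lacks BF16 support in the ONNX graph...
        fp32 = g.op("Cast", inp, to_i=_C_onnx.TensorProtoDataType.FLOAT)
        out = g.op("Relu", fp32)  # stand-in for the FP32 computation
        # ...then cast the result back; Cast does support BFLOAT16.
        return g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.BFLOAT16)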