Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
gaoqiong
MIGraphX
Commits
d7dfe995
Commit
d7dfe995
authored
Dec 05, 2023
by
Khalique Ahmed
Browse files
Merge branch 'develop' of
https://github.com/ROCmSoftwarePlatform/AMDMIGraphX
into auto_contig_fix
parents
c6ec6638
e3e00547
Changes
108
Expand all
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
1391 additions
and
522 deletions
+1391
-522
src/onnx/parse_unique.cpp
src/onnx/parse_unique.cpp
+92
-0
src/targets/cpu/dnnl.cpp
src/targets/cpu/dnnl.cpp
+1
-0
src/targets/cpu/lowering.cpp
src/targets/cpu/lowering.cpp
+12
-1
src/targets/gpu/CMakeLists.txt
src/targets/gpu/CMakeLists.txt
+0
-4
src/targets/gpu/compile_gen.cpp
src/targets/gpu/compile_gen.cpp
+10
-0
src/targets/gpu/device/gather.cpp
src/targets/gpu/device/gather.cpp
+0
-67
src/targets/gpu/device/pad.cpp
src/targets/gpu/device/pad.cpp
+0
-66
src/targets/gpu/fuse_mlir.cpp
src/targets/gpu/fuse_mlir.cpp
+279
-170
src/targets/gpu/gather.cpp
src/targets/gpu/gather.cpp
+0
-45
src/targets/gpu/include/migraphx/gpu/device/gather.hpp
src/targets/gpu/include/migraphx/gpu/device/gather.hpp
+0
-44
src/targets/gpu/include/migraphx/gpu/device/pad.hpp
src/targets/gpu/include/migraphx/gpu/device/pad.hpp
+0
-48
src/targets/gpu/include/migraphx/gpu/fuse_mlir.hpp
src/targets/gpu/include/migraphx/gpu/fuse_mlir.hpp
+2
-1
src/targets/gpu/include/migraphx/gpu/gather.hpp
src/targets/gpu/include/migraphx/gpu/gather.hpp
+0
-62
src/targets/gpu/include/migraphx/gpu/gemm_softmax_gemm.hpp
src/targets/gpu/include/migraphx/gpu/gemm_softmax_gemm.hpp
+4
-0
src/targets/gpu/kernels/include/migraphx/kernels/bit_cast.hpp
...targets/gpu/kernels/include/migraphx/kernels/bit_cast.hpp
+37
-0
src/targets/gpu/kernels/include/migraphx/kernels/float8.hpp
src/targets/gpu/kernels/include/migraphx/kernels/float8.hpp
+568
-0
src/targets/gpu/kernels/include/migraphx/kernels/float8_impl.hpp
...gets/gpu/kernels/include/migraphx/kernels/float8_impl.hpp
+331
-0
src/targets/gpu/kernels/include/migraphx/kernels/layernorm.hpp
...argets/gpu/kernels/include/migraphx/kernels/layernorm.hpp
+12
-9
src/targets/gpu/kernels/include/migraphx/kernels/math.hpp
src/targets/gpu/kernels/include/migraphx/kernels/math.hpp
+40
-3
src/targets/gpu/kernels/include/migraphx/kernels/pad.hpp
src/targets/gpu/kernels/include/migraphx/kernels/pad.hpp
+3
-2
No files found.
src/
targets/gpu/include/migraphx/gpu/pad.h
pp
→
src/
onnx/parse_unique.c
pp
View file @
d7dfe995
/*
/*
* The MIT License (MIT)
* The MIT License (MIT)
*
*
* Copyright (c) 2015-202
2
Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-202
3
Advanced Micro Devices, Inc. All rights reserved.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* of this software and associated documentation files (the "Software"), to deal
...
@@ -21,41 +21,72 @@
...
@@ -21,41 +21,72 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
* THE SOFTWARE.
*/
*/
#ifndef MIGRAPHX_GUARD_RTGLIB_PAD_HPP
#define MIGRAPHX_GUARD_RTGLIB_PAD_HPP
#include <migraphx/argument.hpp>
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/reflect.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/op/pad.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/tune_axis.hpp>
#include <optional>
namespace
migraphx
{
namespace
migraphx
{
inline
namespace
MIGRAPHX_INLINE_NS
{
inline
namespace
MIGRAPHX_INLINE_NS
{
namespace
gpu
{
namespace
onnx
{
struct
context
;
// generate unique output stream y, given input stream x;
//
// case unsorted:
// input x: [2, 1, 1, 3, 4, 3], attr_sorted = 0;
// output(s):
// y: [2, 1, 3, 4] --- the unique output
// y_indices: [0, 1, 3, 4] --- first incidence, in terms of indices of x
// x_rev_indices: [0, 1, 1, 2, 3, 2] --- x seen in terms of indices of y
// y_count: [1, 2, 2, 1] -- count at each y_index. sum = len(x)
//
// case sorted:
// input x: [2, 1, 1, 3, 4, 3], attr_sorted = 1;
// output(s):
// y: [1, 2, 3, 4] --- the unique output
// y_indices: [1, 0, 3, 4] --- first incidence, in terms of indices of x
// x_rev_indices: [1, 0, 0, 2, 3, 2] --- x seen in terms of indices of y
// y_count: [2, 1, 2, 1] -- count at each y_index. sum = len(x)
struct
hip_pad
struct
parse_unique
:
op_parser
<
parse_unique
>
{
{
op
::
pad
op
;
template
<
class
Self
,
class
F
>
std
::
vector
<
op_desc
>
operators
()
const
{
return
{{
"Unique"
}};
}
static
auto
reflect
(
Self
&
self
,
F
f
)
{
return
migraphx
::
reflect
(
self
.
op
,
f
);
}
std
::
string
name
()
const
{
return
"gpu::pad"
;
}
std
::
vector
<
instruction_ref
>
parse
(
const
op_desc
&
opd
,
shape
compute_shape
(
std
::
vector
<
shape
>
inputs
)
const
;
const
onnx_parser
&
parser
,
argument
const
onnx_parser
::
node_info
&
info
,
compute
(
context
&
ctx
,
const
shape
&
output_shape
,
const
std
::
vector
<
argument
>&
args
)
const
;
std
::
vector
<
instruction_ref
>
args
)
const
std
::
ptrdiff_t
output_alias
(
const
std
::
vector
<
shape
>&
shapes
)
const
{
{
return
shapes
.
size
()
-
1
;
int64_t
sorted
=
1
;
// default = sorted.
if
(
contains
(
info
.
attributes
,
"sorted"
))
sorted
=
parser
.
parse_value
(
info
.
attributes
.
at
(
"sorted"
)).
at
<
int
>
();
std
::
optional
<
int64_t
>
axis
;
if
(
contains
(
info
.
attributes
,
"axis"
))
{
auto
n_dim
=
args
[
0
]
->
get_shape
().
ndim
();
axis
=
parser
.
parse_value
(
info
.
attributes
.
at
(
"axis"
)).
at
<
int
>
();
axis
=
tune_axis
(
n_dim
,
*
axis
,
opd
.
op_name
);
}
migraphx
::
argument
data_arg
=
args
.
back
()
->
eval
();
auto
opr
=
axis
?
migraphx
::
make_op
(
"unique"
,
{{
"axis"
,
*
axis
},
{
"sorted"
,
sorted
}})
:
migraphx
::
make_op
(
"unique"
,
{{
"sorted"
,
sorted
}});
auto
u_opr
=
info
.
add_instruction
(
opr
,
args
.
at
(
0
));
auto
i_y
=
info
.
add_instruction
(
make_op
(
"get_tuple_elem"
,
{{
"index"
,
0
}}),
u_opr
);
auto
i_y_idx
=
info
.
add_instruction
(
make_op
(
"get_tuple_elem"
,
{{
"index"
,
1
}}),
u_opr
);
auto
i_x_idx
=
info
.
add_instruction
(
make_op
(
"get_tuple_elem"
,
{{
"index"
,
2
}}),
u_opr
);
auto
i_count
=
info
.
add_instruction
(
make_op
(
"get_tuple_elem"
,
{{
"index"
,
3
}}),
u_opr
);
return
{
i_y
,
i_y_idx
,
i_x_idx
,
i_count
};
}
}
};
};
}
// namespace
gpu
}
// namespace
onnx
}
// namespace MIGRAPHX_INLINE_NS
}
// namespace MIGRAPHX_INLINE_NS
}
// namespace migraphx
}
// namespace migraphx
#endif
src/targets/cpu/dnnl.cpp
View file @
d7dfe995
...
@@ -68,6 +68,7 @@ dnnl::memory::data_type to_dnnl_memory_data_type(shape::type_t t)
...
@@ -68,6 +68,7 @@ dnnl::memory::data_type to_dnnl_memory_data_type(shape::type_t t)
case
st
::
int32_type
:
return
dt
::
s32
;
case
st
::
int32_type
:
return
dt
::
s32
;
case
st
::
int8_type
:
return
dt
::
s8
;
case
st
::
int8_type
:
return
dt
::
s8
;
case
st
::
uint8_type
:
return
dt
::
u8
;
case
st
::
uint8_type
:
return
dt
::
u8
;
case
st
::
fp8e4m3fnuz_type
:
MIGRAPHX_THROW
(
"fp8e4m3fnuz unsupported in DNNL"
);
default:
MIGRAPHX_THROW
(
"Unsupported data type"
);
default:
MIGRAPHX_THROW
(
"Unsupported data type"
);
}
}
}
}
...
...
src/targets/cpu/lowering.cpp
View file @
d7dfe995
...
@@ -340,7 +340,6 @@ struct cpu_apply
...
@@ -340,7 +340,6 @@ struct cpu_apply
{
"reduce_min"
,
"reduction_min"
},
{
"reduce_min"
,
"reduction_min"
},
{
"reduce_sum"
,
"reduction_sum"
},
{
"reduce_sum"
,
"reduction_sum"
},
});
});
extend_op
(
"concat"
,
"dnnl::concat"
);
extend_op
(
"concat"
,
"dnnl::concat"
);
extend_op
(
"contiguous"
,
"dnnl::reorder"
);
extend_op
(
"contiguous"
,
"dnnl::reorder"
);
extend_op
(
"convolution"
,
"dnnl::convolution"
);
extend_op
(
"convolution"
,
"dnnl::convolution"
);
...
@@ -376,6 +375,12 @@ struct cpu_apply
...
@@ -376,6 +375,12 @@ struct cpu_apply
// Apply these operators first so the inputs can be const folded
// Apply these operators first so the inputs can be const folded
for
(
auto
it
:
iterator_for
(
*
modl
))
for
(
auto
it
:
iterator_for
(
*
modl
))
{
{
// skip lowering if input has fp8 as one of the inputs since oneDNN doesn't have fp8
// supported yet.
if
(
std
::
any_of
(
it
->
inputs
().
begin
(),
it
->
inputs
().
end
(),
[](
const
auto
&
i
)
{
return
i
->
get_shape
().
type
()
==
migraphx
::
shape
::
fp8e4m3fnuz_type
;
}))
continue
;
if
(
it
->
name
()
==
"pow"
)
if
(
it
->
name
()
==
"pow"
)
{
{
apply_pow
(
it
);
apply_pow
(
it
);
...
@@ -383,6 +388,12 @@ struct cpu_apply
...
@@ -383,6 +388,12 @@ struct cpu_apply
}
}
for
(
auto
it
:
iterator_for
(
*
modl
))
for
(
auto
it
:
iterator_for
(
*
modl
))
{
{
// skip lowering if input has fp8 as one of the inputs since oneDNN doesn't have fp8
// supported yet.
if
(
std
::
any_of
(
it
->
inputs
().
begin
(),
it
->
inputs
().
end
(),
[](
const
auto
&
i
)
{
return
i
->
get_shape
().
type
()
==
migraphx
::
shape
::
fp8e4m3fnuz_type
;
}))
continue
;
if
(
it
->
name
()
==
"pooling"
)
if
(
it
->
name
()
==
"pooling"
)
{
{
apply_pooling
(
it
);
apply_pooling
(
it
);
...
...
src/targets/gpu/CMakeLists.txt
View file @
d7dfe995
...
@@ -126,7 +126,6 @@ add_library(migraphx_gpu
...
@@ -126,7 +126,6 @@ add_library(migraphx_gpu
fuse_ck.cpp
fuse_ck.cpp
fuse_mlir.cpp
fuse_mlir.cpp
fuse_ops.cpp
fuse_ops.cpp
gather.cpp
gemm_impl.cpp
gemm_impl.cpp
hip.cpp
hip.cpp
kernel.cpp
kernel.cpp
...
@@ -140,7 +139,6 @@ add_library(migraphx_gpu
...
@@ -140,7 +139,6 @@ add_library(migraphx_gpu
nonzero.cpp
nonzero.cpp
pack_args.cpp
pack_args.cpp
prefuse_ops.cpp
prefuse_ops.cpp
pad.cpp
perfdb.cpp
perfdb.cpp
pooling.cpp
pooling.cpp
reverse.cpp
reverse.cpp
...
@@ -168,12 +166,10 @@ endfunction()
...
@@ -168,12 +166,10 @@ endfunction()
register_migraphx_gpu_ops
(
hip_
register_migraphx_gpu_ops
(
hip_
argmax
argmax
argmin
argmin
gather
logsoftmax
logsoftmax
loop
loop
multinomial
multinomial
nonzero
nonzero
pad
prefix_scan_sum
prefix_scan_sum
reverse
reverse
scatter
scatter
...
...
src/targets/gpu/compile_gen.cpp
View file @
d7dfe995
...
@@ -54,6 +54,11 @@ vectorize vectorize::elements(std::size_t axis,
...
@@ -54,6 +54,11 @@ vectorize vectorize::elements(std::size_t axis,
const
std
::
vector
<
shape
>&
inputs
,
const
std
::
vector
<
shape
>&
inputs
,
const
std
::
vector
<
std
::
size_t
>&
sizes
)
const
std
::
vector
<
std
::
size_t
>&
sizes
)
{
{
// disable vectorization for fp8 types
if
(
std
::
any_of
(
inputs
.
begin
(),
inputs
.
end
(),
[
&
](
auto
ishape
)
{
return
ishape
.
type
()
==
migraphx
::
shape
::
fp8e4m3fnuz_type
;
}))
return
{
1
,
axis
};
if
(
std
::
all_of
(
if
(
std
::
all_of
(
inputs
.
begin
(),
inputs
.
end
(),
[
&
](
const
auto
&
s
)
{
return
s
.
lens
()[
axis
]
==
1
;
}))
inputs
.
begin
(),
inputs
.
end
(),
[
&
](
const
auto
&
s
)
{
return
s
.
lens
()[
axis
]
==
1
;
}))
return
{
1
,
axis
};
return
{
1
,
axis
};
...
@@ -86,6 +91,11 @@ vectorize vectorize::elements(std::size_t axis,
...
@@ -86,6 +91,11 @@ vectorize vectorize::elements(std::size_t axis,
vectorize
vectorize
::
elements
(
context
&
ctx
,
std
::
size_t
axis
,
const
std
::
vector
<
shape
>&
inputs
)
vectorize
vectorize
::
elements
(
context
&
ctx
,
std
::
size_t
axis
,
const
std
::
vector
<
shape
>&
inputs
)
{
{
// disable vectorization for fp8 types
if
(
std
::
any_of
(
inputs
.
begin
(),
inputs
.
end
(),
[
&
](
auto
ishape
)
{
return
ishape
.
type
()
==
migraphx
::
shape
::
fp8e4m3fnuz_type
;
}))
return
{
1
,
axis
};
if
(
inputs
.
empty
())
if
(
inputs
.
empty
())
return
{
1
,
axis
};
return
{
1
,
axis
};
std
::
size_t
n
=
std
::
max_element
(
inputs
.
begin
(),
std
::
size_t
n
=
std
::
max_element
(
inputs
.
begin
(),
...
...
src/targets/gpu/device/gather.cpp
deleted
100644 → 0
View file @
c6ec6638
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/shape.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/gpu/device/gather.hpp>
#include <migraphx/gpu/device/tensor.hpp>
#include <migraphx/gpu/device/launch.hpp>
#include <migraphx/gpu/device/types.hpp>
namespace
migraphx
{
inline
namespace
MIGRAPHX_INLINE_NS
{
namespace
gpu
{
namespace
device
{
argument
gather
(
hipStream_t
stream
,
argument
result
,
argument
arg1
,
argument
arg2
,
int64_t
axis
)
{
const
auto
&
input_shape
=
arg1
.
get_shape
();
auto
lens
=
input_shape
.
lens
();
auto
axis_dim_size
=
lens
[
axis
];
lens
[
axis
]
=
arg2
.
get_shape
().
elements
();
shape
out_comp_shape
{
result
.
get_shape
().
type
(),
lens
};
std
::
size_t
nelements
=
result
.
get_shape
().
elements
();
visit_all
(
result
,
arg1
)([
&
](
auto
output
,
auto
input_v
)
{
hip_visit_views
(
input_v
,
out_comp_shape
)([
&
](
auto
input
,
auto
out_comp
)
{
arg2
.
visit
([
&
](
auto
indices
)
{
const
auto
*
indices_ptr
=
device_cast
(
indices
.
data
());
auto
*
output_ptr
=
device_cast
(
output
.
data
());
gs_launch
(
stream
,
nelements
,
256
)([
=
](
auto
i
)
__device__
{
auto
idx
=
out_comp
.
multi
(
i
);
auto
in_index
=
indices_ptr
[
idx
[
axis
]];
in_index
=
(
in_index
<
0
)
?
in_index
+
axis_dim_size
:
in_index
;
idx
[
axis
]
=
in_index
;
output_ptr
[
i
]
=
input
[
idx
];
});
});
});
});
return
result
;
}
}
// namespace device
}
// namespace gpu
}
// namespace MIGRAPHX_INLINE_NS
}
// namespace migraphx
src/targets/gpu/device/pad.cpp
deleted
100644 → 0
View file @
c6ec6638
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/shape.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/clamp.hpp>
#include <migraphx/gpu/device/nary.hpp>
#include <migraphx/gpu/device/pad.hpp>
#include <migraphx/gpu/device/tensor.hpp>
#include <migraphx/gpu/device/launch.hpp>
#include <migraphx/float_equal.hpp>
namespace
migraphx
{
inline
namespace
MIGRAPHX_INLINE_NS
{
namespace
gpu
{
namespace
device
{
argument
pad
(
hipStream_t
stream
,
argument
result
,
argument
arg1
,
float
value
,
std
::
vector
<
std
::
int64_t
>
pads
)
{
std
::
size_t
nelements
=
arg1
.
get_shape
().
elements
();
hip_visit_all
(
result
,
arg1
)([
&
](
auto
output
,
auto
input
)
{
using
type
=
typename
decltype
(
output
)
::
value_type
;
using
hip_index
=
typename
decltype
(
output
)
::
hip_index
;
type
device_val
=
pad_clamp
<
host_type
<
type
>>
(
value
);
gs_launch
(
stream
,
result
.
get_shape
().
elements
())(
[
=
](
auto
i
)
__device__
{
output
.
data
()[
i
]
=
device_val
;
});
hip_index
offsets
;
std
::
copy
(
pads
.
begin
(),
pads
.
begin
()
+
offsets
.
size
(),
offsets
.
begin
());
gs_launch
(
stream
,
nelements
)([
=
](
auto
i
)
__device__
{
auto
idx
=
input
.
get_shape
().
multi
(
i
);
for
(
std
::
size_t
j
=
0
;
j
<
offsets
.
size
();
j
++
)
{
idx
[
j
]
+=
offsets
[
j
];
}
output
[
idx
]
=
input
.
data
()[
i
];
});
});
return
result
;
}
}
// namespace device
}
// namespace gpu
}
// namespace MIGRAPHX_INLINE_NS
}
// namespace migraphx
src/targets/gpu/fuse_mlir.cpp
View file @
d7dfe995
This diff is collapsed.
Click to expand it.
src/targets/gpu/gather.cpp
deleted
100644 → 0
View file @
c6ec6638
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/gpu/gather.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/device/gather.hpp>
namespace
migraphx
{
inline
namespace
MIGRAPHX_INLINE_NS
{
namespace
gpu
{
shape
hip_gather
::
compute_shape
(
std
::
vector
<
shape
>
inputs
)
const
{
inputs
.
pop_back
();
return
op
.
normalize_compute_shape
(
inputs
);
}
argument
hip_gather
::
compute
(
context
&
ctx
,
const
shape
&
,
const
std
::
vector
<
argument
>&
args
)
const
{
return
device
::
gather
(
ctx
.
get_stream
().
get
(),
args
.
back
(),
args
[
0
],
args
[
1
],
op
.
axis
);
}
}
// namespace gpu
}
// namespace MIGRAPHX_INLINE_NS
}
// namespace migraphx
src/targets/gpu/include/migraphx/gpu/device/gather.hpp
deleted
100644 → 0
View file @
c6ec6638
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_GATHER_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_GATHER_HPP
#include <migraphx/argument.hpp>
#include <migraphx/gpu/device/config.hpp>
#include <hip/hip_runtime_api.h>
namespace
migraphx
{
inline
namespace
MIGRAPHX_INLINE_NS
{
namespace
gpu
{
namespace
device
{
argument
MIGRAPHX_DEVICE_EXPORT
gather
(
hipStream_t
stream
,
argument
result
,
argument
arg1
,
argument
arg2
,
int64_t
axis
);
}
// namespace device
}
// namespace gpu
}
// namespace MIGRAPHX_INLINE_NS
}
// namespace migraphx
#endif
src/targets/gpu/include/migraphx/gpu/device/pad.hpp
deleted
100644 → 0
View file @
c6ec6638
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_PAD_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_PAD_HPP
#include <migraphx/argument.hpp>
#include <migraphx/gpu/device/config.hpp>
#include <hip/hip_runtime_api.h>
namespace
migraphx
{
inline
namespace
MIGRAPHX_INLINE_NS
{
namespace
gpu
{
namespace
device
{
argument
MIGRAPHX_DEVICE_EXPORT
pad
(
hipStream_t
stream
,
argument
result
,
argument
arg1
,
float
value
,
std
::
vector
<
std
::
int64_t
>
pads
);
}
// namespace device
}
// namespace gpu
}
// namespace MIGRAPHX_INLINE_NS
}
// namespace migraphx
#endif
src/targets/gpu/include/migraphx/gpu/fuse_mlir.hpp
View file @
d7dfe995
...
@@ -34,10 +34,11 @@ struct module_pass_manager;
...
@@ -34,10 +34,11 @@ struct module_pass_manager;
namespace
gpu
{
namespace
gpu
{
MIGRAPHX_GPU_EXPORT
bool
mlir_enabled
();
MIGRAPHX_GPU_EXPORT
bool
mlir_enabled
();
MIGRAPHX_GPU_EXPORT
bool
mlir_attention_enabled
();
struct
MIGRAPHX_GPU_EXPORT
fuse_mlir
struct
MIGRAPHX_GPU_EXPORT
fuse_mlir
{
{
context
*
ctx
=
nullptr
;
context
*
ctx
=
nullptr
;
bool
enable_extra
=
false
;
bool
enable_extra
=
false
;
std
::
string
name
()
const
{
return
"gpu::fuse_mlir"
;
}
std
::
string
name
()
const
{
return
"gpu::fuse_mlir"
;
}
void
apply
(
module_pass_manager
&
mpm
)
const
;
void
apply
(
module_pass_manager
&
mpm
)
const
;
...
...
src/targets/gpu/include/migraphx/gpu/gather.hpp
deleted
100644 → 0
View file @
c6ec6638
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_RTGLIB_GATHER_HPP
#define MIGRAPHX_GUARD_RTGLIB_GATHER_HPP
#include <migraphx/argument.hpp>
#include <migraphx/reflect.hpp>
#include <migraphx/op/gather.hpp>
#include <migraphx/gpu/context.hpp>
namespace
migraphx
{
inline
namespace
MIGRAPHX_INLINE_NS
{
namespace
gpu
{
struct
context
;
struct
hip_gather
{
op
::
gather
op
;
template
<
class
Self
,
class
F
>
static
auto
reflect
(
Self
&
self
,
F
f
)
{
return
migraphx
::
reflect
(
self
.
op
,
f
);
}
std
::
string
name
()
const
{
return
"gpu::gather"
;
}
shape
compute_shape
(
std
::
vector
<
shape
>
inputs
)
const
;
argument
compute
(
context
&
ctx
,
const
shape
&
output_shape
,
const
std
::
vector
<
argument
>&
args
)
const
;
std
::
ptrdiff_t
output_alias
(
const
std
::
vector
<
shape
>&
shapes
)
const
{
return
shapes
.
size
()
-
1
;
}
};
}
// namespace gpu
}
// namespace MIGRAPHX_INLINE_NS
}
// namespace migraphx
#endif
src/targets/gpu/include/migraphx/gpu/gemm_softmax_gemm.hpp
View file @
d7dfe995
...
@@ -66,6 +66,10 @@ struct gemm_softmax_gemm
...
@@ -66,6 +66,10 @@ struct gemm_softmax_gemm
}
}
static
bool
is_ck_supported_type
(
shape
::
type_t
t
)
{
return
contains
({
shape
::
half_type
},
t
);
}
static
bool
is_ck_supported_type
(
shape
::
type_t
t
)
{
return
contains
({
shape
::
half_type
},
t
);
}
static
bool
is_mlir_supported_type
(
shape
::
type_t
t
)
{
return
contains
({
shape
::
type_t
::
float_type
,
shape
::
half_type
},
t
);
}
};
};
}
// namespace gpu
}
// namespace gpu
...
...
src/targets/gpu/kernels/include/migraphx/kernels/bit_cast.hpp
0 → 100644
View file @
d7dfe995
/* ************************************************************************
* Copyright (C) 2016-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
* ies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
* PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
* CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* ************************************************************************ */
#ifndef MIGRAPHX_GUARD_KERNELS_BITCAST_HPP
#define MIGRAPHX_GUARD_KERNELS_BITCAST_HPP
#include <migraphx/kernels/type_traits.hpp>
namespace
migraphx
{
template
<
typename
To
,
typename
From
,
MIGRAPHX_REQUIRES
(
is_trivially_copyable
<
To
>{}
and
is_trivially_copyable
<
From
>
{})
>
inline
constexpr
To
bit_cast
(
From
fr
)
noexcept
{
static_assert
(
sizeof
(
To
)
==
sizeof
(
From
));
return
__builtin_bit_cast
(
To
,
fr
);
}
}
// namespace migraphx
#endif // MIGRAPHX_GUARD_KERNELS_BITCAST_HPP
src/targets/gpu/kernels/include/migraphx/kernels/float8.hpp
0 → 100644
View file @
d7dfe995
This diff is collapsed.
Click to expand it.
src/targets/gpu/kernels/include/migraphx/kernels/float8_impl.hpp
0 → 100644
View file @
d7dfe995
/* ************************************************************************
* Copyright (C) 2016-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
* ies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
* PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
* CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* ************************************************************************ */
#ifndef MIGRAPHX_GUARD_KERNELS_FP8_IMPL_HPP
#define MIGRAPHX_GUARD_KERNELS_FP8_IMPL_HPP
#include <migraphx/kernels/bit_cast.hpp>
#include <migraphx/kernels/type_traits.hpp>
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wreserved-identifier"
#endif
namespace
migraphx
{
namespace
fp8
{
namespace
impl
{
// NOLINTBEGIN
// Convert T (float only for now; half is disabled) to an 8-bit float with Wm
// mantissa bits and We exponent bits, returned as the raw fp8 bit pattern.
// NegativeZeroNan selects the FNUZ encoding (0x80 is the unique NaN; no -0 or
// inf); otherwise the OCP/IEEE-like encoding is used.
// Clip: on overflow, saturate to the largest finite value of the same sign
// instead of returning an inf/NaN encoding.
// stoch/rng: when stoch is true, round stochastically using rng as the random
// value; otherwise round-to-nearest-even.
template <int Wm, int We, typename T, bool NegativeZeroNan, bool Clip>
__device__ constexpr uint8_t cast_to_f8(T f_x, bool stoch = false, uint32_t rng = 0)
{
    constexpr bool is_float = true;
    // half is not supported for now
    constexpr bool is_half = false;
    static_assert(Wm + We == 7, "Wm+We==7");
    static_assert(is_float or is_half, "Only float can be cast to f8");

    // Width of the source mantissa field: 23 for float, 10 for half.
    const uint32_t mfmt = (sizeof(T) == 4) ? 23 : 10;
    typename migraphx::conditional_t<sizeof(T) == 2, uint16_t, uint32_t> x;
    if constexpr(sizeof(T) == 4)
        x = migraphx::bit_cast<uint32_t>(f_x);
    else
        x = migraphx::bit_cast<uint16_t>(f_x);

    // Decompose the input into sign/exponent/mantissa fields.
    uint32_t head     = 0;
    uint32_t mantissa = 0;
    int exponent      = 0;
    uint32_t bias     = 0;
    uint32_t sign     = 0;
    if constexpr(sizeof(T) == 4)
    {
        head     = x & 0xFF800000;
        mantissa = x & 0x7FFFFF;
        exponent = (head >> 23) & 0xFF;
        sign     = head >> 31;
        bias     = 127;
    }
    else
    {
        head     = x & 0xFC00;
        mantissa = x & 0x3FF;
        exponent = (head >> 10) & 0x1F;
        sign     = head >> 15;
        bias     = 15;
    }

    uint32_t signed_inf      = (sign << 7) + (((1 << We) - 1) << Wm);
    uint32_t signed_all_ones = (sign << 7) + ((((1 << We) - 1) << Wm) + ((1 << Wm) - 1));

    // Calculate the maximum signed finite value (FLT_MAX equivalent).
    uint32_t signed_max = signed_all_ones;
    if(not NegativeZeroNan)
        signed_max = (Wm == 2) ? (signed_max - 4) : (signed_max - 1);

    // Deal with inf and NaNs
    if(NegativeZeroNan) // For the FNUZ cases it is simple: just return the NaN encoding
    {
        if((sizeof(T) == 4 and ((x & 0x7F800000) == 0x7F800000)) or
           (sizeof(T) == 2 and ((x & 0x7C00) == 0x7C00)))
            return 0x80;
    }
    else
    {
        // calculate the most common NaN mantissa for FP8, which is all ones in binary
        uint32_t nan_mantissa = 1;
        for(auto i = 1; i < Wm; ++i)
        {
            nan_mantissa |= (nan_mantissa << 1);
        }
        if((sizeof(T) == 4 and ((x & 0x7F800000) == 0x7F800000)) or
           (sizeof(T) == 2 and ((x & 0x7C00) == 0x7C00)))
        {
            // infinity
            if(mantissa == 0)
            {
                if(sign == 0)
                    return (Wm == 2) ? 0x7B : 0x7E;
                else
                    return (Wm == 2) ? 0xFB : 0xFE;
            }
            else // NaNs
                return signed_inf + nan_mantissa;
        }
    }
    // handle positive zero
    if(x == 0)
        return 0;
    // handle negative zero
    else if((sizeof(T) == 4 and x == 0x80000000) or (sizeof(T) == 2 and x == 0x8000))
    {
        return NegativeZeroNan ? 0 : 0x80; // For FNUZ types neg zero is just positive zero
    }

    /* First need to check if it is normal or denorm as there is a difference of implicit 1.
       Then adjust the exponent to align with the F8 exponent, shifting the mantissa in the
       meanwhile. Then for stochastic rounding, add rng to mantissa and truncate; for RNE no
       rng is added. Then check whether there is a carry and adjust exponent and mantissa
       again. */
    // For IEEE bias mode, the bias is 2^(k-1)-1 where k is the width of the exponent bits
    const int f8_bias                  = (1 << (We - 1u)) - 1 + (NegativeZeroNan ? 1 : 0);
    const int f8_denormal_act_exponent = 1 - f8_bias; // actual exponent of f8 denormal
    /* act_exponent is the actual exponent of fp32/fp16 (after subtracting bias)
       f8_exponent is the converted f8 exponent with bias encoding
       exponent_diff is the diff between fp32/fp16 exponent and f8 exponent,
       the difference needs to be adjusted and mantissa shifted */
    int act_exponent  = 0;
    int f8_exponent   = 0;
    int exponent_diff = 0;

    if(exponent == 0 and mantissa != 0)
    { // fp32/fp16 is in denormal.
        /* fp32 denormal is below 2^-127 so it is usually not a concern here, we mostly concern
           fp16 here. In this case, f8 is usually in denormal. But there could be exceptions.
           fp16 denormal has exponent bias 15 while bf8 with FNUZ has exponent bias 16. It means
           that there are some numbers in fp16 denormal but they are bf8 (FNUZ) normals -
           smallest bf8 (FNUZ) normal is 2^-15. fp16 numbers where exponent==0 (actual exponent
           -14) and highest bit of mantissa is 1 are bf8 (FNUZ) normal. In this case, the fp16
           mantissa should be shifted left by 1 */
        act_exponent  = 1 - bias;
        exponent_diff = f8_denormal_act_exponent - act_exponent;
        // actual exponent is exponent-bias+1 as it is denormal
    }
    else
    { // fp32/fp16 is normal with implicit 1
        act_exponent = exponent - bias;
        if(act_exponent <= f8_denormal_act_exponent)
        {
            /* This is the case where fp32/fp16 is normal but it is in f8 denormal range.
               For example fp8 FNUZ mode, denormal exponent is -7, but if the fp32/fp16
               actual exponent is -7, it is actually larger due to the implicit 1,
               therefore it needs to be adjusted to -6 and the mantissa shifted right by 1.
               So for fp32/fp16, exponent -8 is the cut point to convert to fp8 FNUZ */
            exponent_diff = f8_denormal_act_exponent - act_exponent;
        }
        else
        {
            // both fp32/fp16 and f8 are in normal range
            exponent_diff = 0;
            // exponent_diff=0 does not mean there is no difference for this case,
            // act_exponent could be larger. Just that it does not need shift mantissa
        }
        mantissa += (1 << mfmt); // Add the implicit 1 into mantissa
    }

    // Number of low source-mantissa bits that will be dropped. For very small
    // inputs exponent_diff can push this past 31; shifting a 32-bit value by
    // >= 32 is undefined behaviour, so clamp to 31. The clamped result is still
    // mathematically correct because mantissa < 2^25, so a 31-bit mask already
    // covers it and a 31-bit right shift already clears it.
    const int drop_bits = (mfmt - Wm + exponent_diff) < 31 ? (mfmt - Wm + exponent_diff) : 31;

    // need to know whether the number is right in the middle of two adjacent fp8 numbers;
    // the shift amount is clamped to 31 above to avoid undefined behaviour
    bool midpoint = (mantissa & ((1u << drop_bits) - 1)) == (1u << (drop_bits - 1));
    /* The judgment of whether it is a tie needs to be done before we shift right, as the
       shift could rip off some residual part and make something that is not a midpoint look
       like a midpoint. For example, the fp16 number 0x1002 (0 00100 0000000010) is larger
       than midpoint, but after a right shift by 4 bits it would look like a midpoint. */
    if(exponent_diff > 0)
        mantissa >>= (exponent_diff < 31 ? exponent_diff : 31); // clamped: see drop_bits note
    else if(exponent_diff == -1)
        mantissa <<= -exponent_diff;
    bool implicit_one = mantissa & (1 << mfmt);
    // if there is no implicit 1, the f8 is denormal and needs the denormal exponent adjustment
    f8_exponent =
        (act_exponent + exponent_diff) /*actual f8 exponent*/ + f8_bias - (implicit_one ? 0 : 1);

    // Now we have the exponent and mantissa adjusted
    uint32_t drop_mask = (1 << (mfmt - Wm)) - 1;
    // if the least significant bit that is not truncated is 1
    bool odd = mantissa & (1 << (mfmt - Wm));
    /*
    This part is doing rounding by adding the mantissa part that is going to get dropped.
    e.g. if the dropped part is less than 0.5 then it rounds down;
    if the dropped part is more than 0.5 then it rounds up by rolling carry into the LSB of
    the retained mantissa.
    For the midpoint the bit pattern is like `xy1:10000000` for Odd and `xy0:10000000` for
    Even, where `:` delimits dropped vs retained parts.
    Odd case: adds xy1:10000000 + 000:10000000, rolling carry into the LSB of the retained
    part, making it RNE.
    Even case: adds xy0:10000000 + 000:01111111, which rounds down and keeps the number Even.
    */
    mantissa += (stoch ? rng : (midpoint ? (odd ? mantissa : mantissa - 1) : mantissa)) & drop_mask;

    // Now we deal with overflow
    if(f8_exponent == 0 and ((1 << mfmt) & mantissa))
    {
        f8_exponent = 1; // denormal overflow to become normal, promote exponent
    }
    else if((1 << (mfmt + 1)) & mantissa)
    {
        mantissa >>= 1;
        f8_exponent++;
    }
    mantissa >>= (mfmt - Wm);

    // above range: quantize to maximum possible float of the same sign
    // for the e5m2 case, max_exp is 14, since exp = 15 is reserved for Infs and NaNs
    const int max_exp = (1 << We) - ((NegativeZeroNan or Wm == 3) ? 1 : 2);
    if(f8_exponent > max_exp)
    {
        if(Clip)
            return signed_max;
        else
        {
            // https://onnx.ai/onnx/technical/float8.html#cast
            if(NegativeZeroNan)
                return 0x80;
            else
                return (Wm == 2) ? signed_inf : signed_all_ones;
        }
    }
    if(f8_exponent == 0 and mantissa == 0)
        return NegativeZeroNan ? 0 : (sign << 7);
    mantissa &= (1 << Wm) - 1;
    return (sign << 7) | (f8_exponent << Wm) | mantissa;
}
// NOLINTEND
// Convert the raw bit pattern of an 8-bit float (Wm mantissa bits, We exponent
// bits) to T (float only for now; half is disabled).
// NegativeZeroNan selects the FNUZ interpretation: 0x80 decodes to NaN and
// this branch handles no inf encodings; otherwise the OCP/IEEE-like
// interpretation is used (0x80 is -0; for Wm==2 an all-ones exponent encodes
// inf/NaN; for Wm==3 only 0x7F/0xFF decode to NaN).
template <int Wm, int We, typename T, bool NegativeZeroNan>
__device__ constexpr T cast_from_f8(uint8_t x)
{
    // half is not supported for now
    constexpr bool is_half  = false;
    constexpr bool is_float = true;
    static_assert(is_float or is_half, "Only float are supported");

    // Output exponent/mantissa field widths: 8/23 for float (5/10 reserved for half).
    constexpr int weo = is_half ? 5 : 8;
    constexpr int wmo = is_half ? 10 : (is_float ? 23 : 7);
    // Special output values, built from their IEEE-754 single-precision bit patterns.
    // NOLINTNEXTLINE
    T f_inf, f_neg_inf, f_nan, f_neg0;
    if constexpr(is_float)
    {
        const uint32_t if_inf     = 0x7F800000;
        const uint32_t if_neg_inf = 0xFF800000;
        const uint32_t if_nan     = 0x7F800001;
        const uint32_t if_neg0    = 0x80000000;
        f_inf                     = migraphx::bit_cast<float>(if_inf);
        f_neg_inf                 = migraphx::bit_cast<float>(if_neg_inf);
        f_nan                     = migraphx::bit_cast<float>(if_nan);
        f_neg0                    = migraphx::bit_cast<float>(if_neg0);
    }
    // +0 maps to +0 in every encoding.
    if(x == 0)
        return 0;

    // Split the fp8 bit pattern into its sign/mantissa/exponent fields.
    uint32_t sign     = x >> 7;              // NOLINT
    uint32_t mantissa = x & ((1 << Wm) - 1); // NOLINT
    int exponent      = (x & 0x7F) >> Wm;    // NOLINT
    if(NegativeZeroNan)
    {
        // FNUZ: 0x80 is the unique NaN encoding.
        if(x == 0x80)
            return f_nan;
    }
    else
    {
        // Non-FNUZ: 0x80 is negative zero.
        if(x == 0x80)
            return f_neg0;
        // Wm==2 (e5m2): all-ones exponent encodes inf (mantissa 0) or NaN.
        if(exponent == ((1 << We) - 1) and Wm == 2) // NOLINT
            return (mantissa == 0) ? (sign ? f_neg_inf : f_inf) : f_nan;
        // Wm==3 (e4m3): only 0x7F and 0xFF decode to NaN.
        else if(Wm == 3 and (x == 0x7F or x == 0xFF))
            return f_nan;
    }
    typename migraphx::conditional_t<sizeof(T) == 2, uint16_t, uint32_t> retval;
    // Re-bias offset from the f8 exponent bias to the output exponent bias.
    const int exp_low_cutoff =
        (1 << (weo - 1)) - (1 << (We - 1)) + 1 - (NegativeZeroNan ? 1 : 0); // NOLINT
    // subnormal input
    if(exponent == 0)
    {
        // guaranteed mantissa!=0 since cases 0x0 and 0x80 are handled above
        // Normalize: shift the leading 1 up out of the Wm-bit field and
        // compensate in the exponent.
        int sh = 1 + __builtin_clz(mantissa) - (32 - Wm);
        mantissa <<= sh; // NOLINT
        exponent += 1 - sh;
        mantissa &= ((1 << Wm) - 1); // NOLINT
    }
    exponent += exp_low_cutoff - 1;
    // Left-align the mantissa into the wider output mantissa field.
    mantissa <<= wmo - Wm; // NOLINT

    // subnormal output (occurs when T=half, We=5, negative_zero_nan=true)
    if(exponent <= 0)
    {
        // Restore the implicit 1 and shift it down into the output's
        // denormal mantissa range.
        mantissa |= 1 << wmo;      // NOLINT
        mantissa >>= 1 - exponent; // NOLINT
        exponent = 0;
    }

    // Reassemble sign/exponent/mantissa into the output bit pattern.
    if(sizeof(T) == 2)
        retval = (sign << 15) | (exponent << 10) | mantissa; // NOLINT
    else
        retval = (sign << 31) | (exponent << 23) | mantissa; // NOLINT
    return migraphx::bit_cast<T>(retval);
}
}
// namespace impl
}
// namespace fp8
}
// namespace migraphx
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
#endif // MIGRAPHX_GUARD_KERNELS_FP8_IMPL_HPP
src/targets/gpu/kernels/include/migraphx/kernels/layernorm.hpp
View file @
d7dfe995
...
@@ -52,22 +52,25 @@ __device__ void generic_binary_layernorm(
...
@@ -52,22 +52,25 @@ __device__ void generic_binary_layernorm(
block
::
template
run
<
reduce_output
>([
&
](
auto
,
auto
r
)
{
block
::
template
run
<
reduce_output
>([
&
](
auto
,
auto
r
)
{
auto
input
=
r
.
inner
([
&
](
auto
x1
,
auto
x2
)
{
return
op
(
x1
,
x2
);
})(
input1
,
input2
);
auto
input
=
r
.
inner
([
&
](
auto
x1
,
auto
x2
)
{
return
op
(
x1
,
x2
);
})(
input1
,
input2
);
using
value_type
=
typename
Input1
::
type
;
using
value_type
=
typename
Input1
::
type
;
using
vec_value_type
=
vec_type
<
value_type
>
;
constexpr
auto
relements
=
r
.
template
elements
<
Input1
>();
constexpr
auto
relements
=
r
.
template
elements
<
Input1
>();
constexpr
auto
relements_r
=
vec_
type
<
value_type
>
{
1.0
/
relements
};
constexpr
auto
relements_r
=
vec_value_type
{
1.0
/
relements
};
auto
relements_rsqrt
=
sqrt
(
relements_r
);
auto
relements_rsqrt
=
sqrt
(
relements_r
);
auto
means
=
r
.
reduce
(
op
::
sum
{},
make_array
<
vec_type
<
value_type
>>
(
0
,
0
),
[
&
](
auto
x
)
{
auto
means
=
r
.
reduce
(
op
::
sum
{},
auto
x_out
=
x
*
relements_r
;
make_array
<
vec_value_type
>
(
vec_value_type
{
0
},
vec_value_type
{
0
}),
// dividing x by sqrt(relements) before squaring allows computing higher values
[
&
](
auto
x
)
{
// before overflow in low precision
auto
x_out
=
x
*
relements_r
;
auto
x2_sqrt
=
x
*
relements_rsqrt
;
// dividing x by sqrt(relements) before squaring allows computing
return
make_array
(
x_out
,
x2_sqrt
*
x2_sqrt
);
// higher values before overflow in low precision
})(
input
);
auto
x2_sqrt
=
x
*
relements_rsqrt
;
return
make_array
(
x_out
,
x2_sqrt
*
x2_sqrt
);
})(
input
);
auto
mean_x
=
means
[
0
];
auto
mean_x
=
means
[
0
];
auto
mean_x2
=
means
[
1
];
auto
mean_x2
=
means
[
1
];
auto
variance
=
mean_x2
-
(
mean_x
*
mean_x
);
auto
variance
=
mean_x2
-
(
mean_x
*
mean_x
);
value_type
eps_val
=
eps
;
//
implicit
conversion
for
eps
value_type
eps_val
=
implicit
_
conversion
(
eps
);
r
.
inner
([
&
](
auto
&
y
,
auto
x
,
auto
...
xs
)
{
r
.
inner
([
&
](
auto
&
y
,
auto
x
,
auto
...
xs
)
{
auto
m
=
x
-
mean_x
;
auto
m
=
x
-
mean_x
;
...
...
src/targets/gpu/kernels/include/migraphx/kernels/math.hpp
View file @
d7dfe995
...
@@ -29,11 +29,15 @@
...
@@ -29,11 +29,15 @@
#include <migraphx/kernels/functional.hpp>
#include <migraphx/kernels/functional.hpp>
#include <migraphx/kernels/type_traits.hpp>
#include <migraphx/kernels/type_traits.hpp>
#include <migraphx/kernels/hip.hpp>
#include <migraphx/kernels/hip.hpp>
#include <migraphx/kernels/float8.hpp>
namespace
migraphx
{
namespace
migraphx
{
namespace
math
{
namespace
math
{
constexpr
float
as_float
(
migraphx
::
half
x
)
{
return
x
;
}
constexpr
float
as_float
(
migraphx
::
half
x
)
{
return
x
;
}
constexpr
float
as_float
(
migraphx
::
fp8
::
fp8e4m3fnuz
x
)
{
return
x
;
}
template
<
class
T
>
template
<
class
T
>
constexpr
T
as_float
(
T
x
)
constexpr
T
as_float
(
T
x
)
{
{
...
@@ -57,14 +61,14 @@ constexpr T as_float(T x)
...
@@ -57,14 +61,14 @@ constexpr T as_float(T x)
// NOLINTNEXTLINE
// NOLINTNEXTLINE
#define MIGRAPHX_DEVICE_MATH_FOR(type, name, fname) \
#define MIGRAPHX_DEVICE_MATH_FOR(type, name, fname) \
template <class... Ts, MIGRAPHX_REQUIRES(not is_any_vec<Ts...>())> \
template <class... Ts, MIGRAPHX_REQUIRES(not is_any_vec<Ts...>())> \
auto __device__ name(type x, Ts... xs)->type
\
auto __device__ name(type x, Ts... xs)
->
type \
{ \
{ \
return fname(x, xs...); \
return fname(x, xs...); \
}
}
// NOLINTNEXTLINE
// NOLINTNEXTLINE
#define MIGRAPHX_DEVICE_MATH_BINARY_FOR(type, name, fname) \
#define MIGRAPHX_DEVICE_MATH_BINARY_FOR(type, name, fname) \
inline auto __device__ name(type x, type y)->type { return fname(x, y); }
inline auto __device__ name(type x, type y)
->
type { return fname(x, y); }
// NOLINTNEXTLINE
// NOLINTNEXTLINE
#define MIGRAPHX_DEVICE_MATH_HALF(name, fname) \
#define MIGRAPHX_DEVICE_MATH_HALF(name, fname) \
...
@@ -72,6 +76,12 @@ constexpr T as_float(T x)
...
@@ -72,6 +76,12 @@ constexpr T as_float(T x)
auto __device__ name(migraphx::half x, Ts... xs) \
auto __device__ name(migraphx::half x, Ts... xs) \
MIGRAPHX_RETURNS(fname(math::as_float(x), math::as_float(xs)...))
MIGRAPHX_RETURNS(fname(math::as_float(x), math::as_float(xs)...))
// NOLINTNEXTLINE
#define MIGRAPHX_DEVICE_MATH_FP8(name, fname) \
template <class... Ts, MIGRAPHX_REQUIRES(not is_any_vec<Ts...>())> \
auto __device__ name(migraphx::fp8::fp8e4m3fnuz x, Ts... xs) MIGRAPHX_RETURNS( \
migraphx::fp8::fp8e4m3fnuz(fname(math::as_float(x), math::as_float(xs)...)))
// Template with two overloads for math functions, one for half2 type and one for more generic
// Template with two overloads for math functions, one for half2 type and one for more generic
// <half, N> vectorization where N is 4 or another even number.
// <half, N> vectorization where N is 4 or another even number.
...
@@ -162,6 +172,33 @@ MIGRAPHX_DEVICE_MATH_HALF(tan, ::tan)
...
@@ -162,6 +172,33 @@ MIGRAPHX_DEVICE_MATH_HALF(tan, ::tan)
MIGRAPHX_DEVICE_MATH_HALF
(
tanh
,
::
tanh
)
MIGRAPHX_DEVICE_MATH_HALF
(
tanh
,
::
tanh
)
MIGRAPHX_DEVICE_MATH_HALF
(
fmod
,
::
fmod
)
MIGRAPHX_DEVICE_MATH_HALF
(
fmod
,
::
fmod
)
// use float to compute fp8 overload
MIGRAPHX_DEVICE_MATH_FP8
(
abs
,
::
abs
)
MIGRAPHX_DEVICE_MATH_FP8
(
acos
,
::
acos
)
MIGRAPHX_DEVICE_MATH_FP8
(
acosh
,
::
acosh
)
MIGRAPHX_DEVICE_MATH_FP8
(
asin
,
::
asin
)
MIGRAPHX_DEVICE_MATH_FP8
(
asinh
,
::
asinh
)
MIGRAPHX_DEVICE_MATH_FP8
(
atan
,
::
atan
)
MIGRAPHX_DEVICE_MATH_FP8
(
atanh
,
::
atanh
)
MIGRAPHX_DEVICE_MATH_FP8
(
ceil
,
::
ceil
)
MIGRAPHX_DEVICE_MATH_FP8
(
cos
,
::
cos
)
MIGRAPHX_DEVICE_MATH_FP8
(
cosh
,
::
cosh
)
MIGRAPHX_DEVICE_MATH_FP8
(
erf
,
::
erf
)
MIGRAPHX_DEVICE_MATH_FP8
(
exp
,
::
exp
)
MIGRAPHX_DEVICE_MATH_FP8
(
floor
,
::
floor
)
MIGRAPHX_DEVICE_MATH_FP8
(
isnan
,
::
isnan
)
MIGRAPHX_DEVICE_MATH_FP8
(
log
,
::
log
)
MIGRAPHX_DEVICE_MATH_FP8
(
pow
,
::
pow
)
MIGRAPHX_DEVICE_MATH_FP8
(
remainder
,
::
remainder
)
MIGRAPHX_DEVICE_MATH_FP8
(
round
,
::
round
)
MIGRAPHX_DEVICE_MATH_FP8
(
rsqrt
,
::
rsqrt
)
MIGRAPHX_DEVICE_MATH_FP8
(
sin
,
::
sin
)
MIGRAPHX_DEVICE_MATH_FP8
(
sinh
,
::
sinh
)
MIGRAPHX_DEVICE_MATH_FP8
(
sqrt
,
::
sqrt
)
MIGRAPHX_DEVICE_MATH_FP8
(
tan
,
::
tan
)
MIGRAPHX_DEVICE_MATH_FP8
(
tanh
,
::
tanh
)
MIGRAPHX_DEVICE_MATH_FP8
(
fmod
,
::
fmod
)
// Map math functions to hip half2 functions
// Map math functions to hip half2 functions
// The half2 type is defined in include/hip/amd_detail/hip_fp16_gcc.h and is 2 16-bit floats
// The half2 type is defined in include/hip/amd_detail/hip_fp16_gcc.h and is 2 16-bit floats
// packed into a 32-bit number. See include/hip/amd_detail/hip_fp16_math_fwd.h for the HIP names
// packed into a 32-bit number. See include/hip/amd_detail/hip_fp16_math_fwd.h for the HIP names
...
@@ -253,7 +290,7 @@ MIGRAPHX_DEVICE_MATH_VEC(where)
...
@@ -253,7 +290,7 @@ MIGRAPHX_DEVICE_MATH_VEC(where)
template
<
class
T
,
class
U
>
template
<
class
T
,
class
U
>
constexpr
auto
convert
(
U
v
)
constexpr
auto
convert
(
U
v
)
{
{
return
vec_transform
(
v
)([](
auto
x
)
->
T
{
return
x
;
});
return
vec_transform
(
v
)([](
auto
x
)
->
T
{
return
static_cast
<
T
>
(
x
)
;
});
}
}
}
// namespace migraphx
}
// namespace migraphx
...
...
src/targets/gpu/kernels/include/migraphx/kernels/pad.hpp
View file @
d7dfe995
...
@@ -28,6 +28,7 @@
...
@@ -28,6 +28,7 @@
#include <migraphx/kernels/index.hpp>
#include <migraphx/kernels/index.hpp>
#include <migraphx/kernels/algorithm.hpp>
#include <migraphx/kernels/algorithm.hpp>
#include <migraphx/kernels/ranges.hpp>
#include <migraphx/kernels/ranges.hpp>
#include <migraphx/kernels/vec.hpp>
namespace
migraphx
{
namespace
migraphx
{
...
@@ -53,9 +54,9 @@ __device__ void pad(const index& idx,
...
@@ -53,9 +54,9 @@ __device__ void pad(const index& idx,
if
(
any_of
(
range_multi
.
begin
(),
range_multi
.
end
(),
[
&
](
auto
j
)
{
if
(
any_of
(
range_multi
.
begin
(),
range_multi
.
end
(),
[
&
](
auto
j
)
{
return
multi
[
j
]
<
offsets
[
j
]
or
input_idx
[
j
]
>=
input_bounds
[
j
];
return
multi
[
j
]
<
offsets
[
j
]
or
input_idx
[
j
]
>=
input_bounds
[
j
];
}))
}))
output
[
multi
]
=
pad_val
;
output
[
multi
]
=
implicit_conversion
(
pad_val
)
;
else
else
output
[
multi
]
=
input
[
input_idx
];
output
[
multi
]
=
implicit_conversion
(
input
[
input_idx
]
)
;
});
});
}
}
...
...
Prev
1
2
3
4
5
6
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment