gaoqiong / MIGraphX / Commits / 4ea39116

Commit 4ea39116
authored Nov 10, 2023 by Khalique Ahmed

manual merge

parents 20128cae d8011adf

Changes: 315. Showing 20 changed files with 502 additions and 55 deletions (+502 / -55):
src/include/migraphx/pad_calc.hpp                    +1   -0
src/include/migraphx/run_loop.hpp                    +1   -0
src/include/migraphx/source_location.hpp             +1   -0
src/include/migraphx/streamutils.hpp                 +14  -0
src/include/migraphx/tmp_dir.hpp                     +1   -0
src/include/migraphx/type_name.hpp                   +1   -1
src/normalize_attributes.cpp                         +8   -8
src/onnx/broadcast_qdq.cpp                           +76  -0
src/onnx/include/migraphx/onnx/broadcast_qdq.hpp     +26  -33
src/onnx/include/migraphx/onnx/onnx_parser.hpp       +5   -4
src/onnx/onnx.cpp                                    +1   -0
src/onnx/onnx_parser.cpp                             +1   -1
src/onnx/padding.cpp                                 +1   -1
src/onnx/parse_arg_op.cpp                            +14  -3
src/onnx/parse_clip.cpp                              +1   -1
src/onnx/parse_depthtospace.cpp                      +1   -2
src/onnx/parse_generic_op.cpp                        +1   -1
src/onnx/parse_groupnorm.cpp                         +130 -0
src/onnx/parse_isinf.cpp                             +87  -0
src/onnx/parse_layernorm.cpp                         +131 -0
src/include/migraphx/pad_calc.hpp

...
@@ -64,6 +64,7 @@ shape compute_padded_shape(const shape& input,
 // Used for dynamic auto padding of pooling operators where padding needs to be computed at
 // evaulation time.
+MIGRAPHX_EXPORT
 shape compute_padded_pool_shape(const shape& input,
                                 const shape& kernel,
                                 const std::vector<std::size_t>& padding,
...
src/include/migraphx/run_loop.hpp

...
@@ -31,6 +31,7 @@
 #include <migraphx/module.hpp>
 #include <migraphx/config.hpp>
 #include <migraphx/ranges.hpp>
+#include <array>
 #include <string>

 namespace migraphx {
...
src/include/migraphx/source_location.hpp

...
@@ -24,6 +24,7 @@
 #ifndef MIGRAPHX_GUARD_MIGRAPHX_SOURCE_LOCATION_HPP
 #define MIGRAPHX_GUARD_MIGRAPHX_SOURCE_LOCATION_HPP
+#include <cstdint>
 #include <migraphx/config.hpp>

 #if defined(CPPCHECK)
...
src/include/migraphx/streamutils.hpp

...
@@ -30,6 +30,7 @@
 #include <migraphx/rank.hpp>
 #include <migraphx/requires.hpp>
 #include <migraphx/config.hpp>
+#include <migraphx/optional.hpp>
 #include <vector>

 namespace migraphx {
...
@@ -68,6 +69,19 @@ auto stream_write_value_impl(rank<1>, std::ostream& os, const T& x) -> decltype(
     os << x;
 }

+template <class T>
+auto stream_write_value_impl(rank<1>, std::ostream& os, const optional<T>& x)
+{
+    if(x.has_value())
+    {
+        os << *x;
+    }
+    else
+    {
+        os << "nullopt";
+    }
+}
+
 template <class T>
 void stream_write_value_impl(rank<1>, std::ostream& os, const std::vector<T>& r)
 {
...
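Editor's note: the new optional<T> overload above plugs into the rank<N> tag-dispatch idiom the header already uses, where the call site passes the highest rank and SFINAE drops overloads that do not apply. The following is a minimal, self-contained sketch of that idiom with simplified names (write_impl, write), not MIGraphX's actual definitions:

    #include <cstddef>
    #include <iostream>
    #include <optional>
    #include <vector>

    // rank<N> inherits from rank<N-1>, so a rank<1> argument prefers a rank<1>
    // overload but can still bind to a rank<0> fallback.
    template <std::size_t N>
    struct rank : rank<N - 1>
    {
    };

    template <>
    struct rank<0>
    {
    };

    // Preferred overload: anything directly streamable (SFINAE on os << x).
    template <class T>
    auto write_impl(rank<1>, std::ostream& os, const T& x) -> decltype(os << x, void())
    {
        os << x;
    }

    // Fallback overload for types without operator<<.
    template <class T>
    void write_impl(rank<0>, std::ostream& os, const T&)
    {
        os << "<unprintable>";
    }

    template <class T>
    void write(std::ostream& os, const T& x)
    {
        write_impl(rank<1>{}, os, x); // start dispatch at the highest rank
    }

    int main()
    {
        write(std::cout, 42);                     // streamable: prints 42
        write(std::cout, std::vector<int>{1, 2}); // no operator<<: falls back
    }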
src/include/migraphx/tmp_dir.hpp

...
@@ -34,6 +34,7 @@ struct MIGRAPHX_EXPORT tmp_dir
 {
     fs::path path;
     tmp_dir(const std::string& prefix = "");
+    tmp_dir(tmp_dir&&) = default;

     void execute(const std::string& exe, const std::string& args) const;
...
src/include/migraphx/type_name.hpp

...
@@ -34,7 +34,7 @@ template <class PrivateMigraphTypeNameProbe>
 std::string compute_type_name()
 {
     std::string name;
-#ifdef _MSC_VER
+#if defined(_MSC_VER) && !defined(__clang__)
     name = typeid(PrivateMigraphTypeNameProbe).name();
     name = name.substr(7);
 #else
...
src/normalize_attributes.cpp

...
@@ -66,15 +66,15 @@ auto tune_attribute(const std::vector<int64_t>& vec,
 {
     if(input_shape.dynamic())
     {
-        // return the unchanged `vec` if the dynamic_dimensions at `axes` are not fixed
-        if(std::any_of(axes.begin(), axes.end(), [&](auto ax) {
-               return not input_shape.dyn_dims().at(ax).is_fixed();
-           }))
-        {
-            return vec;
-        }
-        std::transform(axes.begin(), axes.end(), max_vals.begin(), [&](auto i) {
-            return input_shape.dyn_dims().at(i).max;
-        });
+        std::transform(axes.begin(), axes.end(), max_vals.begin(), [&](auto i) {
+            const auto& dd = input_shape.dyn_dims().at(i);
+            if(not dd.is_fixed())
+            {
+                MIGRAPHX_THROW(
+                    "NORMALIZE_ATTR: 'use_lens' on a non-fixed dynamic dimension, axis=" +
+                    std::to_string(i));
+            }
+            return dd.max;
+        });
     }
     else
...
src/targets/gpu/int8_conv_pack.cpp → src/onnx/broadcast_qdq.cpp

 /*
  * The MIT License (MIT)
  *
- * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
...
@@ -21,58 +21,56 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  * THE SOFTWARE.
  */
-#include <migraphx/gpu/int8_conv_pack.hpp>
-#include <migraphx/gpu/context.hpp>
+#include <migraphx/onnx/broadcast_qdq.hpp>

 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
-namespace gpu {
+namespace onnx {

-shape pack_int8_shape(const shape& s)
-{
-    if(s.type() != shape::int8_type)
-    {
-        MIGRAPHX_THROW("PACK_INT8_ARGS: only process int8_type");
-    }
-
-    auto lens    = s.lens();
-    auto strides = s.strides();
-    lens[1]      = (lens[1] + 3) / 4 * 4;
-    strides[0]   = strides[1] * lens[1];
-
-    return {s.type(), lens, strides};
-}
-
-shape miopen_int8_conv_pack::compute_shape(const std::vector<shape>& inputs) const
-{
-    check_shapes{{inputs.at(0)}, *this}.has(1).standard();
-    return pack_int8_shape(inputs.at(0));
-}
-
-argument miopen_int8_conv_pack::compute(context& ctx,
-                                        const shape&,
-                                        const std::vector<argument>& args) const
-{
-    auto arg_desc      = make_tensor(args[0].get_shape());
-    auto arg_desc_vec4 = make_tensor(args[0].get_shape(), true);
-
-    float alpha = 1;
-    float beta  = 0;
-    // pack input to vec4 format
-    auto status = miopenTransformTensor(ctx.get_stream().get_miopen(),
-                                        &alpha,
-                                        arg_desc.get(),
-                                        args[0].implicit(),
-                                        &beta,
-                                        arg_desc_vec4.get(),
-                                        args[1].implicit());
-    if(status != miopenStatusSuccess)
-    {
-        MIGRAPHX_THROW("INT8_CONV_PACK: transform input tensor failed");
-    }
-
-    return args[1];
-}
-
-} // namespace gpu
+// This method is to prep for quantizelinear or dequantizelinear operation for
+// either the broadcasting of weight-scale or zero-points of qlinearadd operator
+// outputs: operator op (inputs x, broadcasted: scale (float) & zero_pt (8-bit))
+instruction_ref bcast_qdq_instr(const std::string& op_name,
+                                instruction_ref x_in,
+                                instruction_ref arg_fscale,
+                                instruction_ref arg_z_pt,
+                                const onnx_parser::node_info& info)
+{
+    auto in_lens = x_in->get_shape().lens();
+
+    // prep 1: broadcast scale. it can come as a scalar or a 1-D tensor.
+    instruction_ref bcast_scale;
+    if(arg_fscale->get_shape().elements() > 1)
+        bcast_scale = info.add_instruction(
+            migraphx::make_op("broadcast", {{"axis", 0}, {"out_lens", in_lens}}), arg_fscale);
+    else
+        bcast_scale = info.add_instruction(
+            migraphx::make_op("multibroadcast", {{"out_lens", in_lens}}), arg_fscale);
+
+    // prep 2: broadcast zero point. it can come as a scalar or a 1-D tensor.
+    instruction_ref bcast_zero_pt;
+    if(arg_z_pt->get_shape().elements() > 1)
+        bcast_zero_pt = info.add_instruction(
+            migraphx::make_op("broadcast", {{"axis", 0}, {"out_lens", in_lens}}), arg_z_pt);
+    else
+        bcast_zero_pt = info.add_instruction(
+            migraphx::make_op("multibroadcast", {{"out_lens", in_lens}}), arg_z_pt);
+
+    // op_name is either quantizelinear or dequantizelinear:
+    return info.add_instruction(migraphx::make_op(op_name), x_in, bcast_scale, bcast_zero_pt);
+}
+
+// Multibroadcast a scaler..
+instruction_ref bcast_scalar_instr(const migraphx::shape& shape_out,
+                                   instruction_ref arg_in,
+                                   const onnx_parser::node_info& info)
+{
+    auto bcast_instr_out = info.add_instruction(
+        migraphx::make_op("multibroadcast", {{"out_lens", shape_out.lens()}}), arg_in);
+    return bcast_instr_out;
+}
+
+} // namespace onnx
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
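Editor's note: the helpers above are intended to be called from ONNX operator parsers that handle quantized inputs. A hypothetical use, assuming it sits in the same migraphx::onnx namespace and that the node's arguments arrive as (x, scale, zero_point, ...), could look roughly like this; the function name and argument layout are illustrative, not the actual MIGraphX QLinear parsers:

    // Hypothetical sketch only: dequantize two quantized inputs with bcast_qdq_instr
    // and add them in floating point. The argument layout is assumed for illustration.
    instruction_ref parse_example_qlinear_add(const onnx_parser::node_info& info,
                                              const std::vector<instruction_ref>& args)
    {
        // args (assumed): a, a_scale, a_zero_point, b, b_scale, b_zero_point
        auto a_float = bcast_qdq_instr("dequantizelinear", args[0], args[1], args[2], info);
        auto b_float = bcast_qdq_instr("dequantizelinear", args[3], args[4], args[5], info);
        return info.add_instruction(migraphx::make_op("add"), a_float, b_float);
    }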
src/targets/gpu/include/migraphx/gpu/int8_gemm_pack.hpp → src/onnx/include/migraphx/onnx/broadcast_qdq.hpp

 /*
  * The MIT License (MIT)
  *
- * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
...
@@ -21,42 +21,35 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  * THE SOFTWARE.
  */
-#ifndef MIGRAPHX_GUARD_RTGLIB_INT8_GEMM_PACK_HPP
-#define MIGRAPHX_GUARD_RTGLIB_INT8_GEMM_PACK_HPP
-#include <migraphx/argument.hpp>
-#include <migraphx/config.hpp>
-#include <utility>
+#ifndef MIGRAPHX_GUARD_AMDMIGRAPHX_ONNX_BROADCAST_QDQ_HPP
+#define MIGRAPHX_GUARD_AMDMIGRAPHX_ONNX_BROADCAST_QDQ_HPP
+#include <string>
+#include <migraphx/onnx/op_parser.hpp>
+#include <migraphx/make_op.hpp>
+#include <migraphx/instruction.hpp>

 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
-namespace gpu {
+namespace onnx {

-struct context;
-
-struct hip_int8_gemm_pack_a
-{
-    std::string name() const { return "gpu::int8_gemm_pack_a"; }
-    shape compute_shape(const std::vector<shape>& inputs) const;
-    argument compute(context& ctx, const shape&, const std::vector<argument>& args) const;
-    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
-    {
-        return shapes.size() - 1;
-    }
-};
-
-struct hip_int8_gemm_pack_b
-{
-    std::string name() const { return "gpu::int8_gemm_pack_b"; }
-    shape compute_shape(const std::vector<shape>& inputs) const;
-    argument compute(context& ctx, const shape&, const std::vector<argument>& args) const;
-    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
-    {
-        return shapes.size() - 1;
-    }
-};
+// This method is to prep for quantizelinear or dequantizelinear operation for
+// either the broadcasting of weight-scale or zero-points of qlinearadd operator
+// outputs: operator op (inputs x, broadcasted: scale (float) & zero_pt (8-bit))
+instruction_ref bcast_qdq_instr(const std::string& op_name,
+                                instruction_ref x_in,
+                                instruction_ref arg_fscale,
+                                instruction_ref arg_z_pt,
+                                const onnx_parser::node_info& info);
+
+// Multibroadcast a scaler..
+instruction_ref bcast_scalar_instr(const migraphx::shape& shape_out,
+                                   instruction_ref arg_in,
+                                   const onnx_parser::node_info& info);

-} // namespace gpu
+} // namespace onnx
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
...
src/onnx/include/migraphx/onnx/onnx_parser.hpp

...
@@ -100,6 +100,7 @@ struct onnx_parser
     bool use_dyn_output          = false;
     bool skip_unknown_operators  = false;
     int64_t max_loop_iterations  = 10;
+    int64_t limit_max_iterations = std::numeric_limits<uint16_t>::max();
     int64_t opset_version        = 13;

     std::unordered_map<std::string, op_func> ops;
...
src/onnx/onnx.cpp

...
@@ -67,6 +67,7 @@ program parse_onnx_from(const onnx_options& options, Ts&&... xs)
     }
     parser.skip_unknown_operators = options.skip_unknown_operators;
     parser.max_loop_iterations    = options.max_loop_iterations;
+    parser.limit_max_iterations   = options.limit_max_iterations;
     parser.use_dyn_output         = options.use_dyn_output;
     if(options.print_program_on_error)
...
src/onnx/onnx_parser.cpp

...
@@ -244,7 +244,7 @@ void onnx_parser::parse_from(std::istream& is, std::string name)
     this->filename   = std::move(name);
     auto parent_path = fs::path(this->filename).parent_path();
     if(not parent_path.empty())
-        this->path = parent_path;
+        this->path = parent_path.string();

     onnx::ModelProto model;
     if(model.ParseFromIstream(&is))
...
src/onnx/padding.cpp

...
@@ -47,7 +47,7 @@ void cal_auto_padding_size(onnx_parser::node_info info,
         return;
     }

-    auto auto_pad = info.attributes["auto_pad"].s();
+    auto auto_pad = to_upper(info.attributes["auto_pad"].s());
     if(auto_pad.find("SAME") != std::string::npos)
     {
         bool is_same_upper = (auto_pad.find("SAME_UPPER") != std::string::npos);
...
src/onnx/parse_arg_op.cpp

 /*
  * The MIT License (MIT)
  *
- * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
...
@@ -50,14 +50,25 @@ struct parse_arg_op : op_parser<parse_arg_op>
             keep_dims = parser.parse_value(info.attributes.at("keepdims")).at<int>();
         }

+        bool select_last_index = false;
+        if(contains(info.attributes, "select_last_index"))
+        {
+            select_last_index = static_cast<bool>(
+                parser.parse_value(info.attributes.at("select_last_index")).at<int>());
+        }
+
         if(keep_dims == 0)
         {
-            auto ins = info.add_instruction(make_op(opd.op_name, {{"axis", axis}}), args);
+            auto ins = info.add_instruction(
+                make_op(opd.op_name, {{"axis", axis}, {"select_last_index", select_last_index}}),
+                args);
             return info.add_instruction(make_op("squeeze", {{"axes", {axis}}}), ins);
         }
         else
         {
-            return info.add_instruction(make_op(opd.op_name, {{"axis", axis}}), args);
+            return info.add_instruction(
+                make_op(opd.op_name, {{"axis", axis}, {"select_last_index", select_last_index}}),
+                args);
         }
     }
 };
...
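Editor's note: the newly forwarded select_last_index attribute follows the ONNX ArgMax/ArgMin definition: when several elements along the reduced axis tie for the extreme value, the last occurrence is returned instead of the first. A tiny scalar reference of the intended behaviour (standalone sketch, not MIGraphX code):

    #include <cstdio>
    #include <vector>

    // Return the index of the maximum; on ties, pick the first or last occurrence.
    int argmax(const std::vector<int>& v, bool select_last_index)
    {
        int best = 0;
        for(int i = 1; i < static_cast<int>(v.size()); ++i)
        {
            if(v[i] > v[best] or (select_last_index and v[i] == v[best]))
                best = i;
        }
        return best;
    }

    int main()
    {
        std::vector<int> v = {3, 7, 7, 2};
        std::printf("first=%d last=%d\n", argmax(v, false), argmax(v, true)); // first=1 last=2
    }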
src/onnx/parse_clip.cpp

 /*
  * The MIT License (MIT)
  *
- * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
...
src/onnx/parse_depthtospace.cpp

...
@@ -87,8 +87,7 @@ struct parse_depthtospace : op_parser<parse_depthtospace>
         auto temp1 = info.add_instruction(make_op("reshape", {{"dims", lens1}}), args[0]);
         auto temp2 = info.add_instruction(make_op("transpose", {{"permutation", perm}}), temp1);
         return info.add_instruction(make_op("reshape", {{"dims", lens2}}),
-                                    temp2);
+                                    info.make_contiguous(temp2));
     }
 };
...
src/onnx/parse_generic_op.cpp

...
@@ -60,7 +60,7 @@ struct parse_generic_op : op_parser<parse_generic_op>
                 {"Neg", "neg"},
                 {"Reciprocal", "recip"},
                 {"Relu", "relu"},
-                {"Round", "round"},
+                {"Round", "nearbyint"},
                 {"Sigmoid", "sigmoid"},
                 {"Sign", "sign"},
                 {"Sin", "sin"},
...
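Editor's note: mapping ONNX Round to "nearbyint" rather than "round" matters because ONNX Round specifies half-to-even rounding; std::round rounds halves away from zero, while std::nearbyint honors the current rounding mode (ties-to-even by default). A standalone illustration (not part of the commit):

    #include <cfenv>
    #include <cmath>
    #include <cstdio>

    int main()
    {
        std::fesetround(FE_TONEAREST); // the default mode: ties go to even
        for(double v : {0.5, 1.5, 2.5, -2.5})
        {
            std::printf("v=%4.1f  round=%4.1f  nearbyint=%4.1f\n",
                        v, std::round(v), std::nearbyint(v));
        }
        // round:     1, 2, 3, -3  (halves away from zero)
        // nearbyint: 0, 2, 2, -2  (halves to even, matching ONNX Round)
    }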
src/onnx/parse_groupnorm.cpp (new file, 0 → 100644)

/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {

struct parse_groupnorm : op_parser<parse_groupnorm>
{
    std::vector<op_desc> operators() const { return {{"GroupNormalization"}}; }

    instruction_ref parse(const op_desc& /*opd*/,
                          const onnx_parser& parser,
                          const onnx_parser::node_info& info,
                          std::vector<instruction_ref> args) const
    {
        float epsilon = 1e-5f;
        if(contains(info.attributes, "epsilon"))
        {
            epsilon = parser.parse_value(info.attributes.at("epsilon")).at<float>();
        }
        size_t num_groups;
        if(contains(info.attributes, "num_groups"))
        {
            num_groups = parser.parse_value(info.attributes.at("num_groups")).at<size_t>();
        }
        else
        {
            MIGRAPHX_THROW("PARSE_GROUPNORM: num_groups must be available");
        }

        if(args.size() != 3)
        {
            MIGRAPHX_THROW("PARSE_GROUPNORM: invalid input count");
        }
        auto x     = args.at(0);
        auto scale = args.at(1);
        auto bias  = args.at(2);

        auto x_shape = x->get_shape();
        auto x_dtype = x_shape.type();
        auto x_dims  = x_shape.lens();
        if(x_shape.ndim() <= 2)
        {
            MIGRAPHX_THROW("PARSE_GROUPNORM: invalid input shape");
        }

        auto c = x_shape.lens().at(1);
        if(c % num_groups != 0)
        {
            MIGRAPHX_THROW(
                "PARSE_GROUPNORM: num_groups should be a divisor of the number of channels");
        }
        auto group_size = c / num_groups;

        if(scale->get_shape().ndim() != 1 or scale->get_shape().lens().at(0) != num_groups)
        {
            MIGRAPHX_THROW("PARSE_GROUPNORM: scale tensor shape should be num_groups");
        }
        if(bias->get_shape().ndim() != 1 or bias->get_shape().lens().at(0) != num_groups)
        {
            MIGRAPHX_THROW("PARSE_GROUPNORM: bias tensor shape should be num_groups");
        }

        // Original shape: N x C x D1 x ... x Dn
        // New shape: N x num_groups x C // num_groups x D1 x ... x Dn
        std::vector<size_t> dims = {x_dims.at(0), num_groups, group_size};
        std::copy(x_dims.begin() + 2, x_dims.end(), std::back_inserter(dims));
        auto x_reshaped = info.add_instruction(make_op("reshape", {{"dims", dims}}), x);

        // Axes for D1 x ... x Dn
        std::vector<size_t> axes(dims.size() - 2);
        std::iota(axes.begin(), axes.end(), 2);

        // y = (x - mean) * rsqrt(variance + epsilon) * scale + bias
        // mean = reduce_mean({D1, D2, ... Dk}, x)
        // variance = reduce_mean({D1, D2, ... Dk}, (x - mean)^2)
        auto mean = info.add_instruction(make_op("reduce_mean", {{"axes", axes}}), x_reshaped);
        auto x_sub_mean    = info.add_common_op("sub", x_reshaped, mean);
        auto x_sqdiff_mean = info.add_common_op("sqdiff", x_reshaped, mean);
        auto variance =
            info.add_instruction(make_op("reduce_mean", {{"axes", axes}}), x_sqdiff_mean);
        epsilon =
            (x_dtype == migraphx::shape::half_type and std::abs(epsilon) < 1e-7) ? 1e-7 : epsilon;
        auto eps     = info.add_literal(migraphx::literal{migraphx::shape{x_dtype}, {epsilon}});
        auto var_eps = info.add_common_op("add", variance, eps);
        auto rsqrt   = info.add_instruction(make_op("rsqrt"), var_eps);
        auto result  = info.add_common_op("mul", x_sub_mean, rsqrt);
        auto scale_bcast =
            info.add_instruction(make_op("broadcast", {{"axis", 1}, {"out_lens", dims}}), scale);
        auto bias_bcast =
            info.add_instruction(make_op("broadcast", {{"axis", 1}, {"out_lens", dims}}), bias);
        auto scaled     = info.add_instruction(make_op("mul"), result, scale_bcast);
        auto y          = info.add_instruction(make_op("add"), scaled, bias_bcast);
        auto y_reshaped = info.add_instruction(make_op("reshape", {{"dims", x_dims}}), y);
        return y_reshaped;
    }
};

} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
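Editor's note: the parser above lowers GroupNormalization by reshaping N x C x D1 x ... x Dn into N x num_groups x (C/num_groups) x D1 x ... x Dn and taking mean/variance per group, following the comment y = (x - mean) * rsqrt(variance + epsilon) * scale + bias. The following standalone numeric sketch (not MIGraphX code) computes the same quantities directly on a tiny N=1, C=4, num_groups=2 example:

    #include <cmath>
    #include <cstdio>
    #include <vector>

    int main()
    {
        // x: N=1, C=4 channels, 2 spatial positions each; num_groups=2 -> group_size=2
        std::vector<std::vector<double>> x = {{1, 2}, {3, 4}, {10, 20}, {30, 40}};
        std::vector<double> scale = {1.0, 0.5}; // one value per group
        std::vector<double> bias  = {0.0, 1.0};
        const double eps = 1e-5;
        const int num_groups = 2, group_size = 2, d = 2;

        for(int g = 0; g < num_groups; ++g)
        {
            // mean and variance over the group's channels and spatial positions
            double mean = 0, var = 0;
            for(int c = 0; c < group_size; ++c)
                for(int i = 0; i < d; ++i)
                    mean += x[g * group_size + c][i];
            mean /= group_size * d;
            for(int c = 0; c < group_size; ++c)
                for(int i = 0; i < d; ++i)
                    var += (x[g * group_size + c][i] - mean) * (x[g * group_size + c][i] - mean);
            var /= group_size * d;

            for(int c = 0; c < group_size; ++c)
                for(int i = 0; i < d; ++i)
                {
                    double y = (x[g * group_size + c][i] - mean) / std::sqrt(var + eps);
                    y        = y * scale[g] + bias[g];
                    std::printf("group %d, channel %d, pos %d: %f\n", g, g * group_size + c, i, y);
                }
        }
    }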
src/onnx/parse_isinf.cpp (new file, 0 → 100644)

/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {

struct parse_isinf : op_parser<parse_isinf>
{
    std::vector<op_desc> operators() const { return {{"IsInf", "isinf"}}; }

    instruction_ref parse(const op_desc& /*opd*/,
                          const onnx_parser& parser,
                          onnx_parser::node_info info,
                          const std::vector<instruction_ref>& args) const
    {
        bool detect_negative = true;
        bool detect_positive = true;
        if(contains(info.attributes, "detect_negative"))
        {
            detect_negative = static_cast<bool>(
                parser.parse_value(info.attributes.at("detect_negative")).at<int>());
        }
        if(contains(info.attributes, "detect_positive"))
        {
            detect_positive = static_cast<bool>(
                parser.parse_value(info.attributes.at("detect_positive")).at<int>());
        }

        auto x_shape = args[0]->get_shape();
        if(not detect_negative and not detect_positive)
        {
            return info.add_instruction(
                make_op("multibroadcast", {{"out_lens", x_shape.lens()}}),
                info.add_literal(migraphx::literal{migraphx::shape{shape::bool_type}, {false}}));
        }

        auto is_inf = info.add_instruction(make_op("isinf"), args[0]);
        if(detect_negative and detect_positive)
        {
            return is_inf;
        }

        auto zero_l  = info.add_literal(migraphx::literal{migraphx::shape{x_shape.type()}, {0}});
        auto mb_zero = info.add_instruction(
            make_op("multibroadcast", {{"out_lens", x_shape.lens()}}), zero_l);
        auto cond = info.add_broadcastable_binary_op(
            detect_negative ? "less" : "greater", args[0], mb_zero);
        if(cond->get_shape().type() != shape::bool_type)
        {
            cond = info.add_instruction(
                make_op("convert", {{"target_type", shape::bool_type}}), cond);
        }
        return info.add_instruction(make_op("logical_and"), is_inf, cond);
    }
};

} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
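Editor's note: the ONNX IsInf attributes detect_negative and detect_positive restrict which sign of infinity is reported; with both disabled the result is all false, and with only one enabled the parser masks isinf with a sign comparison, as in the graph above. A scalar reference of those semantics (standalone sketch, not MIGraphX code):

    #include <cmath>
    #include <cstdio>

    bool onnx_isinf(double x, bool detect_negative, bool detect_positive)
    {
        if(not detect_negative and not detect_positive)
            return false;             // both disabled -> always false
        if(detect_negative and detect_positive)
            return std::isinf(x);     // either sign counts
        // only one sign enabled: combine isinf with a sign test
        return std::isinf(x) and (detect_negative ? x < 0 : x > 0);
    }

    int main()
    {
        const double inf = INFINITY;
        std::printf("%d %d %d\n",
                    onnx_isinf(inf, true, true),    // 1
                    onnx_isinf(-inf, false, true),  // 0
                    onnx_isinf(-inf, true, false)); // 1
    }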
src/onnx/parse_layernorm.cpp (new file, 0 → 100644)

/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {

struct parse_layernorm : op_parser<parse_layernorm>
{
    std::vector<op_desc> operators() const { return {{"LayerNormalization"}}; }

    std::vector<instruction_ref> parse(const op_desc& /*opd*/,
                                       const onnx_parser& parser,
                                       const onnx_parser::node_info& info,
                                       std::vector<instruction_ref> args) const
    {
        int64_t axis = -1;
        if(contains(info.attributes, "axis"))
        {
            axis = parser.parse_value(info.attributes.at("axis")).at<int64_t>();
        }
        float epsilon = 1e-5f;
        if(contains(info.attributes, "epsilon"))
        {
            epsilon = parser.parse_value(info.attributes.at("epsilon")).at<float>();
        }
        if(contains(info.attributes, "stash_type"))
        {
            std::cerr << "WARNING: LAYERNORM does not support stash_type, it will be ignored.\n";
        }

        if(args.size() < 2 or args.size() > 3)
        {
            MIGRAPHX_THROW("PARSE_LAYERNORM: invalid input count");
        }
        auto x         = args.at(0);
        auto scale     = args.at(1);
        bool skip_bias = args.size() == 2;
        instruction_ref bias;
        if(not skip_bias)
        {
            bias = args.at(2);
        }

        auto x_shape   = x->get_shape();
        auto x_dtype   = x_shape.type();
        int64_t x_rank = x_shape.ndim();
        if(x_rank < 2)
        {
            MIGRAPHX_THROW("PARSE_LAYERNORM: invalid input shape");
        }
        // If rank(X) is r, axis' allowed range is [-r, r)
        if(axis < -x_rank or axis >= x_rank)
        {
            MIGRAPHX_THROW("PARSE_LAYERNORM: invalid axis");
        }

        // y = (x - mean) * rsqrt(variance + epsilon) * scale + bias
        // mean = reduce_mean({D1, D2, ... Dk}, x)
        // variance = reduce_mean({D1, D2, ... Dk}, (x - mean)^2)
        // axis can be negative
        axis       = axis < 0 ? axis + x_rank : axis;
        auto kdims = x_rank - axis;
        std::vector<int64_t> axes(kdims);
        std::iota(axes.begin(), axes.end(), axis);
        auto skipped_axes = x_rank - kdims;

        auto mean          = info.add_instruction(make_op("reduce_mean", {{"axes", axes}}), x);
        auto x_sub_mean    = info.add_common_op("sub", x, mean);
        auto x_sqdiff_mean = info.add_common_op("sqdiff", x, mean);
        auto variance =
            info.add_instruction(make_op("reduce_mean", {{"axes", axes}}), x_sqdiff_mean);
        epsilon =
            (x_dtype == migraphx::shape::half_type and std::abs(epsilon) < 1e-7) ? 1e-7 : epsilon;
        auto eps     = info.add_literal(migraphx::literal{migraphx::shape{x_dtype}, {epsilon}});
        auto var_eps = info.add_common_op("add", variance, eps);
        auto rsqrt   = info.add_instruction(make_op("rsqrt"), var_eps);
        auto result  = info.add_common_op("mul", x_sub_mean, rsqrt);

        instruction_ref scale_bcast = scale;
        instruction_ref bias_bcast  = bias;
        if(skipped_axes > 0)
        {
            auto x_dims = x_shape.lens();
            scale_bcast = info.add_instruction(
                make_op("broadcast", {{"axis", skipped_axes}, {"out_lens", x_dims}}), scale);
            if(not skip_bias)
            {
                bias_bcast = info.add_instruction(
                    make_op("broadcast", {{"axis", skipped_axes}, {"out_lens", x_dims}}), bias);
            }
        }
        auto scaled = info.add_instruction(make_op("mul"), result, scale_bcast);
        auto y = skip_bias ? scaled : info.add_instruction(make_op("add"), scaled, bias_bcast);
        return {y, mean, rsqrt};
    }
};

} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
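Editor's note: LayerNormalization normalizes over the trailing dimensions starting at axis, so the reduce axes are derived by shifting a negative axis by the rank and filling [axis, rank). A tiny standalone check of that derivation (not MIGraphX code), mirroring the axis/kdims/iota logic above:

    #include <cstdint>
    #include <cstdio>
    #include <numeric>
    #include <vector>

    int main()
    {
        int64_t x_rank = 3, axis = -1;       // e.g. a rank-3 input, default axis
        axis       = axis < 0 ? axis + x_rank : axis; // -1 -> 2
        auto kdims = x_rank - axis;                   // number of normalized dims: 1
        std::vector<int64_t> axes(kdims);
        std::iota(axes.begin(), axes.end(), axis);    // axes = {2}: only the last dim
        std::printf("axis=%lld kdims=%lld first_reduce_axis=%lld\n",
                    static_cast<long long>(axis),
                    static_cast<long long>(kdims),
                    static_cast<long long>(axes.front()));
    }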