Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
gaoqiong
MIGraphX
Commits
58d151ba
Commit
58d151ba
authored
Jan 05, 2023
by
charlie
Browse files
Merge branch 'develop' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into dyn_onnx_gemm
parents
251f7778
0d197f27
Changes
9
Show whitespace changes
Inline
Side-by-side
Showing
9 changed files
with
210 additions
and
18 deletions
+210
-18
src/include/migraphx/op/reduce_op.hpp
src/include/migraphx/op/reduce_op.hpp
+39
-15
src/onnx/parse_reduce_op.cpp
src/onnx/parse_reduce_op.cpp
+1
-2
test/onnx/gen_onnx.py
test/onnx/gen_onnx.py
+45
-1
test/onnx/onnx_test.cpp
test/onnx/onnx_test.cpp
+62
-0
test/onnx/reducel1_dyn_noaxes_test.onnx
test/onnx/reducel1_dyn_noaxes_test.onnx
+0
-0
test/onnx/reducel1_dyn_test.onnx
test/onnx/reducel1_dyn_test.onnx
+0
-0
test/onnx/reducemax_dyn_test.onnx
test/onnx/reducemax_dyn_test.onnx
+0
-0
test/op_shape_test.cpp
test/op_shape_test.cpp
+42
-0
test/ref_ops_test.cpp
test/ref_ops_test.cpp
+21
-0
No files found.
src/include/migraphx/op/reduce_op.hpp
View file @
58d151ba
...
...
@@ -26,6 +26,7 @@
#include <migraphx/op/name.hpp>
#include <migraphx/check_shapes.hpp>
#include <migraphx/dyn_output.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/tensor_view.hpp>
#include <migraphx/shape_for_each.hpp>
...
...
@@ -105,26 +106,49 @@ struct reduce_op : op_name<Derived>
return
tuned_axes
;
}
/**
* @brief returns a shape in which the axis or axes named
* for reduction by this op are set, to size 1.
*
* @param inputs list of input shapes
* @return shape
*/
shape
normalize_compute_shape
(
std
::
vector
<
shape
>
inputs
)
const
{
check_shapes
{
inputs
,
*
this
}.
has
(
1
);
check_shapes
{
inputs
,
*
this
,
true
}.
has
(
1
);
auto
s
=
inputs
.
at
(
0
);
if
(
s
.
dynamic
())
{
auto
output_dyn_dims
=
s
.
dyn_dims
();
auto
tuned_axes
=
tune_axes
(
output_dyn_dims
.
size
());
for
(
const
auto
&
axis
:
tuned_axes
)
{
// At the time of writing, there's no functional difference between
// optimum of 0 (no opt) or 1.
output_dyn_dims
[
axis
]
=
{
1
,
1
,
0
};
}
return
shape
{
s
.
type
(),
output_dyn_dims
};
}
else
{
auto
lens
=
s
.
lens
();
auto
tuned_axes
=
tune_axes
(
lens
.
size
());
for
(
auto
axis
:
tuned_axes
)
for
(
const
auto
&
axis
:
tuned_axes
)
{
lens
[
axis
]
=
1
;
}
return
inputs
[
0
].
with_lens
(
lens
);
}
}
template
<
class
T
>
void
tune_dims
(
const
std
::
vector
<
int64_t
>&
tuned_axes
,
const
std
::
vector
<
T
>&
in_lens
,
std
::
vector
<
T
>&
out_lens
)
const
{
for
(
auto
axis
:
tuned_axes
)
for
(
const
auto
&
axis
:
tuned_axes
)
{
out_lens
[
axis
]
=
in_lens
[
axis
];
}
...
...
@@ -151,17 +175,17 @@ struct reduce_op : op_name<Derived>
static_cast
<
const
Derived
&>
(
*
this
).
output
(
batch_shape
)(
val
);
}
argument
compute
(
const
shape
&
output_shape
,
std
::
vector
<
argument
>
args
)
const
argument
compute
(
const
dyn_output
&
dyn_out
,
std
::
vector
<
argument
>
args
)
const
{
argument
result
{
out
put_shape
};
argument
result
{
dyn_out
.
com
put
ed
_shape
};
auto
arg_lens
=
args
.
front
().
get_shape
().
lens
();
auto
tuned_axes
=
tune_axes
(
arg_lens
.
size
());
std
::
vector
<
std
::
size_t
>
batch_lens
(
out
put_shape
.
lens
().
size
(),
1
);
std
::
vector
<
std
::
size_t
>
batch_lens
(
dyn_out
.
com
put
ed
_shape
.
lens
().
size
(),
1
);
tune_dims
(
tuned_axes
,
arg_lens
,
batch_lens
);
shape
batch_shape
{
out
put_shape
.
type
(),
batch_lens
};
shape
batch_shape
{
dyn_out
.
com
put
ed
_shape
.
type
(),
batch_lens
};
visit_all
(
result
,
args
[
0
])([
&
](
auto
output
,
auto
input
)
{
par_for
(
out
put_shape
.
elements
(),
[
&
](
auto
i
)
{
auto
out_idx
=
out
put_shape
.
multi
(
i
);
par_for
(
dyn_out
.
com
put
ed
_shape
.
elements
(),
[
&
](
auto
i
)
{
auto
out_idx
=
dyn_out
.
com
put
ed
_shape
.
multi
(
i
);
this
->
reduce
(
input
,
batch_shape
,
tuned_axes
,
out_idx
,
output
);
});
});
...
...
src/onnx/parse_reduce_op.cpp
View file @
58d151ba
...
...
@@ -68,8 +68,7 @@ instruction_ref parse_reduce_oper(const std::string& op_name,
}
else
{
std
::
size_t
n_dim
=
args
.
front
()
->
get_shape
().
lens
().
size
();
axes
.
resize
(
n_dim
);
axes
.
resize
(
args
.
front
()
->
get_shape
().
ndim
());
std
::
iota
(
axes
.
begin
(),
axes
.
end
(),
0
);
}
}
...
...
test/onnx/gen_onnx.py
View file @
58d151ba
...
...
@@ -23,7 +23,7 @@
#####################################################################################
# This script generates onnx files for MIGraphX onnx operator tests.
# To generate an individual onnx file, you can use the following
# command: python -c "import gen_onnx; gen_onnx.{test_name}_test()"
# command: python
3
-c "import gen_onnx; gen_onnx.{test_name}_test()"
import
numpy
as
np
import
onnx
from
onnx
import
helper
...
...
@@ -4740,6 +4740,34 @@ def reducel1_test():
return
([
node
],
[
x
],
[
y
])
@
onnx_test
def
reducel1_dyn_test
():
x
=
helper
.
make_tensor_value_info
(
'x'
,
TensorProto
.
FLOAT
,
[
None
])
y
=
helper
.
make_tensor_value_info
(
'y'
,
TensorProto
.
FLOAT
,
[
None
])
axes
=
[
-
2
]
node
=
onnx
.
helper
.
make_node
(
'ReduceL1'
,
inputs
=
[
'x'
],
outputs
=
[
'y'
],
axes
=
axes
,
keepdims
=
0
)
return
([
node
],
[
x
],
[
y
])
@
onnx_test
def
reducel1_dyn_noaxes_test
():
x
=
helper
.
make_tensor_value_info
(
'x'
,
TensorProto
.
FLOAT
,
[
None
])
y
=
helper
.
make_tensor_value_info
(
'y'
,
TensorProto
.
FLOAT
,
[
None
])
node
=
onnx
.
helper
.
make_node
(
'ReduceL1'
,
inputs
=
[
'x'
],
outputs
=
[
'y'
],
keepdims
=
0
)
return
([
node
],
[
x
],
[
y
])
@
onnx_test
()
def
reducel2_test
():
x
=
helper
.
make_tensor_value_info
(
'x'
,
TensorProto
.
FLOAT
,
[
3
,
4
,
5
,
6
])
...
...
@@ -4789,6 +4817,22 @@ def reduce_log_sum_exp_test():
def
reducemax_test
():
x
=
helper
.
make_tensor_value_info
(
'x'
,
TensorProto
.
FLOAT
,
[
3
,
4
,
5
,
6
])
y
=
helper
.
make_tensor_value_info
(
'y'
,
TensorProto
.
FLOAT
,
[
3
,
4
,
6
])
axes
=
[
2
]
node
=
onnx
.
helper
.
make_node
(
'ReduceMax'
,
inputs
=
[
'x'
],
outputs
=
[
'y'
],
axes
=
axes
,
keepdims
=
0
)
return
([
node
],
[
x
],
[
y
])
@
onnx_test
def
reducemax_dyn_test
():
x
=
helper
.
make_tensor_value_info
(
'x'
,
TensorProto
.
FLOAT
,
[
None
,
4
,
5
,
6
])
y
=
helper
.
make_tensor_value_info
(
'y'
,
TensorProto
.
FLOAT
,
[
None
,
4
,
6
])
axes
=
[
2
]
node
=
onnx
.
helper
.
make_node
(
'ReduceMax'
,
...
...
test/onnx/onnx_test.cpp
View file @
58d151ba
...
...
@@ -4522,6 +4522,50 @@ TEST_CASE(reducel1_test)
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
reducel1_dyn_test
)
{
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
// a shape with 4 dynamic dimensions
auto
l0
=
mm
->
add_parameter
(
"x"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
3
,
3
,
0
},
{
3
,
5
,
0
},
{
4
,
6
,
5
},
{
5
,
7
,
6
}}});
auto
abs_ins
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"abs"
),
l0
);
auto
sum_ins
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"reduce_sum"
,
{{
"axes"
,
{
-
2
}}}),
abs_ins
);
auto
sq_ins
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"squeeze"
,
{{
"axes"
,
{
-
2
}}}),
sum_ins
);
mm
->
add_return
({
sq_ins
});
migraphx
::
onnx_options
options
;
options
.
map_dyn_input_dims
[
"x"
]
=
{{
3
,
3
},
{
3
,
5
},
{
4
,
6
,
5
},
{
5
,
7
,
6
}};
auto
prog
=
migraphx
::
parse_onnx
(
"reducel1_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
// No axes given in the onnx file. Parser should default to all axes.
auto
l0
=
mm
->
add_parameter
(
"x"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
3
,
3
,
0
},
{
3
,
5
,
0
},
{
4
,
6
,
5
},
{
5
,
7
,
6
}}});
auto
abs_ins
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"abs"
),
l0
);
auto
sum_ins
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"reduce_sum"
,
{{
"axes"
,
{
0
,
1
,
2
,
3
}}}),
abs_ins
);
auto
sq_ins
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"squeeze"
,
{{
"axes"
,
{
0
,
1
,
2
,
3
}}}),
sum_ins
);
mm
->
add_return
({
sq_ins
});
migraphx
::
onnx_options
options
;
options
.
map_dyn_input_dims
[
"x"
]
=
{{
3
,
3
},
{
3
,
5
},
{
4
,
6
,
5
},
{
5
,
7
,
6
}};
auto
prog
=
migraphx
::
parse_onnx
(
"reducel1_dyn_noaxes_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
}
TEST_CASE
(
reducel2_test
)
{
migraphx
::
program
p
;
...
...
@@ -4572,6 +4616,24 @@ TEST_CASE(reducemax_test)
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
reducemax_dyn_test
)
{
// input shape with 4 dynamic dimensions
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"x"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
3
,
3
},
{
4
,
4
},
{
5
,
5
},
{
6
,
6
}}});
auto
r0
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"reduce_max"
,
{{
"axes"
,
{
2
}}}),
l0
);
auto
r1
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"squeeze"
,
{{
"axes"
,
{
2
}}}),
r0
);
mm
->
add_return
({
r1
});
migraphx
::
onnx_options
options
;
options
.
map_dyn_input_dims
[
"x"
]
=
{{
3
,
3
},
{
4
,
4
},
{
5
,
5
},
{
6
,
6
}};
auto
prog
=
migraphx
::
parse_onnx
(
"reducemax_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
reducemean_test
)
{
migraphx
::
program
p
;
...
...
test/onnx/reducel1_dyn_noaxes_test.onnx
0 → 100644
View file @
58d151ba
File added
test/onnx/reducel1_dyn_test.onnx
0 → 100644
View file @
58d151ba
File added
test/onnx/reducemax_dyn_test.onnx
0 → 100644
View file @
58d151ba
File added
test/op_shape_test.cpp
View file @
58d151ba
...
...
@@ -1946,9 +1946,51 @@ void test_reduce_ops()
}
}
// dynamic shape
template
<
class
T
>
void
test_dyn_reduce_ops
()
{
{
migraphx
::
shape
input
{
migraphx
::
shape
::
float_type
,
{{
2
,
3
,
3
},
{
2
,
4
,
4
}}};
expect_shape
(
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
std
::
vector
<
migraphx
::
shape
::
dynamic_dimension
>
(
{{
2
,
3
,
3
},
{
1
,
1
,
0
}})},
T
{{
-
1
}},
input
);
}
{
migraphx
::
shape
input
{
migraphx
::
shape
::
float_type
,
{{
2
,
3
,
3
},
{
2
,
4
,
4
}}};
expect_shape
(
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
std
::
vector
<
migraphx
::
shape
::
dynamic_dimension
>
(
{{
1
,
1
,
0
},
{
2
,
4
,
4
}})},
T
{{
0
}},
input
);
}
{
// Empty axis argument reduces all axes
migraphx
::
shape
input
{
migraphx
::
shape
::
float_type
,
{{
2
,
3
,
3
},
{
2
,
4
,
4
}}};
expect_shape
(
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
std
::
vector
<
migraphx
::
shape
::
dynamic_dimension
>
(
{{
1
,
1
,
0
},
{
1
,
1
,
0
}})},
T
{{}},
input
);
}
{
migraphx
::
shape
input
{
migraphx
::
shape
::
float_type
,
{{
2
,
3
,
3
},
{
2
,
4
,
4
}}};
throws_shape
(
T
{{
4
}},
input
);
}
}
TEST_CASE
(
reduce_max
)
{
test_reduce_ops
<
migraphx
::
op
::
reduce_max
>
();
}
TEST_CASE
(
reduce_mean
)
{
test_reduce_ops
<
migraphx
::
op
::
reduce_mean
>
();
}
TEST_CASE
(
reduce_prod
)
{
test_reduce_ops
<
migraphx
::
op
::
reduce_prod
>
();
}
TEST_CASE
(
reduce_sum
)
{
test_reduce_ops
<
migraphx
::
op
::
reduce_sum
>
();
}
TEST_CASE
(
reduce_max_dyn
)
{
test_dyn_reduce_ops
<
migraphx
::
op
::
reduce_max
>
();
}
TEST_CASE
(
reduce_mean_dyn
)
{
test_dyn_reduce_ops
<
migraphx
::
op
::
reduce_mean
>
();
}
TEST_CASE
(
reduce_prod_dyn
)
{
test_dyn_reduce_ops
<
migraphx
::
op
::
reduce_prod
>
();
}
TEST_CASE
(
reduce_sum_dyn
)
{
test_dyn_reduce_ops
<
migraphx
::
op
::
reduce_sum
>
();
}
TEST_CASE
(
reshape_shape
)
{
migraphx
::
shape
input
{
migraphx
::
shape
::
float_type
,
{
24
,
1
,
1
,
1
}};
...
...
test/ref_ops_test.cpp
View file @
58d151ba
...
...
@@ -5753,6 +5753,27 @@ TEST_CASE(reduce_max_axis0)
EXPECT(results_vector == gold);
}
// Ref-target test: reduce_max over axis 0 on a dynamically-shaped input.
// Builds a program with a dynamic parameter shape, compiles it for the
// reference target, then evaluates it with a concrete {2, 5} input and
// checks the column-wise maxima.
TEST_CASE(reduce_max_dynamic_axis0)
{
migraphx::program p;
auto* mm = p.get_main_module();
// Dynamic shape: two dimensions, each given as {min, max, optimum}.
// Dim 0 ranges 2..4 (opt 2), dim 1 ranges 3..5 (opt 3).
migraphx::shape s{migraphx::shape::float_type, {{2, 4, 2}, {3, 5, 3}}};
auto input = mm->add_parameter("X", s);
// Reduce along axis 0 (the 2..4 dimension).
auto reduce_max_op = migraphx::make_op("reduce_max", {{"axes", {0}}});
mm->add_instruction(reduce_max_op, input);
p.compile(migraphx::ref::target{});
migraphx::parameter_map params;
// Concrete runtime shape within the declared dynamic bounds: {2, 5}.
migraphx::shape input_fixed_shape{migraphx::shape::float_type, {2, 5}};
// NOTE(review): 12 values are supplied but a {2, 5} shape consumes only
// the first 10 (rows [1..5] and [6..10]); the trailing 11, 12 appear
// unused — confirm this is intentional.
std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
params["X"] = migraphx::argument(input_fixed_shape, input_data.data());
auto result = p.eval(params).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
// Element-wise max of the two rows: max([1..5], [6..10]) = [6..10].
std::vector<float> gold = {6, 7, 8, 9, 10};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(reduce_max_axis01)
{
migraphx::program p;
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment