MIGraphX (gaoqiong) · Commits · 20a6f4f5

Commit 20a6f4f5 (unverified), authored Jan 06, 2023 by Charlie Lin, committed by GitHub on Jan 06, 2023.

    Merge branch 'develop' into dyn_reshape

Parents: 983c7c1f, 863bdfbf

Showing 11 changed files with 229 additions and 27 deletions.
Changed files:

    Dockerfile                                 +1   -1
    Jenkinsfile                                +18  -8
    src/include/migraphx/op/reduce_op.hpp      +39  -15
    src/onnx/parse_reduce_op.cpp               +1   -2
    test/onnx/gen_onnx.py                      +45  -1
    test/onnx/onnx_test.cpp                    +62  -0
    test/onnx/reducel1_dyn_noaxes_test.onnx    +0   -0  (binary, added)
    test/onnx/reducel1_dyn_test.onnx           +0   -0  (binary, added)
    test/onnx/reducemax_dyn_test.onnx          +0   -0  (binary, added)
    test/op_shape_test.cpp                     +42  -0
    test/ref_ops_test.cpp                      +21  -0
Dockerfile  (+1 -1)

@@ -87,7 +87,7 @@ RUN git clone --single-branch --branch ${ONNXRUNTIME_BRANCH} --recursive ${ONNXR
 ADD tools/build_and_test_onnxrt.sh /onnxruntime/build_and_test_onnxrt.sh
-RUN cget -p /usr/local install ROCmSoftwarePlatform/rocMLIR@0f38fb33f518b53b94b541feb9b079668c5518e8 -DBUILD_MIXR_TARGET=On -DLLVM_ENABLE_ZSTD=Off -DLLVM_ENABLE_THREADS=Off
+RUN cget -p /usr/local install ROCmSoftwarePlatform/rocMLIR@78b706fe9879587ab98b6614ae539265374a3fae -DBUILD_MIXR_TARGET=On -DLLVM_ENABLE_ZSTD=Off -DLLVM_ENABLE_THREADS=Off
 ENV MIOPEN_FIND_DB_PATH=/tmp/miopen/find-db
 ENV MIOPEN_USER_DB_PATH=/tmp/miopen/user-db
Jenkinsfile  (+18 -8)

@@ -11,16 +11,22 @@ def rocmtestnode(Map conf) {
     def image = 'migraphxlib'
     env.CCACHE_COMPRESSLEVEL = 7
     env.CCACHE_DIR = ccache
-    def cmake_build = { compiler, flags ->
+    def cmake_build = { bconf ->
+        def compiler  = bconf.get("compiler", "/opt/rocm/llvm/bin/clang++")
+        def flags     = bconf.get("flags", "")
+        def gpu_debug = bconf.get("gpu_debug", "0")
         def cmd = """
             env
             ulimit -c unlimited
             echo "leak:dnnl::impl::malloc" > suppressions.txt
             export LSAN_OPTIONS="suppressions=\$(pwd)/suppressions.txt"
+            export MIGRAPHX_GPU_DEBUG=${gpu_debug}
+            export CXX=${compiler}
+            export CXXFLAGS='-Werror'
             env
             rm -rf build
             mkdir build
             cd build
-            CXX=${compiler} CXXFLAGS='-Werror' cmake -DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache ${flags} ..
+            cmake -DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache ${flags} ..
             make -j\$(nproc) generate all doc package check VERBOSE=1
         """
         echo cmd

@@ -93,28 +99,32 @@ rocmtest clang_debug: rocmnode('vega') { cmake_build ->
     stage('Hip Clang Debug') {
         def sanitizers = "undefined"
         def debug_flags = "-g -O2 -fsanitize=${sanitizers} -fno-sanitize-recover=${sanitizers}"
-        cmake_build("/opt/rocm/llvm/bin/clang++", "-DCMAKE_BUILD_TYPE=debug -DMIGRAPHX_ENABLE_PYTHON=Off -DCMAKE_CXX_FLAGS_DEBUG='${debug_flags}' -DCMAKE_C_FLAGS_DEBUG='${debug_flags}'")
+        cmake_build(flags: "-DCMAKE_BUILD_TYPE=debug -DMIGRAPHX_ENABLE_PYTHON=Off -DCMAKE_CXX_FLAGS_DEBUG='${debug_flags}' -DCMAKE_C_FLAGS_DEBUG='${debug_flags}'")
     }
 },
+clang_gpu_debug: rocmnode('vega') { cmake_build ->
+    stage('Hip Clang GPU Debug') {
+        cmake_build(flags: "-DCMAKE_BUILD_TYPE=release", gpu_debug: true)
+    }
+},
 clang_release: rocmnode('vega') { cmake_build ->
     stage('Hip Clang Release') {
-        cmake_build("/opt/rocm/llvm/bin/clang++", "-DCMAKE_BUILD_TYPE=release")
+        cmake_build(flags: "-DCMAKE_BUILD_TYPE=release")
         stash includes: 'build/*.deb', name: 'migraphx-package'
     }
 },
 mlir_debug: rocmnode('vega') { cmake_build ->
     stage('MLIR Debug') {
         def sanitizers = "undefined"
         def debug_flags = "-g -O2 -fsanitize=${sanitizers} -fno-sanitize-recover=${sanitizers}"
-        cmake_build("/opt/rocm/llvm/bin/clang++", "-DCMAKE_BUILD_TYPE=debug -DMIGRAPHX_ENABLE_PYTHON=Off -DMIGRAPHX_ENABLE_MLIR=On -DCMAKE_CXX_FLAGS_DEBUG='${debug_flags}' -DCMAKE_C_FLAGS_DEBUG='${debug_flags}'")
+        cmake_build(flags: "-DCMAKE_BUILD_TYPE=debug -DMIGRAPHX_ENABLE_PYTHON=Off -DMIGRAPHX_ENABLE_MLIR=On -DCMAKE_CXX_FLAGS_DEBUG='${debug_flags}' -DCMAKE_C_FLAGS_DEBUG='${debug_flags}'")
     }
 },
 clang_asan: rocmnode('nogpu') { cmake_build ->
     stage('Clang ASAN') {
         def sanitizers = "undefined,address"
         def debug_flags = "-g -O2 -fno-omit-frame-pointer -fsanitize=${sanitizers} -fno-sanitize-recover=${sanitizers}"
-        cmake_build("/opt/rocm/llvm/bin/clang++", "-DCMAKE_BUILD_TYPE=debug -DMIGRAPHX_ENABLE_PYTHON=Off -DMIGRAPHX_ENABLE_GPU=Off -DMIGRAPHX_ENABLE_CPU=On -DCMAKE_CXX_FLAGS_DEBUG='${debug_flags}' -DCMAKE_C_FLAGS_DEBUG='${debug_flags}'")
+        cmake_build(flags: "-DCMAKE_BUILD_TYPE=debug -DMIGRAPHX_ENABLE_PYTHON=Off -DMIGRAPHX_ENABLE_GPU=Off -DMIGRAPHX_ENABLE_CPU=On -DCMAKE_CXX_FLAGS_DEBUG='${debug_flags}' -DCMAKE_C_FLAGS_DEBUG='${debug_flags}'")
     }
 }
 //, clang_release_navi: rocmnode('navi21') { cmake_build ->
 //    stage('HIP Clang Release Navi') {
-//        cmake_build("/opt/rocm/llvm/bin/clang++", "-DCMAKE_BUILD_TYPE=release")
+//        cmake_build(flags: "-DCMAKE_BUILD_TYPE=release")
 //    }
 //}
src/include/migraphx/op/reduce_op.hpp  (+39 -15)

@@ -26,6 +26,7 @@
 #include <migraphx/op/name.hpp>
 #include <migraphx/check_shapes.hpp>
+#include <migraphx/dyn_output.hpp>
 #include <migraphx/argument.hpp>
 #include <migraphx/tensor_view.hpp>
 #include <migraphx/shape_for_each.hpp>
@@ -105,18 +106,41 @@ struct reduce_op : op_name<Derived>
         return tuned_axes;
     }

+    /**
+     * @brief Returns a shape in which the axis or axes named for reduction
+     * by this op are set to size 1.
+     *
+     * @param inputs list of input shapes
+     * @return shape
+     */
     shape normalize_compute_shape(std::vector<shape> inputs) const
     {
-        check_shapes{inputs, *this}.has(1);
-        auto s          = inputs.at(0);
-        auto lens       = s.lens();
-        auto tuned_axes = tune_axes(lens.size());
-        for(auto axis : tuned_axes)
+        check_shapes{inputs, *this, true}.has(1);
+        auto s = inputs.at(0);
+        if(s.dynamic())
         {
-            lens[axis] = 1;
+            auto output_dyn_dims = s.dyn_dims();
+            auto tuned_axes      = tune_axes(output_dyn_dims.size());
+            for(const auto& axis : tuned_axes)
+            {
+                // At the time of writing, there's no functional difference between
+                // optimum of 0 (no opt) or 1.
+                output_dyn_dims[axis] = {1, 1, 0};
+            }
+            return shape{s.type(), output_dyn_dims};
         }
-        return inputs[0].with_lens(lens);
+        else
+        {
+            auto lens       = s.lens();
+            auto tuned_axes = tune_axes(lens.size());
+            for(const auto& axis : tuned_axes)
+            {
+                lens[axis] = 1;
+            }
+            return inputs[0].with_lens(lens);
+        }
     }

     template <class T>
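Aside on the shapes involved: at this point in the codebase a migraphx::shape::dynamic_dimension is a {min, max, optimum} triple (visible in the test shapes below), and the new dynamic branch collapses each reduced axis to the fixed triple {1, 1, 0}. A minimal standalone sketch of that arithmetic, using a hypothetical dyn_dim stand-in rather than the real MIGraphX types:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Hypothetical stand-in for migraphx::shape::dynamic_dimension.
    struct dyn_dim
    {
        std::size_t min, max, opt;
    };

    // Mirrors the dynamic branch of normalize_compute_shape: every reduced
    // (tuned) axis collapses to the fixed dynamic_dimension {1, 1, 0}.
    std::vector<dyn_dim> reduce_dyn_dims(std::vector<dyn_dim> dims,
                                         const std::vector<std::size_t>& tuned_axes)
    {
        for(auto axis : tuned_axes)
            dims[axis] = {1, 1, 0};
        return dims;
    }

    int main()
    {
        // Input dyn dims {{2,3,3}, {2,4,4}} reduced over axis 0, the same
        // data as the cases added to test/op_shape_test.cpp below.
        for(const auto& d : reduce_dyn_dims({{2, 3, 3}, {2, 4, 4}}, {0}))
            std::cout << "{" << d.min << "," << d.max << "," << d.opt << "} ";
        std::cout << "\n"; // prints: {1,1,0} {2,4,4}
    }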
@@ -124,7 +148,7 @@ struct reduce_op : op_name<Derived>
                    const std::vector<T>& in_lens,
                    std::vector<T>& out_lens) const
     {
-        for(auto axis : tuned_axes)
+        for(const auto& axis : tuned_axes)
         {
             out_lens[axis] = in_lens[axis];
         }
@@ -151,17 +175,17 @@ struct reduce_op : op_name<Derived>
         static_cast<const Derived&>(*this).output(batch_shape)(val);
     }

-    argument compute(const shape& output_shape, std::vector<argument> args) const
+    argument compute(const dyn_output& dyn_out, std::vector<argument> args) const
     {
-        argument result{output_shape};
+        argument result{dyn_out.computed_shape};
         auto arg_lens   = args.front().get_shape().lens();
         auto tuned_axes = tune_axes(arg_lens.size());
-        std::vector<std::size_t> batch_lens(output_shape.lens().size(), 1);
+        std::vector<std::size_t> batch_lens(dyn_out.computed_shape.lens().size(), 1);
         tune_dims(tuned_axes, arg_lens, batch_lens);
-        shape batch_shape{output_shape.type(), batch_lens};
+        shape batch_shape{dyn_out.computed_shape.type(), batch_lens};
         visit_all(result, args[0])([&](auto output, auto input) {
-            par_for(output_shape.elements(), [&](auto i) {
-                auto out_idx = output_shape.multi(i);
+            par_for(dyn_out.computed_shape.elements(), [&](auto i) {
+                auto out_idx = dyn_out.computed_shape.multi(i);
                 this->reduce(input, batch_shape, tuned_axes, out_idx, output);
             });
         });
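The change here is only plumbing: compute now receives a dyn_output whose computed_shape is the output shape resolved at evaluation time. The reduction logic is untouched: for each output multi-index it folds every input element that differs from it only along the reduced axes, which is what the length-1 "batch" dimensions encode. A self-contained sketch of that indexing scheme, with plain row-major vectors in place of migraphx::shape and par_for, and max as the folding operation:

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <limits>
    #include <vector>

    // Row-major multi-index helpers for a tensor with dimensions `lens`.
    std::vector<std::size_t> multi_index(std::size_t i, const std::vector<std::size_t>& lens)
    {
        std::vector<std::size_t> idx(lens.size());
        for(std::size_t d = lens.size(); d-- > 0;)
        {
            idx[d] = i % lens[d];
            i /= lens[d];
        }
        return idx;
    }

    std::size_t linear_index(const std::vector<std::size_t>& idx,
                             const std::vector<std::size_t>& lens)
    {
        std::size_t i = 0;
        for(std::size_t d = 0; d < lens.size(); ++d)
            i = i * lens[d] + idx[d];
        return i;
    }

    // Max-reduction over `axes`, keeping reduced axes at length 1, the same
    // batch-shape scheme reduce_op::compute uses before any squeeze.
    std::vector<float> reduce_max(const std::vector<float>& in,
                                  const std::vector<std::size_t>& lens,
                                  const std::vector<std::size_t>& axes)
    {
        auto out_lens = lens;
        for(auto a : axes)
            out_lens[a] = 1;
        std::size_t out_elems = 1;
        for(auto l : out_lens)
            out_elems *= l;
        std::vector<float> out(out_elems, std::numeric_limits<float>::lowest());
        for(std::size_t i = 0; i < in.size(); ++i)
        {
            auto idx = multi_index(i, lens);
            for(auto a : axes)
                idx[a] = 0; // collapse the reduced coordinates
            auto o = linear_index(idx, out_lens);
            out[o] = std::max(out[o], in[i]);
        }
        return out;
    }

    int main()
    {
        // 2x5 input reduced over axis 0, matching reduce_max_dynamic_axis0 below.
        std::vector<float> in{1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
        for(float v : reduce_max(in, {2, 5}, {0}))
            std::cout << v << " "; // prints: 6 7 8 9 10
    }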
src/onnx/parse_reduce_op.cpp  (+1 -2)

@@ -68,8 +68,7 @@ instruction_ref parse_reduce_oper(const std::string& op_name,
     }
     else
     {
-        std::size_t n_dim = args.front()->get_shape().lens().size();
-        axes.resize(n_dim);
+        axes.resize(args.front()->get_shape().ndim());
         std::iota(axes.begin(), axes.end(), 0);
     }
 }
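This is a behavior-preserving cleanup: shape::ndim() returns the same value as lens().size(), so when the ONNX node carries no axes attribute the parser still defaults to reducing every axis. A tiny sketch of that iota-filled default, using only standard-library types:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <numeric>
    #include <vector>

    int main()
    {
        std::size_t ndim = 4; // rank of the input tensor
        std::vector<std::int64_t> axes(ndim);
        std::iota(axes.begin(), axes.end(), 0); // default: reduce all axes
        for(auto a : axes)
            std::cout << a << " "; // prints: 0 1 2 3
    }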
test/onnx/gen_onnx.py  (+45 -1, mode 100755 → 100644)

@@ -23,7 +23,7 @@
 #####################################################################################
 # This script generates onnx files for MIGraphX onnx operator tests.
 # To generate an individual onnx file, you can use the following
-# command: python -c "import gen_onnx; gen_onnx.{test_name}_test()"
+# command: python3 -c "import gen_onnx; gen_onnx.{test_name}_test()"
 import numpy as np
 import onnx
 from onnx import helper
@@ -4675,6 +4675,34 @@ def reducel1_test():
     return ([node], [x], [y])


+@onnx_test
+def reducel1_dyn_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None])
+    axes = [-2]
+
+    node = onnx.helper.make_node('ReduceL1',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 axes=axes,
+                                 keepdims=0)
+
+    return ([node], [x], [y])
+
+
+@onnx_test
+def reducel1_dyn_noaxes_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None])
+
+    node = onnx.helper.make_node('ReduceL1',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 keepdims=0)
+
+    return ([node], [x], [y])
+
+
 @onnx_test()
 def reducel2_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
@@ -4724,6 +4752,22 @@ def reduce_log_sum_exp_test():
 def reducemax_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])
     axes = [2]

     node = onnx.helper.make_node('ReduceMax',
                                  inputs=['x'],
                                  outputs=['y'],
                                  axes=axes,
                                  keepdims=0)

     return ([node], [x], [y])


+@onnx_test
+def reducemax_dyn_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 4, 5, 6])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, 4, 6])
+    axes = [2]
+
+    node = onnx.helper.make_node('ReduceMax',
...
test/onnx/onnx_test.cpp  (+62 -0)

@@ -4468,6 +4468,50 @@ TEST_CASE(reducel1_test)
     EXPECT(p == prog);
 }

+TEST_CASE(reducel1_dyn_test)
+{
+    {
+        migraphx::program p;
+        auto* mm = p.get_main_module();
+        // a shape with 4 dynamic dimensions
+        auto l0 = mm->add_parameter("x",
+                                    migraphx::shape{migraphx::shape::float_type,
+                                                    {{3, 3, 0}, {3, 5, 0}, {4, 6, 5}, {5, 7, 6}}});
+        auto abs_ins = mm->add_instruction(migraphx::make_op("abs"), l0);
+        auto sum_ins =
+            mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {-2}}}), abs_ins);
+        auto sq_ins = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {-2}}}), sum_ins);
+        mm->add_return({sq_ins});
+
+        migraphx::onnx_options options;
+        options.map_dyn_input_dims["x"] = {{3, 3}, {3, 5}, {4, 6, 5}, {5, 7, 6}};
+        auto prog = migraphx::parse_onnx("reducel1_dyn_test.onnx", options);
+
+        EXPECT(p == prog);
+    }
+
+    {
+        migraphx::program p;
+        auto* mm = p.get_main_module();
+        // No axes given in the onnx file. Parser should default to all axes.
+        auto l0 = mm->add_parameter("x",
+                                    migraphx::shape{migraphx::shape::float_type,
+                                                    {{3, 3, 0}, {3, 5, 0}, {4, 6, 5}, {5, 7, 6}}});
+        auto abs_ins = mm->add_instruction(migraphx::make_op("abs"), l0);
+        auto sum_ins =
+            mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {0, 1, 2, 3}}}), abs_ins);
+        auto sq_ins =
+            mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {0, 1, 2, 3}}}), sum_ins);
+        mm->add_return({sq_ins});
+
+        migraphx::onnx_options options;
+        options.map_dyn_input_dims["x"] = {{3, 3}, {3, 5}, {4, 6, 5}, {5, 7, 6}};
+        auto prog = migraphx::parse_onnx("reducel1_dyn_noaxes_test.onnx", options);
+
+        EXPECT(p == prog);
+    }
+}
+
 TEST_CASE(reducel2_test)
 {
     migraphx::program p;
@@ -4518,6 +4562,24 @@ TEST_CASE(reducemax_test)
     EXPECT(p == prog);
 }

+TEST_CASE(reducemax_dyn_test)
+{
+    // input shape with 4 dynamic dimensions
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto l0  = mm->add_parameter(
+        "x", migraphx::shape{migraphx::shape::float_type, {{3, 3}, {4, 4}, {5, 5}, {6, 6}}});
+    auto r0 = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {2}}}), l0);
+    auto r1 = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {2}}}), r0);
+    mm->add_return({r1});
+
+    migraphx::onnx_options options;
+    options.map_dyn_input_dims["x"] = {{3, 3}, {4, 4}, {5, 5}, {6, 6}};
+    auto prog = migraphx::parse_onnx("reducemax_dyn_test.onnx", options);
+
+    EXPECT(p == prog);
+}
+
 TEST_CASE(reducemean_test)
 {
     migraphx::program p;
test/onnx/reducel1_dyn_noaxes_test.onnx  (new file, mode 100644): binary file added
test/onnx/reducel1_dyn_test.onnx         (new file, mode 100644): binary file added
test/onnx/reducemax_dyn_test.onnx        (new file, mode 100644): binary file added
test/op_shape_test.cpp  (+42 -0)

@@ -1946,9 +1946,51 @@ void test_reduce_ops()
     }
 }

+// dynamic shape
+template <class T>
+void test_dyn_reduce_ops()
+{
+    {
+        migraphx::shape input{migraphx::shape::float_type, {{2, 3, 3}, {2, 4, 4}}};
+        expect_shape(migraphx::shape{migraphx::shape::float_type,
+                                     std::vector<migraphx::shape::dynamic_dimension>(
+                                         {{2, 3, 3}, {1, 1, 0}})},
+                     T{{-1}},
+                     input);
+    }
+    {
+        migraphx::shape input{migraphx::shape::float_type, {{2, 3, 3}, {2, 4, 4}}};
+        expect_shape(migraphx::shape{migraphx::shape::float_type,
+                                     std::vector<migraphx::shape::dynamic_dimension>(
+                                         {{1, 1, 0}, {2, 4, 4}})},
+                     T{{0}},
+                     input);
+    }
+    {
+        // Empty axis argument reduces all axes
+        migraphx::shape input{migraphx::shape::float_type, {{2, 3, 3}, {2, 4, 4}}};
+        expect_shape(migraphx::shape{migraphx::shape::float_type,
+                                     std::vector<migraphx::shape::dynamic_dimension>(
+                                         {{1, 1, 0}, {1, 1, 0}})},
+                     T{{}},
+                     input);
+    }
+    {
+        migraphx::shape input{migraphx::shape::float_type, {{2, 3, 3}, {2, 4, 4}}};
+        throws_shape(T{{4}}, input);
+    }
+}
+
 TEST_CASE(reduce_max) { test_reduce_ops<migraphx::op::reduce_max>(); }
 TEST_CASE(reduce_mean) { test_reduce_ops<migraphx::op::reduce_mean>(); }
 TEST_CASE(reduce_prod) { test_reduce_ops<migraphx::op::reduce_prod>(); }
 TEST_CASE(reduce_sum) { test_reduce_ops<migraphx::op::reduce_sum>(); }
+TEST_CASE(reduce_max_dyn) { test_dyn_reduce_ops<migraphx::op::reduce_max>(); }
+TEST_CASE(reduce_mean_dyn) { test_dyn_reduce_ops<migraphx::op::reduce_mean>(); }
+TEST_CASE(reduce_prod_dyn) { test_dyn_reduce_ops<migraphx::op::reduce_prod>(); }
+TEST_CASE(reduce_sum_dyn) { test_dyn_reduce_ops<migraphx::op::reduce_sum>(); }

 TEST_CASE(reshape_shape)
 {
     migraphx::shape input{migraphx::shape::float_type, {24, 1, 1, 1}};
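Reading the first two cases: the input has dynamic dimensions {min, max, opt} = {2, 3, 3} and {2, 4, 4}; axis -1 counts from the end, so it collapses the second dimension to {1, 1, 0}, while axis 0 collapses the first, and axis 4 is out of range for a rank-2 shape and must throw. A small sketch of the negative-axis normalization being exercised (a hypothetical helper, not the MIGraphX implementation):

    #include <cstdint>
    #include <iostream>
    #include <stdexcept>

    // Negative reduce axes count from the end: for rank n, axis a < 0 maps to a + n.
    std::int64_t normalize_axis(std::int64_t axis, std::int64_t ndim)
    {
        if(axis < -ndim || axis >= ndim)
            throw std::out_of_range("reduce axis out of range"); // cf. throws_shape(T{{4}}, input)
        return axis < 0 ? axis + ndim : axis;
    }

    int main()
    {
        std::cout << normalize_axis(-1, 2) << "\n"; // 1: collapses the {2,4,4} dimension
        std::cout << normalize_axis(0, 2) << "\n";  // 0: collapses the {2,3,3} dimension
    }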
test/ref_ops_test.cpp  (+21 -0)

@@ -5753,6 +5753,27 @@ TEST_CASE(reduce_max_axis0)
     EXPECT(results_vector == gold);
 }

+TEST_CASE(reduce_max_dynamic_axis0)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    migraphx::shape s{migraphx::shape::float_type, {{2, 4, 2}, {3, 5, 3}}};
+    auto input         = mm->add_parameter("X", s);
+    auto reduce_max_op = migraphx::make_op("reduce_max", {{"axes", {0}}});
+    mm->add_instruction(reduce_max_op, input);
+    p.compile(migraphx::ref::target{});
+
+    migraphx::parameter_map params;
+    migraphx::shape input_fixed_shape{migraphx::shape::float_type, {2, 5}};
+    std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
+    params["X"] = migraphx::argument(input_fixed_shape, input_data.data());
+    auto result = p.eval(params).back();
+    std::vector<float> results_vector;
+    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold = {6, 7, 8, 9, 10};
+    EXPECT(migraphx::verify_range(results_vector, gold));
+}
+
 TEST_CASE(reduce_max_axis01)
 {
     migraphx::program p;
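A sanity check on the gold vector above: the {2, 5} fixed-shape argument reads only the first ten input values row-major (the trailing 11 and 12 in input_data are never touched), and reduce_max over axis 0 is an elementwise max of the two rows:

    #include <algorithm>
    #include <iostream>

    int main()
    {
        // Row-major 2x5 view of the first ten input values.
        float x[2][5] = {{1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}};
        for(int j = 0; j < 5; ++j)
            std::cout << std::max(x[0][j], x[1][j]) << " "; // prints: 6 7 8 9 10
    }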