Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
gaoqiong
MIGraphX
Commits
baac1dab
Commit
baac1dab
authored
May 24, 2023
by
Alan Turner
Browse files
Merge remote-tracking branch 'origin/develop' into ck-host-lib
parents
830dff7a
77042e30
Changes
299
Show whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
1002 additions
and
243 deletions
+1002
-243
test/fuse_reduce.cpp
test/fuse_reduce.cpp
+330
-0
test/gpu/hip.cpp
test/gpu/hip.cpp
+20
-1
test/gpu/jit.cpp
test/gpu/jit.cpp
+77
-4
test/gpu/literal.cpp
test/gpu/literal.cpp
+2
-1
test/gpu/manage_host_buffer.cpp
test/gpu/manage_host_buffer.cpp
+2
-2
test/gpu/mlir.cpp
test/gpu/mlir.cpp
+63
-4
test/gpu/quantization.cpp
test/gpu/quantization.cpp
+16
-7
test/gpu/stream_sync.cpp
test/gpu/stream_sync.cpp
+2
-1
test/include/pointwise.hpp
test/include/pointwise.hpp
+10
-1
test/marker.cpp
test/marker.cpp
+2
-3
test/module_test.cpp
test/module_test.cpp
+1
-1
test/onnx/.onnxrt-commit
test/onnx/.onnxrt-commit
+1
-1
test/onnx/concat_dyn_test.onnx
test/onnx/concat_dyn_test.onnx
+0
-0
test/onnx/gen_onnx.py
test/onnx/gen_onnx.py
+172
-1
test/onnx/onnx_test.cpp
test/onnx/onnx_test.cpp
+239
-151
test/onnx/slice_dyn_test.onnx
test/onnx/slice_dyn_test.onnx
+0
-0
test/onnx/slice_reverse_dyn_test.onnx
test/onnx/slice_reverse_dyn_test.onnx
+0
-0
test/onnx/slice_step_dyn_test.onnx
test/onnx/slice_step_dyn_test.onnx
+0
-0
test/onnx/verify_onnx.cpp
test/onnx/verify_onnx.cpp
+65
-65
test/onnx/where_dyn_test.onnx
test/onnx/where_dyn_test.onnx
+0
-0
No files found.
test/fuse_reduce.cpp
0 → 100644
View file @
baac1dab
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/fuse_reduce.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/program.hpp>
#include <basic_ops.hpp>
#include <migraphx/make_op.hpp>
#include <test.hpp>
#include <pointwise.hpp>
// Run the fuse_reduce pass on the program, then remove any instructions
// that the fusion left dead.
void run_pass(migraphx::program& p)
{
    migraphx::run_passes(
        p, {migraphx::fuse_reduce{}, migraphx::dead_code_elimination{}});
}
// Check that every input of every instruction in module m is itself an
// instruction of m (i.e. the fused module does not reach outside itself).
bool all_instructions_are_local(const migraphx::module& m)
{
    return std::all_of(m.begin(), m.end(), [&](const auto& ins) {
        const auto& ins_inputs = ins.inputs();
        return std::none_of(ins_inputs.begin(), ins_inputs.end(), [&](auto input) {
            return not m.has_instruction(input);
        });
    });
}
// Build a "fused_reduce" instruction in the main module of p.
//
// A submodule called `name` is created, with one parameter ("x0", "x1", ...)
// mirroring each entry of `inputs` (same type and lengths, standard layout).
// The callback f(rm, params, axes) populates the submodule and returns its
// result instruction, which becomes the submodule's return value.
// Returns the fused_reduce instruction added to the main module.
template <class F>
migraphx::instruction_ref add_reduce(migraphx::program& p,
                                     const std::string& name,
                                     std::vector<migraphx::instruction_ref> inputs,
                                     const std::vector<int64_t>& axes,
                                     F f)
{
    auto* rm = p.create_module(name);
    auto* mm = p.get_main_module();
    rm->set_bypass();
    // Mirror each input as a parameter of the reduce submodule.
    std::vector<migraphx::instruction_ref> params;
    params.reserve(inputs.size());
    for(auto input : inputs)
    {
        migraphx::shape ps{input->get_shape().type(), input->get_shape().lens()};
        params.push_back(rm->add_parameter("x" + std::to_string(params.size()), ps));
    }
    auto r = f(rm, params, axes);
    rm->add_return({r});
    // The submodule must be self-contained.
    EXPECT(all_instructions_are_local(*rm));
    return mm->add_instruction(
        migraphx::make_op("fused_reduce", {{"axes", axes}}), inputs, {rm});
}
// Make a callback (for add_reduce) that emits a single reduction of the
// named op over the given axes.
inline auto single_reduce(const std::string& name)
{
    return [op = name](auto* rm, const auto& inputs, const auto& axes) {
        return rm->add_instruction(migraphx::make_op(op, {{"axes", axes}}), inputs);
    };
}
// Two independent reductions should each be wrapped into their own
// fused_reduce submodule.
TEST_CASE(single)
{
    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    migraphx::program p1;
    {
        auto* mm   = p1.get_main_module();
        auto x     = mm->add_parameter("x", s);
        auto y     = mm->add_parameter("y", s);
        auto sum_x = mm->add_instruction(
            migraphx::make_op("reduce_sum", {{"axes", {1}}}), x);
        auto sum_y = mm->add_instruction(
            migraphx::make_op("reduce_sum", {{"axes", {1}}}), y);
        mm->add_return({sum_x, sum_y});
    }
    run_pass(p1);

    // Expected: each reduce_sum becomes its own fused_reduce module.
    migraphx::program p2;
    {
        auto* mm   = p2.get_main_module();
        auto x     = mm->add_parameter("x", s);
        auto y     = mm->add_parameter("y", s);
        auto sum_x = add_reduce(
            p2, "main:reduce_sum0", {x}, {1}, single_reduce("reduce_sum"));
        auto sum_y = add_reduce(
            p2, "main:reduce_sum1", {y}, {1}, single_reduce("reduce_sum"));
        mm->add_return({sum_x, sum_y});
    }
    EXPECT(p1 == p2);
}
// A pointwise op feeding a reduction should be fused into a single
// fused_reduce module containing both.
TEST_CASE(pointwise_reduce)
{
    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    migraphx::program p1;
    {
        auto* mm = p1.get_main_module();
        auto x   = mm->add_parameter("x", s);
        auto y   = mm->add_parameter("y", s);
        auto sum_in = add_pointwise(
            p1, "main:pointwise0", {x, y}, single_pointwise("add"));
        auto total = mm->add_instruction(
            migraphx::make_op("reduce_sum", {{"axes", {1}}}), sum_in);
        mm->add_return({total});
    }
    run_pass(p1);

    // Expected: add + reduce_sum merged into one submodule.
    migraphx::program p2;
    {
        auto* mm = p2.get_main_module();
        auto x   = mm->add_parameter("x", s);
        auto y   = mm->add_parameter("y", s);
        auto total = add_reduce(
            p2,
            "main:pointwise0:main:reduce_sum0",
            {x, y},
            {1},
            [&](auto* rm, const auto& inputs, const auto& axes) {
                auto sum_in = add_pointwise(
                    p2, rm, "main:pointwise0", inputs, single_pointwise("add"));
                return rm->add_instruction(
                    migraphx::make_op("reduce_sum", {{"axes", axes}}), sum_in);
            });
        mm->add_return({total});
    }
    EXPECT(p1 == p2);
}
// A reduction whose (broadcast) result feeds a pointwise op should be
// fused into a single fused_reduce module.
TEST_CASE(reduce_pointwise)
{
    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    migraphx::program p1;
    {
        auto* mm = p1.get_main_module();
        auto x   = mm->add_parameter("x", s);
        auto y   = mm->add_parameter("y", s);
        auto total = mm->add_instruction(
            migraphx::make_op("reduce_sum", {{"axes", {1}}}), x);
        auto totalb = mm->add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), total);
        auto result = add_pointwise(
            p1, "main:pointwise0", {totalb, y}, single_pointwise("add"));
        mm->add_return({result});
    }
    run_pass(p1);

    // Expected: reduce_sum + multibroadcast + add in one submodule.
    migraphx::program p2;
    {
        auto* mm = p2.get_main_module();
        auto x   = mm->add_parameter("x", s);
        auto y   = mm->add_parameter("y", s);
        auto result = add_reduce(
            p2,
            "main:reduce_sum0:main:pointwise0",
            {x, y},
            {1},
            [&](auto* rm, const auto& inputs, const auto& axes) {
                auto total = rm->add_instruction(
                    migraphx::make_op("reduce_sum", {{"axes", axes}}), inputs[0]);
                auto totalb = rm->add_instruction(
                    migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}),
                    total);
                return add_pointwise(p2,
                                     rm,
                                     "main:pointwise0",
                                     {totalb, inputs[1]},
                                     single_pointwise("add"));
            });
        mm->add_return({result});
    }
    EXPECT(p1 == p2);
}
// A chain reduce -> broadcast -> pointwise -> reduce -> pointwise over the
// same axes should collapse into one fused_reduce module.
TEST_CASE(reduce_reduce)
{
    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    migraphx::program p1;
    {
        auto* mm = p1.get_main_module();
        auto x   = mm->add_parameter("x", s);
        auto total = mm->add_instruction(
            migraphx::make_op("reduce_sum", {{"axes", {1}}}), x);
        auto totalb = mm->add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), total);
        auto diff = add_pointwise(
            p1, "main:pointwise0", {totalb, x}, single_pointwise("sub"));
        auto diff_sum = mm->add_instruction(
            migraphx::make_op("reduce_sum", {{"axes", {1}}}), diff);
        auto root = add_pointwise(
            p1, "main:pointwise1", {diff_sum}, single_pointwise("sqrt"));
        mm->add_return({root});
    }
    run_pass(p1);

    // Expected: the whole chain lives in a single submodule.
    migraphx::program p2;
    {
        auto* mm = p2.get_main_module();
        auto x   = mm->add_parameter("x", s);
        auto root = add_reduce(
            p2,
            "main:reduce_sum1:main:reduce_sum0:main:pointwise0:main:pointwise1",
            {x},
            {1},
            [&](auto* rm, const auto& inputs, const auto& axes) {
                auto total = rm->add_instruction(
                    migraphx::make_op("reduce_sum", {{"axes", axes}}), inputs[0]);
                auto totalb = rm->add_instruction(
                    migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}),
                    total);
                auto diff = add_pointwise(p2,
                                          rm,
                                          "main:pointwise0",
                                          {totalb, inputs[0]},
                                          single_pointwise("sub"));
                auto diff_sum = rm->add_instruction(
                    migraphx::make_op("reduce_sum", {{"axes", axes}}), diff);
                return add_pointwise(
                    p2, rm, "main:pointwise1", {diff_sum}, single_pointwise("sqrt"));
            });
        mm->add_return({root});
    }
    EXPECT(p1 == p2);
}
// Back-to-back reductions over DIFFERENT axes must not be merged: each one
// gets its own fused_reduce module.
TEST_CASE(reduce_reduce_mismatch_axis)
{
    migraphx::shape s{migraphx::shape::float_type, {4, 2, 3}};
    migraphx::program p1;
    {
        auto* mm = p1.get_main_module();
        auto x   = mm->add_parameter("x", s);
        auto first = mm->add_instruction(
            migraphx::make_op("reduce_sum", {{"axes", {1}}}), x);
        auto second = mm->add_instruction(
            migraphx::make_op("reduce_sum", {{"axes", {2}}}), first);
        mm->add_return({second});
    }
    run_pass(p1);

    // Expected: two separate submodules since the axes differ.
    migraphx::program p2;
    {
        auto* mm = p2.get_main_module();
        auto x   = mm->add_parameter("x", s);
        auto first = add_reduce(
            p2, "main:reduce_sum0", {x}, {1}, single_reduce("reduce_sum"));
        auto second = add_reduce(
            p2, "main:reduce_sum1", {first}, {2}, single_reduce("reduce_sum"));
        mm->add_return({second});
    }
    EXPECT(p1 == p2);
}
// A reduction whose result is used both broadcast (into a pointwise chain
// feeding a second reduction) and unbroadcast (in the final pointwise op)
// should all fuse into one submodule.
TEST_CASE(pointwise_reduce_broadcast)
{
    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    migraphx::program p1;
    {
        auto* mm = p1.get_main_module();
        auto x   = mm->add_parameter("x", s);
        auto first = mm->add_instruction(
            migraphx::make_op("reduce_sum", {{"axes", {1}}}), x);
        auto root = add_pointwise(
            p1, "main:pointwise0", {first}, single_pointwise("sqrt"));
        auto rootb = mm->add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), root);
        auto shifted = add_pointwise(
            p1, "main:pointwise1", {rootb, x}, single_pointwise("add"));
        auto second = mm->add_instruction(
            migraphx::make_op("reduce_sum", {{"axes", {1}}}), shifted);
        auto combined = add_pointwise(
            p1, "main:pointwise2", {second, first}, single_pointwise("add"));
        mm->add_return({combined});
    }
    run_pass(p1);

    // Expected: everything collapses into a single fused_reduce module.
    migraphx::program p2;
    {
        auto* mm = p2.get_main_module();
        auto x   = mm->add_parameter("x", s);
        auto combined = add_reduce(
            p2,
            "main:pointwise0:main:pointwise1:main:reduce_sum1:main:pointwise2:main:reduce_sum0",
            {x},
            {1},
            [&](auto* rm, const auto& inputs, const auto& axes) {
                auto first = rm->add_instruction(
                    migraphx::make_op("reduce_sum", {{"axes", axes}}), inputs[0]);
                auto root = add_pointwise(
                    p2, rm, "main:pointwise0", {first}, single_pointwise("sqrt"));
                auto rootb = rm->add_instruction(
                    migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}),
                    root);
                auto shifted = add_pointwise(p2,
                                             rm,
                                             "main:pointwise1",
                                             {rootb, inputs[0]},
                                             single_pointwise("add"));
                auto second = rm->add_instruction(
                    migraphx::make_op("reduce_sum", {{"axes", axes}}), shifted);
                return add_pointwise(p2,
                                     rm,
                                     "main:pointwise2",
                                     {second, first},
                                     single_pointwise("add"));
            });
        mm->add_return({combined});
    }
    EXPECT(p1 == p2);
}
// Two pre-existing fused_reduce modules over the same axes, connected
// through a multibroadcast, should be merged into one submodule.
TEST_CASE(reduce_reduce_broadcast)
{
    migraphx::shape s{migraphx::shape::float_type, {4, 2, 3}};
    migraphx::program p1;
    {
        auto* mm = p1.get_main_module();
        auto x   = mm->add_parameter("x", s);
        auto first = add_reduce(
            p1, "test:reduce_sum0", {x}, {1}, single_reduce("reduce_sum"));
        auto firstb = mm->add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), first);
        auto second = add_reduce(
            p1,
            "test:reduce_sum1",
            {firstb, x},
            {1},
            [&](auto* rm, const auto& inputs, const auto& axes) {
                auto summed = add_pointwise(
                    p1, rm, "test:pointwise0", inputs, single_pointwise("add"));
                return rm->add_instruction(
                    migraphx::make_op("reduce_sum", {{"axes", axes}}), summed);
            });
        mm->add_return({second});
    }
    run_pass(p1);

    // Expected: one submodule containing both reductions and the broadcast.
    migraphx::program p2;
    {
        auto* mm = p2.get_main_module();
        auto x   = mm->add_parameter("x", s);
        auto merged = add_reduce(
            p2,
            "test:reduce_sum1:test:reduce_sum0",
            {x},
            {1},
            [&](auto* rm, const auto& inputs, const auto& axes) {
                auto first = rm->add_instruction(
                    migraphx::make_op("reduce_sum", {{"axes", axes}}), inputs[0]);
                auto firstb = rm->add_instruction(
                    migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}),
                    first);
                auto summed = add_pointwise(p2,
                                            rm,
                                            "test:pointwise0",
                                            {firstb, inputs[0]},
                                            single_pointwise("add"));
                return rm->add_instruction(
                    migraphx::make_op("reduce_sum", {{"axes", axes}}), summed);
            });
        mm->add_return({merged});
    }
    EXPECT(p1 == p2);
}
// Test-driver entry point: dispatch to the registered TEST_CASEs.
int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/gpu/hip.cpp
View file @
baac1dab
...
...
@@ -27,7 +27,7 @@
#include <migraphx/gpu/hip.hpp>
#include <migraphx/gpu/target.hpp>
TEST_CASE
(
tuple_
to_
from_gpu
)
TEST_CASE
(
tuple_from_gpu
)
{
migraphx
::
shape
s1
{
migraphx
::
shape
::
float_type
,
{
2
,
3
}};
migraphx
::
shape
s2
{
migraphx
::
shape
::
int32_type
,
{
2
,
4
}};
...
...
@@ -47,4 +47,23 @@ TEST_CASE(tuple_to_from_gpu)
EXPECT
(
result2
==
p2_data
);
}
TEST_CASE
(
tuple_to_gpu
)
{
migraphx
::
shape
s1
{
migraphx
::
shape
::
float_type
,
{
2
,
3
}};
migraphx
::
shape
s2
{
migraphx
::
shape
::
int32_type
,
{
2
,
4
}};
std
::
vector
<
float
>
p1_data
=
{
1.1
,
2.2
,
3.3
,
4.4
,
5.5
,
6.6
};
std
::
vector
<
int
>
p2_data
=
{
1
,
2
,
3
,
4
,
5
,
6
,
7
,
8
};
auto
p1
=
migraphx
::
argument
{
s1
,
p1_data
.
data
()};
auto
p2
=
migraphx
::
argument
{
s2
,
p2_data
.
data
()};
auto
p_gpu
=
migraphx
::
gpu
::
to_gpu
(
migraphx
::
argument
({
p1
,
p2
}));
auto
p_host
=
migraphx
::
gpu
::
from_gpu
(
p_gpu
);
std
::
vector
<
migraphx
::
argument
>
results
=
p_host
.
get_sub_objects
();
std
::
vector
<
float
>
result1
;
results
[
0
].
visit
([
&
](
auto
output
)
{
result1
.
assign
(
output
.
begin
(),
output
.
end
());
});
std
::
vector
<
int
>
result2
;
results
[
1
].
visit
([
&
](
auto
output
)
{
result2
.
assign
(
output
.
begin
(),
output
.
end
());
});
EXPECT
(
result1
==
p1_data
);
EXPECT
(
result2
==
p2_data
);
}
int
main
(
int
argc
,
const
char
*
argv
[])
{
test
::
run
(
argc
,
argv
);
}
test/gpu/jit.cpp
View file @
baac1dab
...
...
@@ -27,8 +27,8 @@
#include <migraphx/generate.hpp>
#include <migraphx/program.hpp>
#include <migraphx/par_for.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/gpu/kernel.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/device_name.hpp>
...
...
@@ -206,8 +206,16 @@ TEST_CASE(compile_warnings)
EXPECT
(
not
compile
(
""
).
empty
());
EXPECT
(
not
compile
(
"-Wunused-parameter -Wno-error"
).
empty
());
EXPECT
(
not
compile
(
"-Wno-unused-parameter -Werror"
).
empty
());
#ifdef MIGRAPHX_USE_HIPRTC
if
(
not
migraphx
::
enabled
(
migraphx
::
gpu
::
MIGRAPHX_ENABLE_HIPRTC_WORKAROUNDS
{}))
{
EXPECT
(
test
::
throws
([
&
]
{
compile
(
"-Werror=unused-parameter"
);
}));
EXPECT
(
test
::
throws
([
&
]
{
compile
(
"-Wunused-parameter -Werror"
);
}));
}
#else
EXPECT
(
test
::
throws
([
&
]
{
compile
(
"-Werror=unused-parameter"
);
}));
EXPECT
(
test
::
throws
([
&
]
{
compile
(
"-Wunused-parameter -Werror"
);
}));
#endif
}
TEST_CASE
(
code_object_hip
)
...
...
@@ -235,7 +243,7 @@ TEST_CASE(code_object_hip)
auto
y
=
mm
->
add_parameter
(
"output"
,
input
);
mm
->
add_instruction
(
co
,
x
,
y
);
migraphx
::
compile_options
options
;
p
.
compile
(
migraphx
::
gpu
::
target
{}
,
options
);
p
.
compile
(
migraphx
::
make_
target
(
"gpu"
)
,
options
);
auto
result
=
migraphx
::
gpu
::
from_gpu
(
p
.
eval
({{
"output"
,
migraphx
::
gpu
::
allocate_gpu
(
input
)}}).
front
());
...
...
@@ -261,7 +269,7 @@ TEST_CASE(compile_code_object_hip)
auto
x
=
mm
->
add_literal
(
input_literal
);
auto
y
=
mm
->
add_parameter
(
"output"
,
input
);
mm
->
add_instruction
(
co
,
x
,
y
);
p
.
compile
(
migraphx
::
gpu
::
target
{}
,
migraphx
::
compile_options
{});
p
.
compile
(
migraphx
::
make_
target
(
"gpu"
)
,
migraphx
::
compile_options
{});
auto
result
=
migraphx
::
gpu
::
from_gpu
(
p
.
eval
({{
"output"
,
migraphx
::
gpu
::
allocate_gpu
(
input
)}}).
front
());
...
...
@@ -284,7 +292,7 @@ TEST_CASE(compile_pointwise)
auto
x
=
mm
->
add_literal
(
input_literal
);
auto
y
=
mm
->
add_parameter
(
"output"
,
input
);
mm
->
add_instruction
(
co
,
x
,
y
);
p
.
compile
(
migraphx
::
gpu
::
target
{}
,
migraphx
::
compile_options
{});
p
.
compile
(
migraphx
::
make_
target
(
"gpu"
)
,
migraphx
::
compile_options
{});
auto
result
=
migraphx
::
gpu
::
from_gpu
(
p
.
eval
({{
"output"
,
migraphx
::
gpu
::
allocate_gpu
(
input
)}}).
front
());
...
...
@@ -356,4 +364,69 @@ TEST_CASE(compile_math)
});
}
// NOLINTNEXTLINE
const
std
::
string
assert_template
=
R"__migraphx__(
#include <migraphx/kernels/math.hpp>
#include <migraphx/kernels/types.hpp>
using namespace migraphx;
extern "C" {
__global__ void kernel(void*)
{
static_assert(numeric_max<${type}>() == ${max}, "");
static_assert(numeric_lowest<${type}>() == ${min}, "");
}
}
int main() {}
)__migraphx__"
;
TEST_CASE
(
assert_type_min_max
)
{
std
::
vector
<
std
::
string
>
data_types
;
migraphx
::
gpu
::
hip_compile_options
options
;
for
(
auto
&&
t
:
migraphx
::
shape
::
types
())
{
if
(
contains
({
migraphx
::
shape
::
bool_type
,
migraphx
::
shape
::
tuple_type
},
t
))
continue
;
auto
name
=
migraphx
::
shape
::
cpp_type
(
t
);
if
(
t
==
migraphx
::
shape
::
half_type
)
name
.
insert
(
0
,
"migraphx::"
);
migraphx
::
shape
::
visit
(
t
,
[
&
](
auto
as
)
{
std
::
string
min
=
""
;
std
::
string
max
=
""
;
// Note 9223372036854775808 is a constant literal that is outside the range of long
// long type For the same reason, 18446744073709551616 needs postfix ULL to be parsed
// correctly
if
(
t
==
migraphx
::
shape
::
int64_type
)
{
min
=
"("
+
std
::
to_string
(
as
.
min
()
+
1
)
+
"LL - 1)"
;
max
=
std
::
to_string
(
as
.
max
());
}
else
if
(
t
==
migraphx
::
shape
::
uint64_type
)
{
min
=
std
::
to_string
(
as
.
min
());
max
=
std
::
to_string
(
as
.
max
())
+
"ULL"
;
}
else
{
min
=
std
::
to_string
(
as
.
min
());
max
=
std
::
to_string
(
as
.
max
());
}
auto
src
=
migraphx
::
interpolate_string
(
assert_template
,
{{
"type"
,
name
},
{
"max"
,
max
},
{
"min"
,
min
}});
migraphx
::
shape
input
{
migraphx
::
shape
::
float_type
,
{
5
,
2
}};
options
.
global
=
1024
;
options
.
local
=
1024
;
options
.
inputs
=
{
input
};
options
.
output
=
input
;
options
.
params
=
"-Wno-float-equal"
;
auto
co
=
migraphx
::
gpu
::
compile_hip_code_object
(
src
,
options
);
});
}
}
int
main
(
int
argc
,
const
char
*
argv
[])
{
test
::
run
(
argc
,
argv
);
}
test/gpu/literal.cpp
View file @
baac1dab
...
...
@@ -26,6 +26,7 @@
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/hip.hpp>
...
...
@@ -35,7 +36,7 @@ void gpu_literal_test()
auto
*
mm
=
p
.
get_main_module
();
auto
lit
=
generate_literal
(
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
4
,
3
,
3
,
3
}});
mm
->
add_literal
(
lit
);
p
.
compile
(
migraphx
::
gpu
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"gpu"
)
);
auto
scratch
=
p
.
get_parameter
(
"scratch"
);
if
(
scratch
==
mm
->
end
())
{
...
...
test/gpu/manage_host_buffer.cpp
View file @
baac1dab
...
...
@@ -25,7 +25,7 @@
#include <iostream>
#include <vector>
#include <hip/hip_runtime_api.h>
#include <migraphx/
gpu/
target.hpp>
#include <migraphx/
register_
target.hpp>
#include <migraphx/verify.hpp>
#include <test.hpp>
#include <basic_ops.hpp>
...
...
@@ -57,7 +57,7 @@ TEST_CASE(host_same_buffer_copy)
pp
[
"a"
]
=
migraphx
::
argument
(
ss
,
a_vec
.
data
());
pp
[
"b"
]
=
migraphx
::
argument
(
ss
,
b_vec
.
data
());
std
::
vector
<
float
>
gpu_result
;
migraphx
::
target
gpu_t
=
migraphx
::
gpu
::
target
{}
;
migraphx
::
target
gpu_t
=
migraphx
::
make_
target
(
"gpu"
)
;
migraphx
::
compile_options
options
;
options
.
offload_copy
=
true
;
p
.
compile
(
gpu_t
,
options
);
...
...
test/gpu/mlir.cpp
View file @
baac1dab
...
...
@@ -25,7 +25,7 @@
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/write_literals.hpp>
#include <migraphx/re
f/
target.hpp>
#include <migraphx/re
gister_
target.hpp>
#include <migraphx/module.hpp>
#include <migraphx/program.hpp>
#include <migraphx/make_op.hpp>
...
...
@@ -121,7 +121,7 @@ migraphx::argument run_gpu(migraphx::program p, const migraphx::parameter_map& i
migraphx
::
argument
run_ref
(
migraphx
::
program
p
,
const
migraphx
::
parameter_map
&
inputs
)
{
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
return
p
.
eval
(
inputs
).
front
();
}
...
...
@@ -140,7 +140,7 @@ TEST_CASE(conv)
{
const
std
::
string
mlir_output
=
R"__migraphx__(
module {
func.func @m
ai
n(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr"} {
func.func @m
lir_convolutio
n(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr"} {
%0 = migraphx.convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
return %0 : tensor<1x2x2x2xf32>
}
...
...
@@ -163,7 +163,7 @@ TEST_CASE(conv_add_relu)
{
const
std
::
string
mlir_output
=
R"__migraphx__(
module {
func.func @m
ai
n(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr"} {
func.func @m
lir_convolutio
n(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr"} {
%0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
%1 = migraphx.add(%0, %arg0) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
%2 = migraphx.relu(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
...
...
@@ -187,4 +187,63 @@ module {
EXPECT
(
verify_mlir
(
m
));
}
TEST_CASE
(
dot_add
)
{
const
std
::
string
mlir_output
=
R"__migraphx__(
module {
func.func @mlir_dot(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>, %arg2: tensor<1x5x3xf32>) -> tensor<1x5x3xf32> attributes {arch = "", kernel = "mixr"} {
%0 = migraphx.dot(%arg0, %arg1) : tensor<1x5x4xf32>, tensor<1x4x3xf32> -> tensor<1x5x3xf32>
%1 = migraphx.add(%0, %arg2) : (tensor<1x5x3xf32>, tensor<1x5x3xf32>) -> tensor<1x5x3xf32>
return %1 : tensor<1x5x3xf32>
}
}
)__migraphx__"
;
migraphx
::
module
m
;
auto
arg0
=
m
.
add_parameter
(
"arg0"
,
{
migraphx
::
shape
::
float_type
,
{
1
,
5
,
4
}});
auto
arg1
=
m
.
add_parameter
(
"arg1"
,
{
migraphx
::
shape
::
float_type
,
{
1
,
4
,
3
}});
auto
arg2
=
m
.
add_parameter
(
"arg2"
,
{
migraphx
::
shape
::
float_type
,
{
1
,
5
,
3
}});
auto
conv
=
m
.
add_instruction
(
migraphx
::
make_op
(
"dot"
),
arg0
,
arg1
);
auto
add
=
m
.
add_instruction
(
migraphx
::
make_op
(
"add"
),
conv
,
arg2
);
m
.
add_return
({
add
});
auto
s
=
migraphx
::
gpu
::
dump_mlir
(
m
);
// Skip test if MLIR is not enabled
if
(
s
.
empty
())
return
;
CHECK
(
encode
(
s
)
==
encode
(
mlir_output
));
EXPECT
(
verify_mlir
(
m
));
}
TEST_CASE
(
conv_int8_dequantize_quantize
)
{
const
std
::
string
mlir_output
=
R"__migraphx__(
module {
func.func @main(%arg0: tensor<2x8x3x3xi8>, %arg1: tensor<1x8x4x4xi8>, %arg2: tensor<1x2x2x2xf32>, %arg3: tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xi32> attributes {arch = "", kernel = "mixr"} {
%0 = migraphx.quant_convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xi8>, tensor<2x8x3x3xi8>) -> tensor<1x2x2x2xi32>
%1 = migraphx.dequantizelinear(%0, %arg2, %arg3) : (tensor<1x2x2x2xi32>, tensor<1x2x2x2xf32>, tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xf32>
%2 = migraphx.quantizelinear(%1, %arg2, %arg3) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>, tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xi32>
return %2 : tensor<1x2x2x2xi32>
}
}
)__migraphx__"
;
migraphx
::
module
m
;
auto
x
=
m
.
add_parameter
(
"x"
,
{
migraphx
::
shape
::
int8_type
,
{
1
,
8
,
4
,
4
}});
auto
w
=
m
.
add_parameter
(
"w"
,
{
migraphx
::
shape
::
int8_type
,
{
2
,
8
,
3
,
3
}});
auto
conv
=
m
.
add_instruction
(
migraphx
::
make_op
(
"quant_convolution"
),
x
,
w
);
migraphx
::
shape
ss
{
migraphx
::
shape
::
float_type
,
{
1
,
2
,
2
,
2
}};
migraphx
::
shape
sz
{
migraphx
::
shape
::
int32_type
,
{
1
,
2
,
2
,
2
}};
auto
input2
=
m
.
add_parameter
(
"x_scale"
,
ss
);
auto
input3
=
m
.
add_parameter
(
"x_zero_point"
,
sz
);
auto
dequant
=
m
.
add_instruction
(
migraphx
::
make_op
(
"dequantizelinear"
),
conv
,
input2
,
input3
);
auto
r
=
m
.
add_instruction
(
migraphx
::
make_op
(
"quantizelinear"
),
dequant
,
input2
,
input3
);
m
.
add_return
({
r
});
auto
s
=
migraphx
::
gpu
::
dump_mlir
(
m
);
// Skip test if MLIR is not enabled
if
(
s
.
empty
())
return
;
CHECK
(
encode
(
s
)
==
encode
(
mlir_output
));
EXPECT
(
verify_mlir
(
m
));
}
int
main
(
int
argc
,
const
char
*
argv
[])
{
test
::
run
(
argc
,
argv
);
}
test/gpu/quantization.cpp
View file @
baac1dab
...
...
@@ -23,12 +23,12 @@
*/
#include <iostream>
#include <vector>
#include <migraphx/gpu/fuse_mlir.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/quantization.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/verify.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/propagate_constant.hpp>
...
...
@@ -39,8 +39,8 @@
TEST_CASE
(
gpu_target_copy
)
{
migraphx
::
target
gpu_t
=
migraphx
::
gpu
::
target
{}
;
migraphx
::
target
ref_t
=
migraphx
::
ref
::
target
{}
;
migraphx
::
target
gpu_t
=
migraphx
::
make_
target
(
"gpu"
)
;
migraphx
::
target
ref_t
=
migraphx
::
make_
target
(
"ref"
)
;
migraphx
::
shape
s
{
migraphx
::
shape
::
int8_type
,
{
2
,
3
,
4
,
5
}};
auto
ref_arg_orig
=
migraphx
::
generate_argument
(
s
,
0x123456L
);
...
...
@@ -104,13 +104,22 @@ TEST_CASE(int8_quantization)
m
[
"a"
]
=
migraphx
::
generate_argument
(
sa
);
m
[
"b"
]
=
migraphx
::
generate_argument
(
sb
);
std
::
vector
<
float
>
ref_result
;
migraphx
::
target
ref_t
=
migraphx
::
ref
::
target
{}
;
migraphx
::
target
ref_t
=
migraphx
::
make_
target
(
"ref"
)
;
run_prog
(
p
,
ref_t
,
m
,
ref_result
);
std
::
vector
<
float
>
gpu_result
;
migraphx
::
target
gpu_t
=
migraphx
::
gpu
::
target
{}
;
migraphx
::
target
gpu_t
=
migraphx
::
make_
target
(
"gpu"
)
;
run_prog
(
p
,
gpu_t
,
m
,
gpu_result
);
// Note: the tolerance for mlir_enabled result is temporarily bumped
// higher because the lowering pipeline between mlir fallback and
// regular non-mlir pipeline diverged. MLIR fallback uses the
// rewrite_quantization at the very end of the pipeline, whereas
// the regular pipeline uses the rewrite_quantization in the much
// earlier stage.
if
(
migraphx
::
gpu
::
mlir_enabled
())
EXPECT
(
migraphx
::
verify_range
(
ref_result
,
gpu_result
,
1e5
));
else
EXPECT
(
migraphx
::
verify_range
(
ref_result
,
gpu_result
));
}
}
...
...
test/gpu/stream_sync.cpp
View file @
baac1dab
...
...
@@ -24,6 +24,7 @@
#include <iostream>
#include <vector>
#include <migraphx/register_target.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/context.hpp>
#include <migraphx/gpu/compile_hip.hpp>
...
...
@@ -133,7 +134,7 @@ TEST_CASE(test_stream_sync)
auto
mult_out
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"dot"
),
x
,
y
);
mm
->
add_instruction
(
migraphx
::
make_op
(
"add"
),
mult_out
,
test_val
);
p
.
compile
(
migraphx
::
gpu
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"gpu"
)
);
// Run network and then verify with kernel
auto
args
=
p
.
eval
({{
"x"
,
ginput
},
{
"output"
,
goutput
}},
{
pstream
.
get
(),
true
});
...
...
test/include/pointwise.hpp
View file @
baac1dab
...
...
@@ -30,12 +30,12 @@
template
<
class
F
>
migraphx
::
instruction_ref
add_pointwise
(
migraphx
::
program
&
p
,
migraphx
::
module_ref
mm
,
const
std
::
string
&
name
,
std
::
vector
<
migraphx
::
instruction_ref
>
inputs
,
F
f
)
{
auto
*
pm
=
p
.
create_module
(
name
);
auto
*
mm
=
p
.
get_main_module
();
pm
->
set_bypass
();
std
::
vector
<
migraphx
::
instruction_ref
>
params
;
std
::
transform
(
inputs
.
begin
(),
inputs
.
end
(),
std
::
back_inserter
(
params
),
[
&
](
auto
input
)
{
...
...
@@ -47,6 +47,15 @@ migraphx::instruction_ref add_pointwise(migraphx::program& p,
return
mm
->
add_instruction
(
migraphx
::
make_op
(
"pointwise"
),
inputs
,
{
pm
});
}
template
<
class
F
>
migraphx
::
instruction_ref
add_pointwise
(
migraphx
::
program
&
p
,
const
std
::
string
&
name
,
std
::
vector
<
migraphx
::
instruction_ref
>
inputs
,
F
f
)
{
return
add_pointwise
(
p
,
p
.
get_main_module
(),
name
,
inputs
,
f
);
}
inline
auto
single_pointwise
(
const
std
::
string
&
name
)
{
return
[
=
](
auto
*
pm
,
const
auto
&
inputs
)
{
...
...
test/marker.cpp
View file @
baac1dab
...
...
@@ -22,12 +22,11 @@
* THE SOFTWARE.
*/
#include <migraphx/program.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/marker.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/register_target.hpp>
#include "test.hpp"
struct
mock_marker
...
...
@@ -64,7 +63,7 @@ TEST_CASE(marker)
auto
one
=
mm
->
add_literal
(
1
);
auto
two
=
mm
->
add_literal
(
2
);
mm
->
add_instruction
(
migraphx
::
make_op
(
"add"
),
one
,
two
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
mock_marker
temp_marker
;
p
.
mark
({},
temp_marker
);
...
...
test/module_test.cpp
View file @
baac1dab
...
...
@@ -25,7 +25,7 @@
#include <migraphx/iterator_for.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/re
f/
target.hpp>
#include <migraphx/re
gister_
target.hpp>
#include <migraphx/ranges.hpp>
#include <sstream>
#include "test.hpp"
...
...
test/onnx/.onnxrt-commit
View file @
baac1dab
c9a53c925510a101f5ca94d5ecda0924e40a8463
5a43828b3d73028bfd33b3856f82698d9ab02cb1
test/onnx/concat_dyn_test.onnx
0 → 100644
View file @
baac1dab
File added
test/onnx/gen_onnx.py
View file @
baac1dab
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-202
2
Advanced Micro Devices, Inc. All rights reserved.
# Copyright (c) 2015-202
3
Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
...
...
@@ -761,6 +761,22 @@ def concat_test():
return
([
node
],
[
x
,
y
],
[
z
])
@
onnx_test
()
def
concat_dyn_test
():
x
=
helper
.
make_tensor_value_info
(
'0'
,
TensorProto
.
FLOAT
,
[
None
,
None
,
3
])
y
=
helper
.
make_tensor_value_info
(
'1'
,
TensorProto
.
FLOAT
,
[
None
,
None
,
3
])
z
=
helper
.
make_tensor_value_info
(
'2'
,
TensorProto
.
FLOAT
,
[
None
,
None
,
3
])
node
=
onnx
.
helper
.
make_node
(
'Concat'
,
inputs
=
[
'0'
,
'1'
],
axis
=
0
,
outputs
=
[
'2'
],
)
return
([
node
],
[
x
,
y
],
[
z
])
@
onnx_test
()
def
constant_test
():
x
=
np
.
array
([
0
,
1
,
2
])
...
...
@@ -6170,6 +6186,132 @@ def slice_test():
return
([
node
],
[
x
],
[
y
])
@
onnx_test
()
def
slice_dyn_test
():
x
=
helper
.
make_tensor_value_info
(
'0'
,
TensorProto
.
FLOAT
,
[
None
,
None
,
2
])
y
=
helper
.
make_tensor_value_info
(
'1'
,
TensorProto
.
FLOAT
,
[
None
,
None
,
2
])
node
=
onnx
.
helper
.
make_node
(
'Slice'
,
inputs
=
[
'0'
],
axes
=
[
0
],
starts
=
[
1
],
ends
=
[
2
],
outputs
=
[
'1'
])
return
([
node
],
[
x
],
[
y
])
@
onnx_test
def
slice_step_dyn_test
():
# A slice command with non - default steps will have a "Step"
# instruction added in parsing.
step
=
np
.
array
([
2
,
1
])
step_tensor
=
helper
.
make_tensor
(
name
=
"step"
,
data_type
=
TensorProto
.
INT32
,
dims
=
step
.
shape
,
vals
=
step
.
astype
(
int
))
arg_step
=
helper
.
make_node
(
"Constant"
,
inputs
=
[],
outputs
=
[
'arg_step'
],
value
=
step_tensor
)
axis
=
np
.
array
([
-
1
,
-
2
])
axis_tensor
=
helper
.
make_tensor
(
name
=
"axis"
,
data_type
=
TensorProto
.
INT32
,
dims
=
axis
.
shape
,
vals
=
axis
.
astype
(
int
))
arg_axis
=
helper
.
make_node
(
"Constant"
,
inputs
=
[],
outputs
=
[
'arg_axis'
],
value
=
axis_tensor
)
end
=
np
.
array
([
-
1
,
-
1
])
end_tensor
=
helper
.
make_tensor
(
name
=
"end"
,
data_type
=
TensorProto
.
INT32
,
dims
=
end
.
shape
,
vals
=
end
.
astype
(
int
))
arg_end
=
helper
.
make_node
(
"Constant"
,
inputs
=
[],
outputs
=
[
'arg_end'
],
value
=
end_tensor
)
start
=
np
.
array
([
-
5
,
-
3
])
start_tensor
=
helper
.
make_tensor
(
name
=
"start"
,
data_type
=
TensorProto
.
INT32
,
dims
=
start
.
shape
,
vals
=
start
.
astype
(
int
))
arg_start
=
helper
.
make_node
(
"Constant"
,
inputs
=
[],
outputs
=
[
'arg_start'
],
value
=
start_tensor
)
x
=
helper
.
make_tensor_value_info
(
'0'
,
TensorProto
.
FLOAT
,
[
None
,
5
])
y
=
helper
.
make_tensor_value_info
(
'1'
,
TensorProto
.
FLOAT
,
[
None
,
2
])
node
=
onnx
.
helper
.
make_node
(
'Slice'
,
inputs
=
[
'0'
,
'arg_start'
,
'arg_end'
,
'arg_axis'
,
'arg_step'
],
outputs
=
[
'1'
])
return
([
arg_step
,
arg_axis
,
arg_end
,
arg_start
,
node
],
[
x
],
[
y
])
@
onnx_test
def
slice_reverse_dyn_test
():
# A slice command with negative step on any axis will have
# a "Reverse" instruction added in parsing.
step
=
np
.
array
([
-
1
,
1
])
step_tensor
=
helper
.
make_tensor
(
name
=
"step"
,
data_type
=
TensorProto
.
INT32
,
dims
=
step
.
shape
,
vals
=
step
.
astype
(
int
))
arg_step
=
helper
.
make_node
(
"Constant"
,
inputs
=
[],
outputs
=
[
'arg_step'
],
value
=
step_tensor
)
axis
=
np
.
array
([
-
1
,
-
2
])
axis_tensor
=
helper
.
make_tensor
(
name
=
"axis"
,
data_type
=
TensorProto
.
INT32
,
dims
=
axis
.
shape
,
vals
=
axis
.
astype
(
int
))
arg_axis
=
helper
.
make_node
(
"Constant"
,
inputs
=
[],
outputs
=
[
'arg_axis'
],
value
=
axis_tensor
)
end
=
np
.
array
([
-
1
,
-
1
])
end_tensor
=
helper
.
make_tensor
(
name
=
"end"
,
data_type
=
TensorProto
.
INT32
,
dims
=
end
.
shape
,
vals
=
end
.
astype
(
int
))
arg_end
=
helper
.
make_node
(
"Constant"
,
inputs
=
[],
outputs
=
[
'arg_end'
],
value
=
end_tensor
)
start
=
np
.
array
([
-
5
,
-
3
])
start_tensor
=
helper
.
make_tensor
(
name
=
"start"
,
data_type
=
TensorProto
.
INT32
,
dims
=
start
.
shape
,
vals
=
start
.
astype
(
int
))
arg_start
=
helper
.
make_node
(
"Constant"
,
inputs
=
[],
outputs
=
[
'arg_start'
],
value
=
start_tensor
)
x
=
helper
.
make_tensor_value_info
(
'0'
,
TensorProto
.
FLOAT
,
[
None
,
5
])
y
=
helper
.
make_tensor_value_info
(
'1'
,
TensorProto
.
FLOAT
,
[
None
,
2
])
node
=
onnx
.
helper
.
make_node
(
'Slice'
,
inputs
=
[
'0'
,
'arg_start'
,
'arg_end'
,
'arg_axis'
,
'arg_step'
],
outputs
=
[
'1'
])
return
([
arg_step
,
arg_axis
,
arg_end
,
arg_start
,
node
],
[
x
],
[
y
])
@
onnx_test
()
def
slice_3arg_test
():
x
=
helper
.
make_tensor_value_info
(
'0'
,
TensorProto
.
FLOAT
,
[
5
,
5
])
...
...
@@ -7290,3 +7432,32 @@ def where_test():
outputs
=
[
'z'
])
return
([
node
],
[
c
,
x
,
y
],
[
z
])
@
onnx_test
()
def
where_dyn_test
():
c
=
helper
.
make_tensor_value_info
(
'c'
,
TensorProto
.
BOOL
,
[
None
,
2
,
2
])
x
=
helper
.
make_tensor_value_info
(
'x'
,
TensorProto
.
FLOAT
,
[
None
,
2
,
2
])
y
=
helper
.
make_tensor_value_info
(
'y'
,
TensorProto
.
FLOAT
,
[
None
,
2
,
2
])
z
=
helper
.
make_tensor_value_info
(
'z'
,
TensorProto
.
FLOAT
,
[
None
,
2
,
2
])
node
=
onnx
.
helper
.
make_node
(
'Where'
,
inputs
=
[
'c'
,
'x'
,
'y'
],
outputs
=
[
'z'
])
return
([
node
],
[
c
,
x
,
y
],
[
z
])
@
onnx_test
()
def
where_mixed_test
():
# mixture of static and dynamic input shapes is not supported
c
=
helper
.
make_tensor_value_info
(
'c'
,
TensorProto
.
BOOL
,
[
None
,
2
,
2
])
x
=
helper
.
make_tensor_value_info
(
'x'
,
TensorProto
.
FLOAT
,
[
None
,
2
,
2
])
y
=
helper
.
make_tensor_value_info
(
'y'
,
TensorProto
.
FLOAT
,
[
3
,
2
,
2
])
z
=
helper
.
make_tensor_value_info
(
'z'
,
TensorProto
.
FLOAT
,
[
None
,
2
,
2
])
node
=
onnx
.
helper
.
make_node
(
'Where'
,
inputs
=
[
'c'
,
'x'
,
'y'
],
outputs
=
[
'z'
])
return
([
node
],
[
c
,
x
,
y
],
[
z
])
test/onnx/onnx_test.cpp
View file @
baac1dab
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-202
2
Advanced Micro Devices, Inc. All rights reserved.
* Copyright (c) 2015-202
3
Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
...
...
@@ -186,14 +186,13 @@ TEST_CASE(argmax_dyn_test)
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"x"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
,
0
},
{
4
,
4
,
0
},
{
5
,
5
,
0
},
{
6
,
6
,
0
}}});
"x"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
},
{
4
,
4
},
{
5
,
5
},
{
6
,
6
}}});
auto
ins
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"argmax"
,
{{
"axis"
,
2
}}),
l0
);
auto
ret
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"squeeze"
,
{{
"axes"
,
{
2
}}}),
ins
);
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
4
};
auto
prog
=
parse_onnx
(
"argmax_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -296,8 +295,7 @@ TEST_CASE(averagepool_dyn_test)
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
,
0
},
{
3
,
3
,
0
},
{
5
,
5
,
0
},
{
5
,
5
,
0
},
{
5
,
5
,
0
}}});
"0"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
},
{
3
,
3
},
{
5
,
5
},
{
5
,
5
},
{
5
,
5
}}});
auto
ret
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"pooling"
,
{{
"mode"
,
migraphx
::
op
::
pooling_mode
::
average
},
{
"padding"
,
{
0
,
0
,
0
,
0
,
0
,
0
}},
...
...
@@ -307,7 +305,7 @@ TEST_CASE(averagepool_dyn_test)
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
4
};
auto
prog
=
migraphx
::
parse_onnx
(
"averagepool_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
...
...
@@ -315,7 +313,7 @@ TEST_CASE(averagepool_dyn_test)
TEST_CASE
(
averagepool_dyn_autopad_error_test
)
{
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
4
};
EXPECT
(
test
::
throws
(
[
&
]
{
migraphx
::
parse_onnx
(
"averagepool_dyn_autopad_error_test.onnx"
,
options
);
}));
}
...
...
@@ -323,7 +321,7 @@ TEST_CASE(averagepool_dyn_autopad_error_test)
TEST_CASE
(
averagepool_dyn_asym_padding_error_test
)
{
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
4
};
EXPECT
(
test
::
throws
(
[
&
]
{
migraphx
::
parse_onnx
(
"averagepool_dyn_asym_padding_error_test.onnx"
,
options
);
}));
}
...
...
@@ -331,7 +329,7 @@ TEST_CASE(averagepool_dyn_asym_padding_error_test)
TEST_CASE
(
averagepool_dyn_cip_error_test
)
{
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
4
};
EXPECT
(
test
::
throws
(
[
&
]
{
migraphx
::
parse_onnx
(
"averagepool_dyn_cip_error_test.onnx"
,
options
);
}));
}
...
...
@@ -589,15 +587,14 @@ TEST_CASE(binary_dyn_brcst_prelu_test)
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
,
0
},
{
3
,
3
,
0
},
{
4
,
4
,
0
},
{
5
,
5
,
0
}}});
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
},
{
3
,
3
},
{
4
,
4
},
{
5
,
5
}}});
auto
l1
=
mm
->
add_parameter
(
"1"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
4
,
5
}});
auto
ret
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"prelu"
),
{
l0
,
l1
});
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
4
};
auto
prog
=
migraphx
::
parse_onnx
(
"binary_dyn_brcst_prelu_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -609,14 +606,13 @@ TEST_CASE(binary_dyn_brcst_add_test)
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
half_type
,
{
4
,
5
}});
auto
l1
=
mm
->
add_parameter
(
"1"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
,
0
},
{
3
,
3
,
0
},
{
4
,
4
,
0
},
{
5
,
5
,
0
}}});
"1"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
},
{
3
,
3
},
{
4
,
4
},
{
5
,
5
}}});
auto
ret
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"add"
),
{
l0
,
l1
});
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
4
};
auto
prog
=
migraphx
::
parse_onnx
(
"binary_dyn_brcst_add_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -625,7 +621,7 @@ TEST_CASE(binary_dyn_brcst_add_test)
TEST_CASE
(
binary_dyn_brcst_attr_error_test
)
{
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
4
};
EXPECT
(
test
::
throws
(
[
&
]
{
migraphx
::
parse_onnx
(
"binary_dyn_brcst_attr_error_test.onnx"
,
options
);
}));
}
...
...
@@ -635,8 +631,7 @@ TEST_CASE(binary_dyn_brcst_mul_test)
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
,
0
},
{
3
,
3
,
0
},
{
4
,
4
,
0
},
{
5
,
5
,
0
}}});
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
},
{
3
,
3
},
{
4
,
4
},
{
5
,
5
}}});
auto
l1
=
mm
->
add_parameter
(
"1"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
4
,
1
}});
auto
bl1
=
mm
->
add_instruction
(
...
...
@@ -648,7 +643,7 @@ TEST_CASE(binary_dyn_brcst_mul_test)
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
4
};
auto
prog
=
migraphx
::
parse_onnx
(
"binary_dyn_brcst_mul_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -840,6 +835,25 @@ TEST_CASE(concat_test)
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
concat_dyn_test
)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
},
{
1
,
4
},
{
3
,
3
}}});
auto
l1
=
mm
->
add_parameter
(
"1"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
},
{
1
,
4
},
{
3
,
3
}}});
auto
ret
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"concat"
),
l0
,
l1
);
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
};
auto
prog
=
parse_onnx
(
"concat_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
constant_test
)
{
migraphx
::
program
p
;
...
...
@@ -1101,8 +1115,8 @@ TEST_CASE(conv_dynamic_batch_test)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
6
,
0
},
{
3
,
3
,
0
},
{
5
,
5
,
0
},
{
5
,
5
,
0
}}});
auto
l0
=
mm
->
add_parameter
(
"0"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
6
},
{
3
,
3
},
{
5
,
5
},
{
5
,
5
}}});
auto
l1
=
mm
->
add_parameter
(
"1"
,
{
migraphx
::
shape
::
float_type
,
{
1
,
3
,
3
,
3
}});
auto
c0
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"convolution"
,
...
...
@@ -1112,7 +1126,7 @@ TEST_CASE(conv_dynamic_batch_test)
mm
->
add_return
({
c0
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
6
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
6
};
auto
prog
=
migraphx
::
parse_onnx
(
"conv_dynamic_batch_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -1122,8 +1136,8 @@ TEST_CASE(conv_dynamic_bias_test)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
x0
=
mm
->
add_parameter
(
"0"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
6
,
0
},
{
3
,
3
,
0
},
{
32
,
32
,
0
},
{
32
,
32
,
0
}}});
auto
x0
=
mm
->
add_parameter
(
"0"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
6
},
{
3
,
3
},
{
32
,
32
},
{
32
,
32
}}});
auto
x1
=
mm
->
add_parameter
(
"1"
,
{
migraphx
::
shape
::
float_type
,
{
1
,
3
,
5
,
5
}});
auto
x2
=
mm
->
add_parameter
(
"2"
,
{
migraphx
::
shape
::
float_type
,
{
1
}});
auto
x3
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"convolution"
),
x0
,
x1
);
...
...
@@ -1132,7 +1146,7 @@ TEST_CASE(conv_dynamic_bias_test)
mm
->
add_return
({
x5
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
6
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
6
};
auto
prog
=
migraphx
::
parse_onnx
(
"conv_dynamic_bias_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
...
...
@@ -1141,8 +1155,8 @@ TEST_CASE(conv_dynamic_img_test)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
1
,
0
},
{
3
,
3
,
0
},
{
5
,
10
,
0
},
{
5
,
10
,
0
}}});
auto
l0
=
mm
->
add_parameter
(
"0"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
1
},
{
3
,
3
},
{
5
,
10
},
{
5
,
10
}}});
auto
l1
=
mm
->
add_parameter
(
"1"
,
{
migraphx
::
shape
::
float_type
,
{
1
,
3
,
3
,
3
}});
auto
c0
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"convolution"
,
...
...
@@ -1152,7 +1166,7 @@ TEST_CASE(conv_dynamic_img_test)
mm
->
add_return
({
c0
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
5
,
10
,
0
};
options
.
default_dyn_dim_value
=
{
5
,
10
};
auto
prog
=
migraphx
::
parse_onnx
(
"conv_dynamic_img_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -1163,8 +1177,8 @@ TEST_CASE(conv_dynamic_weights_test)
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
{
migraphx
::
shape
::
float_type
,
{
1
,
3
,
5
,
5
}});
auto
l1
=
mm
->
add_parameter
(
"1"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
1
,
0
},
{
3
,
3
,
0
},
{
2
,
4
,
0
},
{
2
,
4
,
0
}}});
auto
l1
=
mm
->
add_parameter
(
"1"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
1
},
{
3
,
3
},
{
2
,
4
},
{
2
,
4
}}});
auto
c0
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"convolution"
,
{{
"padding"
,
{
0
,
0
}},
{
"stride"
,
{
1
,
1
}},
{
"dilation"
,
{
1
,
1
}}}),
...
...
@@ -1173,7 +1187,7 @@ TEST_CASE(conv_dynamic_weights_test)
mm
->
add_return
({
c0
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
2
,
4
,
0
};
options
.
default_dyn_dim_value
=
{
2
,
4
};
auto
prog
=
migraphx
::
parse_onnx
(
"conv_dynamic_weights_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -1183,10 +1197,10 @@ TEST_CASE(conv_dynamic_img_and_weights_test)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
1
,
0
},
{
3
,
3
,
0
},
{
5
,
10
,
0
},
{
5
,
10
,
0
}}});
auto
l1
=
mm
->
add_parameter
(
"1"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
1
,
0
},
{
3
,
3
,
0
},
{
2
,
4
,
0
},
{
2
,
4
,
0
}}});
auto
l0
=
mm
->
add_parameter
(
"0"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
1
},
{
3
,
3
},
{
5
,
10
},
{
5
,
10
}}});
auto
l1
=
mm
->
add_parameter
(
"1"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
1
},
{
3
,
3
},
{
2
,
4
},
{
2
,
4
}}});
auto
c0
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"convolution"
,
{{
"padding"
,
{
0
,
0
}},
{
"stride"
,
{
1
,
1
}},
{
"dilation"
,
{
1
,
1
}}}),
...
...
@@ -1195,8 +1209,8 @@ TEST_CASE(conv_dynamic_img_and_weights_test)
mm
->
add_return
({
c0
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
5
,
10
,
0
};
options
.
map_dyn_input_dims
[
"1"
]
=
{{
1
,
1
,
0
},
{
3
,
3
,
0
},
{
2
,
4
,
0
},
{
2
,
4
,
0
}};
options
.
default_dyn_dim_value
=
{
5
,
10
};
options
.
map_dyn_input_dims
[
"1"
]
=
{{
1
,
1
},
{
3
,
3
},
{
2
,
4
},
{
2
,
4
}};
auto
prog
=
migraphx
::
parse_onnx
(
"conv_dynamic_img_and_weights_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -1206,8 +1220,8 @@ TEST_CASE(conv_dynamic_batch_same_upper)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
10
,
0
},
{
3
,
3
,
0
},
{
5
,
5
,
0
},
{
5
,
5
,
0
}}});
auto
l0
=
mm
->
add_parameter
(
"0"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
10
},
{
3
,
3
},
{
5
,
5
},
{
5
,
5
}}});
auto
l1
=
mm
->
add_parameter
(
"1"
,
{
migraphx
::
shape
::
float_type
,
{
1
,
3
,
3
,
3
}});
auto
c0
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"convolution"
,
...
...
@@ -1217,7 +1231,7 @@ TEST_CASE(conv_dynamic_batch_same_upper)
mm
->
add_return
({
c0
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
10
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
10
};
auto
prog
=
migraphx
::
parse_onnx
(
"conv_dynamic_batch_same_upper_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -1227,8 +1241,8 @@ TEST_CASE(conv_dynamic_img_same_upper)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
1
,
0
},
{
3
,
3
,
0
},
{
5
,
10
,
0
},
{
5
,
10
,
0
}}});
auto
l0
=
mm
->
add_parameter
(
"0"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
1
},
{
3
,
3
},
{
5
,
10
},
{
5
,
10
}}});
auto
l1
=
mm
->
add_parameter
(
"1"
,
{
migraphx
::
shape
::
float_type
,
{
1
,
3
,
3
,
3
}});
auto
c0
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"convolution"
,
...
...
@@ -1241,7 +1255,7 @@ TEST_CASE(conv_dynamic_img_same_upper)
mm
->
add_return
({
c0
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
5
,
10
,
0
};
options
.
default_dyn_dim_value
=
{
5
,
10
};
auto
prog
=
migraphx
::
parse_onnx
(
"conv_dynamic_img_same_upper_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -1252,8 +1266,8 @@ TEST_CASE(conv_dynamic_kernel_same_lower)
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
{
migraphx
::
shape
::
float_type
,
{
1
,
3
,
5
,
5
}});
auto
l1
=
mm
->
add_parameter
(
"1"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
1
,
0
},
{
3
,
3
,
0
},
{
2
,
4
,
0
},
{
2
,
4
,
0
}}});
auto
l1
=
mm
->
add_parameter
(
"1"
,
{
migraphx
::
shape
::
float_type
,
{{
1
,
1
},
{
3
,
3
},
{
2
,
4
},
{
2
,
4
}}});
auto
c0
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"convolution"
,
{{
"padding"
,
{
0
,
0
}},
...
...
@@ -1265,7 +1279,7 @@ TEST_CASE(conv_dynamic_kernel_same_lower)
mm
->
add_return
({
c0
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
2
,
4
,
0
};
options
.
default_dyn_dim_value
=
{
2
,
4
};
auto
prog
=
migraphx
::
parse_onnx
(
"conv_dynamic_kernel_same_lower_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
...
...
@@ -2011,14 +2025,13 @@ TEST_CASE(flatten_dyn_test)
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
,
0
},
{
3
,
3
,
0
},
{
4
,
4
,
0
},
{
5
,
5
,
0
}}});
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
},
{
3
,
3
},
{
4
,
4
},
{
5
,
5
}}});
auto
c0
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"contiguous"
),
l0
);
auto
ret
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"flatten"
,
{{
"axis"
,
2
}}),
c0
);
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
4
};
auto
prog
=
parse_onnx
(
"flatten_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
...
...
@@ -2068,11 +2081,9 @@ TEST_CASE(gather_dyn_test)
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"data"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
,
0
},
{
4
,
4
,
0
},
{
5
,
5
,
0
},
{
6
,
6
,
0
}}});
"data"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
},
{
4
,
4
},
{
5
,
5
},
{
6
,
6
}}});
auto
l1
=
mm
->
add_parameter
(
"indices"
,
migraphx
::
shape
{
migraphx
::
shape
::
int32_type
,
{{
1
,
4
,
0
},
{
3
,
3
,
0
},
{
4
,
4
,
0
},
{
5
,
5
,
0
}}});
"indices"
,
migraphx
::
shape
{
migraphx
::
shape
::
int32_type
,
{{
1
,
4
},
{
3
,
3
},
{
4
,
4
},
{
5
,
5
}}});
auto
cont_l0
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"contiguous"
),
l0
);
auto
cont_l1
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"contiguous"
),
l1
);
...
...
@@ -2082,7 +2093,7 @@ TEST_CASE(gather_dyn_test)
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
4
};
auto
prog
=
parse_onnx
(
"gather_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -2162,15 +2173,15 @@ TEST_CASE(gathernd_dyn_test)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"data"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
2
,
4
,
2
},
{
2
,
4
}}});
auto
l0
=
mm
->
add_parameter
(
"data"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
2
,
4
,
{
2
}
},
{
2
,
4
}}});
auto
l1
=
mm
->
add_parameter
(
"indices"
,
migraphx
::
shape
{
migraphx
::
shape
::
int64_type
,
{{
1
,
3
},
{
2
,
2
}}});
auto
r
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"gathernd"
),
l0
,
l1
);
mm
->
add_return
({
r
});
migraphx
::
onnx_options
options
;
options
.
map_dyn_input_dims
[
"data"
]
=
{{
2
,
4
,
2
},
{
2
,
4
}};
options
.
map_dyn_input_dims
[
"data"
]
=
{{
2
,
4
,
{
2
}
},
{
2
,
4
}};
options
.
map_dyn_input_dims
[
"indices"
]
=
{{
1
,
3
},
{
2
,
2
}};
auto
prog
=
migraphx
::
parse_onnx
(
"gathernd_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -2299,9 +2310,9 @@ TEST_CASE(gemm_dyn_inner_test)
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"A"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
10
,
8
},
{
6
,
6
,
0
}}});
"A"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
10
,
{
8
}
},
{
6
,
6
}}});
auto
l1
=
mm
->
add_parameter
(
"B"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
10
,
8
},
{
7
,
7
,
0
}}});
"B"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
10
,
{
8
}
},
{
7
,
7
}}});
auto
alpha
=
0.5
f
;
auto
a_l
=
mm
->
add_literal
(
alpha
);
auto
t_a
=
add_common_op
(
*
mm
,
migraphx
::
make_op
(
"mul"
),
{
a_l
,
l0
});
...
...
@@ -2310,7 +2321,7 @@ TEST_CASE(gemm_dyn_inner_test)
mm
->
add_return
({
dot
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
10
,
8
};
options
.
default_dyn_dim_value
=
{
1
,
10
,
{
8
}
};
auto
prog
=
migraphx
::
parse_onnx
(
"gemm_dyn_inner_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
...
...
@@ -2320,7 +2331,7 @@ TEST_CASE(gemm_dyn_outer_test)
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"A"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
5
,
5
,
0
},
{
5
,
10
,
7
}}});
"A"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
5
,
5
},
{
5
,
10
,
{
7
}
}}});
auto
l1
=
mm
->
add_parameter
(
"B"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
11
,
5
}});
auto
alpha
=
2.
f
;
auto
a_l
=
mm
->
add_literal
(
alpha
);
...
...
@@ -2331,7 +2342,7 @@ TEST_CASE(gemm_dyn_outer_test)
mm
->
add_return
({
dot
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
5
,
10
,
7
};
options
.
default_dyn_dim_value
=
{
5
,
10
,
{
7
}
};
auto
prog
=
migraphx
::
parse_onnx
(
"gemm_dyn_outer_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
...
...
@@ -2382,10 +2393,8 @@ TEST_CASE(globalavgpool_dyn_test)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
input
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
,
0
},
{
3
,
3
,
0
},
{
16
,
16
,
0
},
{
16
,
16
,
0
}}});
auto
input
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
},
{
3
,
3
},
{
16
,
16
},
{
16
,
16
}}});
auto
ret
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"pooling"
,
{{
"mode"
,
migraphx
::
op
::
pooling_mode
::
average
},
{
"lengths"
,
{
16
,
16
}},
...
...
@@ -2394,7 +2403,7 @@ TEST_CASE(globalavgpool_dyn_test)
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
4
};
auto
prog
=
parse_onnx
(
"globalavgpool_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -2421,10 +2430,8 @@ TEST_CASE(globallppool_dyn_test)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
input
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
1
,
0
},
{
3
,
3
,
0
},
{
16
,
32
,
0
},
{
16
,
32
,
0
}}});
auto
input
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
1
},
{
3
,
3
},
{
16
,
32
},
{
16
,
32
}}});
auto
ret
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"pooling"
,
{{
"mode"
,
migraphx
::
op
::
pooling_mode
::
lpnorm
},
{
"dyn_global"
,
true
},
...
...
@@ -2434,7 +2441,7 @@ TEST_CASE(globallppool_dyn_test)
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
16
,
32
,
0
};
options
.
default_dyn_dim_value
=
{
16
,
32
};
auto
prog
=
migraphx
::
parse_onnx
(
"globallppool_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -2461,10 +2468,8 @@ TEST_CASE(globalmaxpool_dyn_test)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
input
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
,
0
},
{
3
,
3
,
0
},
{
32
,
32
,
0
},
{
32
,
32
,
0
}}});
auto
input
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
},
{
3
,
3
},
{
32
,
32
},
{
32
,
32
}}});
auto
ret
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"pooling"
,
{{
"mode"
,
migraphx
::
op
::
pooling_mode
::
max
},
{
"lengths"
,
{
32
,
32
}},
...
...
@@ -2473,7 +2478,7 @@ TEST_CASE(globalmaxpool_dyn_test)
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
4
};
auto
prog
=
parse_onnx
(
"globalmaxpool_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -3672,16 +3677,16 @@ TEST_CASE(matmul_dyn_mm_test)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"1"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
4
,
8
,
6
},
{
7
,
7
,
0
}}});
auto
l1
=
mm
->
add_parameter
(
"2"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
7
,
7
,
0
},
{
1
,
5
,
3
}}});
auto
l0
=
mm
->
add_parameter
(
"1"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
4
,
8
,
{
6
}
},
{
7
,
7
}}});
auto
l1
=
mm
->
add_parameter
(
"2"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
7
,
7
},
{
1
,
5
,
{
3
}
}}});
auto
ret
=
migraphx
::
add_apply_alpha_beta
(
*
mm
,
{
l0
,
l1
},
migraphx
::
make_op
(
"dot"
),
1.0
f
,
0.0
f
);
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
map_dyn_input_dims
[
"1"
]
=
{{
4
,
8
,
6
},
{
7
,
7
,
0
}};
options
.
map_dyn_input_dims
[
"2"
]
=
{{
7
,
7
,
0
},
{
1
,
5
,
3
}};
options
.
map_dyn_input_dims
[
"1"
]
=
{{
4
,
8
,
{
6
}
},
{
7
,
7
}};
options
.
map_dyn_input_dims
[
"2"
]
=
{{
7
,
7
},
{
1
,
5
,
{
3
}
}};
auto
prog
=
parse_onnx
(
"matmul_dyn_mm_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -3691,8 +3696,8 @@ TEST_CASE(matmul_dyn_mv_test)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"1"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
4
,
8
,
6
},
{
7
,
7
,
0
}}});
auto
l0
=
mm
->
add_parameter
(
"1"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
4
,
8
,
{
6
}
},
{
7
,
7
}}});
auto
l1
=
mm
->
add_parameter
(
"2"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
7
}});
auto
sl1
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
1
}}}),
l1
);
auto
res
=
migraphx
::
add_apply_alpha_beta
(
*
mm
,
{
l0
,
sl1
},
migraphx
::
make_op
(
"dot"
),
1.0
f
,
0.0
f
);
...
...
@@ -3700,7 +3705,7 @@ TEST_CASE(matmul_dyn_mv_test)
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
map_dyn_input_dims
[
"1"
]
=
{{
4
,
8
,
6
},
{
7
,
7
,
0
}};
options
.
map_dyn_input_dims
[
"1"
]
=
{{
4
,
8
,
{
6
}
},
{
7
,
7
}};
auto
prog
=
parse_onnx
(
"matmul_dyn_mv_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -3712,14 +3717,14 @@ TEST_CASE(matmul_dyn_vm_test)
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"1"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
7
}});
auto
l1
=
mm
->
add_parameter
(
"2"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
7
,
7
,
0
},
{
4
,
10
,
8
}}});
"2"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
7
,
7
},
{
4
,
10
,
{
8
}
}}});
auto
sl0
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
0
}}}),
l0
);
auto
res
=
migraphx
::
add_apply_alpha_beta
(
*
mm
,
{
sl0
,
l1
},
migraphx
::
make_op
(
"dot"
),
1.0
f
,
0.0
f
);
auto
ret
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"squeeze"
,
{{
"axes"
,
{
0
}}}),
res
);
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
map_dyn_input_dims
[
"2"
]
=
{{
7
,
7
,
0
},
{
4
,
10
,
8
}};
options
.
map_dyn_input_dims
[
"2"
]
=
{{
7
,
7
},
{
4
,
10
,
{
8
}
}};
auto
prog
=
parse_onnx
(
"matmul_dyn_vm_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -3729,7 +3734,7 @@ TEST_CASE(matmul_dyn_vv_test)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
migraphx
::
shape
::
dynamic_dimension
dd
{
5
,
8
,
7
};
migraphx
::
shape
::
dynamic_dimension
dd
{
5
,
8
,
{
7
}
};
auto
l0
=
mm
->
add_parameter
(
"1"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
dd
}});
auto
l1
=
mm
->
add_parameter
(
"2"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
dd
}});
auto
sl0
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
0
}}}),
l0
);
...
...
@@ -3750,7 +3755,7 @@ TEST_CASE(matmul_dyn_vv_test)
TEST_CASE
(
matmul_dyn_broadcast_error
)
{
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
4
};
EXPECT
(
test
::
throws
([
&
]
{
migraphx
::
parse_onnx
(
"matmul_dyn_broadcast_error.onnx"
,
options
);
}));
}
...
...
@@ -3770,7 +3775,7 @@ TEST_CASE(matmulinteger_test)
TEST_CASE
(
matmulinteger_dyn_error
)
{
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
4
};
EXPECT
(
test
::
throws
([
&
]
{
migraphx
::
parse_onnx
(
"matmulinteger_dyn_error.onnx"
,
options
);
}));
}
...
...
@@ -4079,13 +4084,13 @@ TEST_CASE(neg_dynamic_test)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
migraphx
::
shape
s
{
migraphx
::
shape
::
int64_type
,
{{
1
,
10
,
0
},
{
3
,
3
,
0
}}};
migraphx
::
shape
s
{
migraphx
::
shape
::
int64_type
,
{{
1
,
10
},
{
3
,
3
}}};
auto
input
=
mm
->
add_parameter
(
"0"
,
s
);
auto
ret
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"neg"
),
input
);
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
10
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
10
};
auto
prog
=
migraphx
::
parse_onnx
(
"neg_dynamic_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
...
...
@@ -4121,9 +4126,9 @@ TEST_CASE(nms_dynamic_batch_test)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
migraphx
::
shape
sb
{
migraphx
::
shape
::
float_type
,
{{
1
,
10
,
0
},
{
6
,
6
,
0
},
{
4
,
4
,
0
}}};
migraphx
::
shape
sb
{
migraphx
::
shape
::
float_type
,
{{
1
,
10
},
{
6
,
6
},
{
4
,
4
}}};
auto
b
=
mm
->
add_parameter
(
"boxes"
,
sb
);
migraphx
::
shape
ss
{
migraphx
::
shape
::
float_type
,
{{
1
,
10
,
0
},
{
1
,
1
,
0
},
{
6
,
6
,
0
}}};
migraphx
::
shape
ss
{
migraphx
::
shape
::
float_type
,
{{
1
,
10
},
{
1
,
1
},
{
6
,
6
}}};
auto
s
=
mm
->
add_parameter
(
"scores"
,
ss
);
migraphx
::
shape
smo
{
migraphx
::
shape
::
int64_type
,
{
1
}};
auto
mo
=
mm
->
add_parameter
(
"max_output_boxes_per_class"
,
smo
);
...
...
@@ -4142,7 +4147,7 @@ TEST_CASE(nms_dynamic_batch_test)
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
10
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
10
};
options
.
use_dyn_output
=
true
;
auto
prog
=
migraphx
::
parse_onnx
(
"nms_dynamic_batch_test.onnx"
,
options
);
...
...
@@ -4153,9 +4158,9 @@ TEST_CASE(nms_dynamic_boxes_test)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
migraphx
::
shape
sb
{
migraphx
::
shape
::
float_type
,
{{
1
,
1
,
0
},
{
6
,
20
,
0
},
{
4
,
4
,
0
}}};
migraphx
::
shape
sb
{
migraphx
::
shape
::
float_type
,
{{
1
,
1
},
{
6
,
20
},
{
4
,
4
}}};
auto
b
=
mm
->
add_parameter
(
"boxes"
,
sb
);
migraphx
::
shape
ss
{
migraphx
::
shape
::
float_type
,
{{
1
,
1
,
0
},
{
1
,
1
,
0
},
{
6
,
20
,
0
}}};
migraphx
::
shape
ss
{
migraphx
::
shape
::
float_type
,
{{
1
,
1
},
{
1
,
1
},
{
6
,
20
}}};
auto
s
=
mm
->
add_parameter
(
"scores"
,
ss
);
migraphx
::
shape
smo
{
migraphx
::
shape
::
int64_type
,
{
1
}};
auto
mo
=
mm
->
add_parameter
(
"max_output_boxes_per_class"
,
smo
);
...
...
@@ -4168,7 +4173,7 @@ TEST_CASE(nms_dynamic_boxes_test)
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
6
,
20
,
0
};
options
.
default_dyn_dim_value
=
{
6
,
20
};
options
.
use_dyn_output
=
true
;
auto
prog
=
migraphx
::
parse_onnx
(
"nms_dynamic_boxes_test.onnx"
,
options
);
...
...
@@ -4181,7 +4186,7 @@ TEST_CASE(nms_dynamic_classes_test)
auto
*
mm
=
p
.
get_main_module
();
migraphx
::
shape
sb
{
migraphx
::
shape
::
float_type
,
{
1
,
6
,
4
}};
auto
b
=
mm
->
add_parameter
(
"boxes"
,
sb
);
migraphx
::
shape
ss
{
migraphx
::
shape
::
float_type
,
{{
1
,
1
,
0
},
{
1
,
10
,
0
},
{
6
,
6
,
0
}}};
migraphx
::
shape
ss
{
migraphx
::
shape
::
float_type
,
{{
1
,
1
},
{
1
,
10
},
{
6
,
6
}}};
auto
s
=
mm
->
add_parameter
(
"scores"
,
ss
);
migraphx
::
shape
smo
{
migraphx
::
shape
::
int64_type
,
{
1
}};
auto
mo
=
mm
->
add_parameter
(
"max_output_boxes_per_class"
,
smo
);
...
...
@@ -4194,7 +4199,7 @@ TEST_CASE(nms_dynamic_classes_test)
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
10
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
10
};
options
.
use_dyn_output
=
true
;
auto
prog
=
migraphx
::
parse_onnx
(
"nms_dynamic_classes_test.onnx"
,
options
);
...
...
@@ -4369,12 +4374,12 @@ TEST_CASE(pad_attr_dyn_test)
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
x
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
2
,
4
,
2
},
{
2
,
4
,
2
}}});
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
2
,
4
,
{
2
}
},
{
2
,
4
,
{
2
}
}}});
auto
ret
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"pad"
,
{{
"pads"
,
{
1
,
1
,
1
,
1
}}}),
x
);
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
map_dyn_input_dims
[
"0"
]
=
{{
2
,
4
,
2
},
{
2
,
4
,
2
}};
options
.
map_dyn_input_dims
[
"0"
]
=
{{
2
,
4
,
{
2
}
},
{
2
,
4
,
{
2
}
}};
auto
prog
=
parse_onnx
(
"pad_attr_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
...
...
@@ -4384,13 +4389,13 @@ TEST_CASE(pad_cnst_dyn_test)
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
x
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
2
,
4
,
2
},
{
2
,
4
,
2
}}});
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
2
,
4
,
{
2
}
},
{
2
,
4
,
{
2
}
}}});
mm
->
add_literal
({
migraphx
::
shape
{
migraphx
::
shape
::
int32_type
,
{
4
}},
{
0
,
2
,
0
,
1
}});
auto
ret
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"pad"
,
{{
"pads"
,
{
0
,
2
,
0
,
1
}}}),
x
);
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
map_dyn_input_dims
[
"0"
]
=
{{
2
,
4
,
2
},
{
2
,
4
,
2
}};
options
.
map_dyn_input_dims
[
"0"
]
=
{{
2
,
4
,
{
2
}
},
{
2
,
4
,
{
2
}
}};
auto
prog
=
parse_onnx
(
"pad_cnst_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
...
...
@@ -4398,7 +4403,7 @@ TEST_CASE(pad_cnst_dyn_test)
TEST_CASE
(
pad_dyn_reflect_error
)
{
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
2
,
4
,
2
};
options
.
default_dyn_dim_value
=
{
2
,
4
,
{
2
}
};
EXPECT
(
test
::
throws
([
&
]
{
migraphx
::
parse_onnx
(
"pad_dyn_reflect_error.onnx"
,
options
);
}));
}
...
...
@@ -4862,7 +4867,7 @@ TEST_CASE(reducel1_dyn_test)
// a shape with 4 dynamic dimensions
auto
l0
=
mm
->
add_parameter
(
"x"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
3
,
3
,
0
},
{
3
,
5
,
0
},
{
4
,
6
,
5
},
{
5
,
7
,
6
}}});
{{
3
,
3
},
{
3
,
5
},
{
4
,
6
,
{
5
}
},
{
5
,
7
,
{
6
}
}}});
auto
abs_ins
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"abs"
),
l0
);
auto
sum_ins
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"reduce_sum"
,
{{
"axes"
,
{
-
2
}}}),
abs_ins
);
...
...
@@ -4870,7 +4875,7 @@ TEST_CASE(reducel1_dyn_test)
mm
->
add_return
({
sq_ins
});
migraphx
::
onnx_options
options
;
options
.
map_dyn_input_dims
[
"x"
]
=
{{
3
,
3
},
{
3
,
5
},
{
4
,
6
,
5
},
{
5
,
7
,
6
}};
options
.
map_dyn_input_dims
[
"x"
]
=
{{
3
,
3
},
{
3
,
5
},
{
4
,
6
,
{
5
}
},
{
5
,
7
,
{
6
}
}};
auto
prog
=
migraphx
::
parse_onnx
(
"reducel1_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -4882,7 +4887,7 @@ TEST_CASE(reducel1_dyn_test)
// No axes given in the onnx file. Parser should default to all axes.
auto
l0
=
mm
->
add_parameter
(
"x"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
3
,
3
,
0
},
{
3
,
5
,
0
},
{
4
,
6
,
5
},
{
5
,
7
,
6
}}});
{{
3
,
3
},
{
3
,
5
},
{
4
,
6
,
{
5
}
},
{
5
,
7
,
{
6
}
}}});
auto
abs_ins
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"abs"
),
l0
);
auto
sum_ins
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"reduce_sum"
,
{{
"axes"
,
{
0
,
1
,
2
,
3
}}}),
abs_ins
);
...
...
@@ -4891,7 +4896,7 @@ TEST_CASE(reducel1_dyn_test)
mm
->
add_return
({
sq_ins
});
migraphx
::
onnx_options
options
;
options
.
map_dyn_input_dims
[
"x"
]
=
{{
3
,
3
},
{
3
,
5
},
{
4
,
6
,
5
},
{
5
,
7
,
6
}};
options
.
map_dyn_input_dims
[
"x"
]
=
{{
3
,
3
},
{
3
,
5
},
{
4
,
6
,
{
5
}
},
{
5
,
7
,
{
6
}
}};
auto
prog
=
migraphx
::
parse_onnx
(
"reducel1_dyn_noaxes_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -4954,13 +4959,13 @@ TEST_CASE(reducemax_dyn_test)
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"x"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
3
,
3
},
{
4
,
4
},
{
5
,
5
},
{
6
,
6
}}});
"x"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
3
,
5
},
{
4
,
4
},
{
5
,
5
},
{
6
,
6
}}});
auto
r0
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"reduce_max"
,
{{
"axes"
,
{
2
}}}),
l0
);
auto
r1
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"squeeze"
,
{{
"axes"
,
{
2
}}}),
r0
);
mm
->
add_return
({
r1
});
migraphx
::
onnx_options
options
;
options
.
map_dyn_input_dims
[
"x"
]
=
{{
3
,
3
},
{
4
,
4
},
{
5
,
5
},
{
6
,
6
}};
options
.
map_dyn_input_dims
[
"x"
]
=
{{
3
,
5
},
{
4
,
4
},
{
5
,
5
},
{
6
,
6
}};
auto
prog
=
migraphx
::
parse_onnx
(
"reducemax_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -5097,8 +5102,10 @@ TEST_CASE(reshape_test)
migraphx
::
literal
{
migraphx
::
shape
{
migraphx
::
shape
::
int64_type
,
{
2
}},
reshape_dims
});
auto
l0
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
4
,
2
,
3
}});
op
.
dims
=
reshape_dims
;
mm
->
add_instruction
(
op
,
l0
);
mm
->
add_instruction
(
op
,
l0
);
auto
c0
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"contiguous"
),
l0
);
mm
->
add_instruction
(
op
,
c0
);
auto
c1
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"contiguous"
),
l0
);
mm
->
add_instruction
(
op
,
c1
);
auto
prog
=
optimize_onnx
(
"reshape_test.onnx"
);
EXPECT
(
p
==
prog
);
...
...
@@ -5787,17 +5794,17 @@ TEST_CASE(scatternd_dyn_test)
auto
*
mm
=
p
.
get_main_module
();
// parameters with dynamic dimensions
auto
l0
=
mm
->
add_parameter
(
"data"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
3
,
2
},
{
2
,
2
},
{
2
,
2
}}});
"data"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
3
,
{
2
}
},
{
2
,
2
},
{
2
,
2
}}});
auto
l1
=
mm
->
add_parameter
(
"indices"
,
migraphx
::
shape
{
migraphx
::
shape
::
int64_type
,
{{
2
,
1
,
2
},
{
1
,
1
},
{
2
,
2
}}});
"indices"
,
migraphx
::
shape
{
migraphx
::
shape
::
int64_type
,
{{
2
,
1
,
{
2
}
},
{
1
,
1
},
{
2
,
2
}}});
auto
l2
=
mm
->
add_parameter
(
"updates"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
2
,
1
,
2
},
{
1
,
1
},
{
2
,
2
}}});
"updates"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
2
,
1
,
{
2
}
},
{
1
,
1
},
{
2
,
2
}}});
auto
r
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"scatternd_none"
),
l0
,
l1
,
l2
);
mm
->
add_return
({
r
});
migraphx
::
onnx_options
options
;
options
.
map_dyn_input_dims
[
"data"
]
=
{{
1
,
3
,
2
},
{
2
,
2
},
{
2
,
2
}};
options
.
map_dyn_input_dims
[
"indices"
]
=
{{
2
,
1
,
2
},
{
1
,
1
},
{
2
,
2
}};
options
.
map_dyn_input_dims
[
"updates"
]
=
{{
2
,
1
,
2
},
{
1
,
1
},
{
2
,
2
}};
options
.
map_dyn_input_dims
[
"data"
]
=
{{
1
,
3
,
{
2
}
},
{
2
,
2
},
{
2
,
2
}};
options
.
map_dyn_input_dims
[
"indices"
]
=
{{
2
,
1
,
{
2
}
},
{
1
,
1
},
{
2
,
2
}};
options
.
map_dyn_input_dims
[
"updates"
]
=
{{
2
,
1
,
{
2
}
},
{
1
,
1
},
{
2
,
2
}};
auto
prog
=
migraphx
::
parse_onnx
(
"scatternd_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -5931,7 +5938,7 @@ TEST_CASE(sinh_dynamic_test)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
migraphx
::
shape
::
dynamic_dimension
dd
{
1
,
10
,
0
};
migraphx
::
shape
::
dynamic_dimension
dd
{
1
,
10
};
std
::
vector
<
migraphx
::
shape
::
dynamic_dimension
>
dyn_dims
;
dyn_dims
.
push_back
(
dd
);
auto
input
=
mm
->
add_parameter
(
"x"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
dyn_dims
});
...
...
@@ -5991,6 +5998,44 @@ TEST_CASE(slice_test)
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
slice_dyn_test
)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
3
,
3
},
{
1
,
3
},
{
2
,
2
}}});
auto
ret
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"slice"
,
{{
"axes"
,
{
0
}},
{
"starts"
,
{
1
}},
{
"ends"
,
{
2
}}}),
l0
);
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
// Parser converts the dynamic input shape to static unless there is at least one non-fixed
// dynamic dimension. Slicing is not allowed along the non-fixed axis 1.
options
.
map_dyn_input_dims
[
"0"
]
=
{{
3
,
3
},
{
1
,
3
},
{
2
,
2
}};
auto
prog
=
migraphx
::
parse_onnx
(
"slice_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
slice_step_dyn_test
)
{
// A slice command with non-default steps will have a "Step" instruction added in parsing.
// At the time of writing, Step doesn't support dynamic shape input.
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
};
EXPECT
(
test
::
throws
([
&
]
{
migraphx
::
parse_onnx
(
"slice_step_dyn_test.onnx"
,
options
);
}));
}
TEST_CASE
(
slice_reverse_dyn_test
)
{
// A slice command with negative step on any axis will have a "Reverse" instruction added in
// parsing. At the time of writing, Reverse doesn't support dynamic shape input.
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
};
EXPECT
(
test
::
throws
([
&
]
{
migraphx
::
parse_onnx
(
"slice_reverse_dyn_test.onnx"
,
options
);
}));
}
TEST_CASE
(
slice_3arg_test
)
{
migraphx
::
program
p
;
...
...
@@ -6116,13 +6161,12 @@ TEST_CASE(softmax_dyn_test)
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
,
0
},
{
3
,
3
,
0
},
{
4
,
4
,
0
},
{
4
,
4
,
0
}}});
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
},
{
3
,
3
},
{
4
,
4
},
{
4
,
4
}}});
auto
ret
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"softmax"
,
{{
"axis"
,
-
1
}}),
l0
);
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
4
};
auto
prog
=
migraphx
::
parse_onnx
(
"softmax_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -6332,10 +6376,9 @@ TEST_CASE(squeeze_unsqueeze_dyn_test)
auto
*
mm
=
p
.
get_main_module
();
std
::
vector
<
int64_t
>
squeeze_axes
{
0
,
2
,
3
,
5
};
std
::
vector
<
int64_t
>
unsqueeze_axes
{
0
,
1
,
3
,
5
};
auto
l0
=
mm
->
add_parameter
(
"0"
,
auto
l0
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
1
,
0
},
{
1
,
4
,
0
},
{
1
,
1
,
0
},
{
1
,
1
,
0
},
{
1
,
4
,
0
},
{
1
,
1
,
0
}}});
{{
1
,
1
},
{
1
,
4
},
{
1
,
1
},
{
1
,
1
},
{
1
,
4
},
{
1
,
1
}}});
auto
c0
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"contiguous"
),
l0
);
auto
l1
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"squeeze"
,
{{
"axes"
,
squeeze_axes
}}),
c0
);
auto
c1
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"contiguous"
),
l1
);
...
...
@@ -6343,7 +6386,7 @@ TEST_CASE(squeeze_unsqueeze_dyn_test)
mm
->
add_return
({
ret
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
4
};
auto
prog
=
parse_onnx
(
"squeeze_unsqueeze_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -6637,14 +6680,13 @@ TEST_CASE(transpose_dyn_test)
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
input
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
,
0
},
{
2
,
2
,
0
},
{
2
,
2
,
0
},
{
3
,
3
,
0
}}});
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
},
{
2
,
2
},
{
2
,
2
},
{
3
,
3
}}});
std
::
vector
<
int64_t
>
perm
{
0
,
3
,
1
,
2
};
auto
t0
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
perm
}}),
input
);
mm
->
add_return
({
t0
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
4
};
auto
prog
=
migraphx
::
parse_onnx
(
"transpose_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
...
...
@@ -6834,7 +6876,7 @@ TEST_CASE(variable_batch_user_input_test1)
mm
->
add_return
({
r
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
2
,
2
,
0
};
options
.
default_dyn_dim_value
=
{
2
,
2
};
auto
prog
=
migraphx
::
parse_onnx
(
"variable_batch_test.onnx"
,
options
);
...
...
@@ -6845,14 +6887,13 @@ TEST_CASE(variable_batch_user_input_test2)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
2
,
5
,
0
},
{
3
,
3
,
0
},
{
16
,
16
,
0
},
{
16
,
16
,
0
}}});
auto
l0
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
2
,
5
},
{
3
,
3
},
{
16
,
16
},
{
16
,
16
}}});
auto
r
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"identity"
),
l0
);
mm
->
add_return
({
r
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
2
,
5
,
0
};
options
.
default_dyn_dim_value
=
{
2
,
5
};
auto
prog
=
migraphx
::
parse_onnx
(
"variable_batch_test.onnx"
,
options
);
...
...
@@ -6863,14 +6904,13 @@ TEST_CASE(variable_batch_user_input_test3)
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
2
,
5
,
0
},
{
3
,
3
,
0
},
{
16
,
16
,
0
},
{
16
,
16
,
0
}}});
auto
l0
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
2
,
5
},
{
3
,
3
},
{
16
,
16
},
{
16
,
16
}}});
auto
r
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"identity"
),
l0
);
mm
->
add_return
({
r
});
migraphx
::
onnx_options
options
;
options
.
map_dyn_input_dims
[
"0"
]
=
{{
2
,
5
,
0
},
{
3
,
3
,
0
},
{
16
,
16
,
0
},
{
16
,
16
,
0
}};
options
.
map_dyn_input_dims
[
"0"
]
=
{{
2
,
5
},
{
3
,
3
},
{
16
,
16
},
{
16
,
16
}};
auto
prog
=
migraphx
::
parse_onnx
(
"variable_batch_test.onnx"
,
options
);
...
...
@@ -6898,7 +6938,7 @@ TEST_CASE(variable_batch_user_input_test5)
// Error using default_dim_value and default_dyn_dim_value
migraphx
::
onnx_options
options
;
options
.
default_dim_value
=
2
;
options
.
default_dyn_dim_value
=
{
1
,
2
,
0
};
options
.
default_dyn_dim_value
=
{
1
,
2
};
EXPECT
(
test
::
throws
([
&
]
{
migraphx
::
parse_onnx
(
"variable_batch_test.onnx"
,
options
);
}));
}
...
...
@@ -6907,12 +6947,29 @@ TEST_CASE(variable_batch_user_input_test6)
{
// Error using both map_dyn_input_dims and map_input_dims
migraphx
::
onnx_options
options
;
options
.
map_dyn_input_dims
[
"0"
]
=
{{
2
,
5
,
0
},
{
3
,
3
,
0
},
{
16
,
16
,
0
},
{
16
,
16
,
0
}};
options
.
map_dyn_input_dims
[
"0"
]
=
{{
2
,
5
},
{
3
,
3
},
{
16
,
16
},
{
16
,
16
}};
options
.
map_input_dims
[
"0"
]
=
{
2
,
3
,
16
,
16
};
EXPECT
(
test
::
throws
([
&
]
{
migraphx
::
parse_onnx
(
"variable_batch_test.onnx"
,
options
);
}));
}
TEST_CASE
(
variable_batch_user_input_test7
)
{
// if entry in map_dyn_input_dims is all fixed dynamic_dimensions, convert it to a static shape
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
l0
=
mm
->
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
2
,
3
,
16
,
16
}});
auto
r
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"identity"
),
l0
);
mm
->
add_return
({
r
});
migraphx
::
onnx_options
options
;
options
.
map_dyn_input_dims
[
"0"
]
=
{{
2
,
2
,
{
2
}},
{
3
,
3
},
{
16
,
16
},
{
16
,
16
}};
auto
prog
=
migraphx
::
parse_onnx
(
"variable_batch_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
variable_batch_leq_zero_test
)
{
migraphx
::
program
p
;
...
...
@@ -6948,4 +7005,35 @@ TEST_CASE(where_test)
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
where_dyn_test
)
{
// TODO: broadcasting for dynamic shapes isn't implemented at time of writing.
// Update this test case to use shapes that require broadcasting, when available.
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
lc
=
mm
->
add_parameter
(
"c"
,
migraphx
::
shape
{
migraphx
::
shape
::
bool_type
,
{{
1
,
4
},
{
2
,
2
},
{
2
,
2
}}});
auto
lx
=
mm
->
add_parameter
(
"x"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
},
{
2
,
2
},
{
2
,
2
}}});
auto
ly
=
mm
->
add_parameter
(
"y"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{{
1
,
4
},
{
2
,
2
},
{
2
,
2
}}});
auto
r
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"where"
),
lc
,
lx
,
ly
);
mm
->
add_return
({
r
});
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
};
auto
prog
=
parse_onnx
(
"where_dyn_test.onnx"
,
options
);
EXPECT
(
p
==
prog
);
}
TEST_CASE
(
where_mixed_test
)
{
// mixture of static and dynamic input shapes is not supported
migraphx
::
onnx_options
options
;
options
.
default_dyn_dim_value
=
{
1
,
4
};
EXPECT
(
test
::
throws
([
&
]
{
migraphx
::
parse_onnx
(
"where_mixed_test.onnx"
,
options
);
}));
}
int
main
(
int
argc
,
const
char
*
argv
[])
{
test
::
run
(
argc
,
argv
);
}
test/onnx/slice_dyn_test.onnx
0 → 100644
View file @
baac1dab
File added
test/onnx/slice_reverse_dyn_test.onnx
0 → 100644
View file @
baac1dab
File added
test/onnx/slice_step_dyn_test.onnx
0 → 100644
View file @
baac1dab
File added
test/onnx/verify_onnx.cpp
View file @
baac1dab
...
...
@@ -26,7 +26,7 @@
#include <migraphx/literal.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/program.hpp>
#include <migraphx/re
f/
target.hpp>
#include <migraphx/re
gister_
target.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/verify.hpp>
#include <migraphx/onnx.hpp>
...
...
@@ -36,7 +36,7 @@
TEST_CASE
(
averagepool_notset_test
)
{
auto
p
=
migraphx
::
parse_onnx
(
"averagepool_notset_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
std
::
vector
<
float
>
data_x
=
{
0
,
1
,
2
,
3
,
4
,
5
,
6
,
7
,
8
,
9
,
10
,
11
,
12
,
13
,
14
,
15
,
16
,
17
,
18
,
19
,
20
,
21
,
22
,
23
,
24
};
migraphx
::
shape
s_x
{
migraphx
::
shape
::
float_type
,
{
1
,
1
,
5
,
5
}};
...
...
@@ -54,7 +54,7 @@ TEST_CASE(averagepool_notset_test)
TEST_CASE
(
averagepool_nt_cip_test
)
{
auto
p
=
migraphx
::
parse_onnx
(
"averagepool_nt_cip_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
std
::
vector
<
float
>
data_x
=
{
0
,
1
,
2
,
3
,
4
,
5
,
6
,
7
,
8
,
9
,
10
,
11
,
12
,
13
,
14
,
15
,
16
,
17
,
18
,
19
,
20
,
21
,
22
,
23
,
24
};
migraphx
::
shape
s_x
{
migraphx
::
shape
::
float_type
,
{
1
,
1
,
5
,
5
}};
...
...
@@ -72,7 +72,7 @@ TEST_CASE(averagepool_nt_cip_test)
TEST_CASE
(
batch_norm_flat_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"batch_norm_flat_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
x_shape
{
migraphx
::
shape
::
float_type
,
{
10
}};
migraphx
::
shape
c_shape
(
migraphx
::
shape
::
float_type
,
{
1
});
...
...
@@ -118,7 +118,7 @@ TEST_CASE(batch_norm_flat_test)
TEST_CASE
(
batch_norm_rank_2_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"batch_norm_rank_2_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
x_shape
{
migraphx
::
shape
::
float_type
,
{
2
,
5
}};
migraphx
::
shape
c_shape
(
migraphx
::
shape
::
float_type
,
{
5
});
...
...
@@ -155,7 +155,7 @@ TEST_CASE(batch_norm_rank_2_test)
TEST_CASE
(
batch_norm_1d_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"batch_norm_1d_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
x_shape
{
migraphx
::
shape
::
half_type
,
{
2
,
3
,
4
}};
migraphx
::
shape
c_shape
(
migraphx
::
shape
::
float_type
,
{
3
});
...
...
@@ -191,7 +191,7 @@ TEST_CASE(batch_norm_1d_test)
TEST_CASE
(
batch_norm_2d_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"batch_norm_2d_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
x_shape
{
migraphx
::
shape
::
float_type
,
{
2
,
3
,
4
,
4
}};
migraphx
::
shape
c_shape
(
migraphx
::
shape
::
float_type
,
{
3
});
...
...
@@ -257,7 +257,7 @@ TEST_CASE(batch_norm_2d_test)
TEST_CASE
(
batch_norm_3d_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"batch_norm_3d_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
x_shape
{
migraphx
::
shape
::
half_type
,
{
2
,
2
,
2
,
2
,
2
}};
migraphx
::
shape
c_shape
(
migraphx
::
shape
::
half_type
,
{
2
});
...
...
@@ -299,7 +299,7 @@ TEST_CASE(batch_norm_3d_test)
TEST_CASE
(
celu_verify_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"celu_verify_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s
{
migraphx
::
shape
::
float_type
,
{
2
,
3
}};
std
::
vector
<
float
>
data
=
{
-
5.5
,
2.0
,
100.
,
7.0
,
0.
,
-
1.
};
...
...
@@ -321,7 +321,7 @@ TEST_CASE(celu_verify_test)
TEST_CASE
(
clip_args_type_mismatch
)
{
auto
p
=
migraphx
::
parse_onnx
(
"clip_test_args_type_mismatch.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s_0
{
migraphx
::
shape
::
float_type
,
{
3
,
3
}};
migraphx
::
parameter_map
pp
;
std
::
vector
<
float
>
data_0
=
{
0.9
,
1.2
,
1.7
,
1.9
,
2.2
,
2.7
,
2.9
,
3.2
,
3.7
};
...
...
@@ -337,7 +337,7 @@ TEST_CASE(clip_args_type_mismatch)
TEST_CASE
(
depthtospace_simple_test
)
{
auto
p
=
migraphx
::
parse_onnx
(
"depthtospace_simple_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
std
::
vector
<
float
>
data_in
(
48
);
std
::
iota
(
std
::
begin
(
data_in
),
std
::
end
(
data_in
),
0
);
migraphx
::
shape
s_x
{
migraphx
::
shape
::
float_type
,
{
1
,
8
,
2
,
3
}};
...
...
@@ -355,7 +355,7 @@ TEST_CASE(depthtospace_simple_test)
TEST_CASE
(
spacetodepth_simple_test
)
{
auto
p
=
migraphx
::
parse_onnx
(
"spacetodepth_simple_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
std
::
vector
<
float
>
data_in
(
48
);
std
::
iota
(
std
::
begin
(
data_in
),
std
::
end
(
data_in
),
0
);
migraphx
::
shape
s_x
{
migraphx
::
shape
::
float_type
,
{
1
,
2
,
4
,
6
}};
...
...
@@ -374,7 +374,7 @@ TEST_CASE(spacetodepth_depthtospace_test)
{
// space to depth
auto
p1
=
migraphx
::
parse_onnx
(
"spacetodepth_simple_test.onnx"
);
p1
.
compile
(
migraphx
::
ref
::
target
{}
);
p1
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
std
::
vector
<
float
>
data_in
(
48
);
std
::
iota
(
std
::
begin
(
data_in
),
std
::
end
(
data_in
),
0
);
migraphx
::
shape
s_x_1
{
migraphx
::
shape
::
float_type
,
{
1
,
2
,
4
,
6
}};
...
...
@@ -383,7 +383,7 @@ TEST_CASE(spacetodepth_depthtospace_test)
auto
result1
=
p1
.
eval
(
pp1
).
back
();
// depth to space
auto
p2
=
migraphx
::
parse_onnx
(
"depthtospace_simple_test.onnx"
);
p2
.
compile
(
migraphx
::
ref
::
target
{}
);
p2
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
parameter_map
pp2
;
pp2
[
"x"
]
=
result1
;
auto
result2
=
p2
.
eval
(
pp2
).
back
();
...
...
@@ -395,7 +395,7 @@ TEST_CASE(spacetodepth_depthtospace_test)
TEST_CASE
(
eyelike_verify_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"eyelike_verify_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s
{
migraphx
::
shape
::
float_type
,
{
3
,
4
}};
std
::
vector
<
float
>
data
{
12
,
0
};
...
...
@@ -413,7 +413,7 @@ TEST_CASE(eyelike_verify_test)
TEST_CASE
(
eyelike_verify_negk_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"eyelike_verify_negk_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s
{
migraphx
::
shape
::
float_type
,
{
3
,
4
}};
std
::
vector
<
float
>
data
{
12
,
0
};
...
...
@@ -431,7 +431,7 @@ TEST_CASE(eyelike_verify_negk_test)
TEST_CASE
(
gather_elements
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"gather_elements_axis0_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s_data
{
migraphx
::
shape
::
float_type
,
{
3
,
4
}};
std
::
vector
<
float
>
data
=
{
0.25
,
0.75
,
0.9375
,
0.4375
,
0.6875
,
0.5625
,
-
0.875
,
0.1875
,
-
0.125
,
0.5
,
-
0.9375
,
-
0.0625
};
...
...
@@ -454,7 +454,7 @@ TEST_CASE(gather_elements)
TEST_CASE
(
gemm_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"gemm_brcst_C_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
a_shape
{
migraphx
::
shape
::
float_type
,
{
5
,
6
}};
std
::
vector
<
float
>
a_data
=
{
0.26472837
,
0.8525864
,
0.41929847
,
0.14151508
,
0.43216065
,
...
...
@@ -498,10 +498,10 @@ TEST_CASE(gemm_test)
TEST_CASE
(
gemm_half_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"gemm_half_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
a_shape
{
migraphx
::
shape
::
half_type
,
{
8
,
6
}};
std
::
vector
tmp
=
{
0.2646
,
0.8525
,
0.4192
,
0.1415
,
0.4321
,
0.675
,
0.4248
,
0.8203
,
std
::
vector
<
float
>
tmp
=
{
0.2646
,
0.8525
,
0.4192
,
0.1415
,
0.4321
,
0.675
,
0.4248
,
0.8203
,
0.978
,
0.5796
,
0.6626
,
0.479
,
0.924
,
0.734
,
0.674
,
0.8716
,
0.3733
,
0.3328
,
0.4272
,
0.0247
,
0.7583
,
0.4873
,
0.5835
,
0.694
,
0.4375
,
0.2406
,
0.269
,
0.6763
,
0.542
,
0.8994
,
0.657
,
0.5425
,
...
...
@@ -542,7 +542,7 @@ TEST_CASE(gemm_half_test)
TEST_CASE
(
greaterorequal_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"greaterorequal_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s
{
migraphx
::
shape
::
float_type
,
{
3
}};
std
::
vector
<
float
>
data1
=
{
0.25
,
0.75
,
0.9375
};
...
...
@@ -563,7 +563,7 @@ TEST_CASE(greaterorequal_test)
TEST_CASE
(
hardsigmoid_verify_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"hardsigmoid_verify_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s
{
migraphx
::
shape
::
float_type
,
{
2
,
5
}};
std
::
vector
<
float
>
data
=
{
-
10.0
,
-
2.5
,
-
1.0
,
-
0.5
,
0
,
1.0
,
2.0
,
2.5
,
2.6
,
100.0
};
...
...
@@ -587,7 +587,7 @@ TEST_CASE(hardsigmoid_verify_test)
TEST_CASE
(
if_else_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"if_else_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s_data
{
migraphx
::
shape
::
float_type
,
{
2
,
3
}};
std
::
vector
<
float
>
data
=
{
0.0625
,
0.75
,
-
0.0625
,
0.125
,
-
0.125
,
-
0.5625
};
migraphx
::
shape
bool_data
{
migraphx
::
shape
::
bool_type
,
{
1
}};
...
...
@@ -609,7 +609,7 @@ TEST_CASE(if_else_test)
TEST_CASE
(
if_else_test_inlined
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"if_else_test_inlined.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s_data
{
migraphx
::
shape
::
float_type
,
{
2
,
3
}};
std
::
vector
<
float
>
data
=
{
0.0625
,
0.75
,
-
0.0625
,
0.125
,
-
0.125
,
-
0.5625
};
...
...
@@ -628,7 +628,7 @@ TEST_CASE(if_else_test_inlined)
TEST_CASE
(
if_then_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"if_then_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s_data
{
migraphx
::
shape
::
float_type
,
{
2
,
3
}};
std
::
vector
<
float
>
data
=
{
0.0625
,
0.75
,
-
0.0625
,
0.125
,
-
0.125
,
-
0.5625
};
migraphx
::
shape
bool_data
{
migraphx
::
shape
::
bool_type
,
{
1
}};
...
...
@@ -651,7 +651,7 @@ TEST_CASE(if_then_test)
TEST_CASE
(
if_then_test_inlined
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"if_then_test_inlined.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s_data
{
migraphx
::
shape
::
float_type
,
{
2
,
3
}};
std
::
vector
<
float
>
data
=
{
0.0625
,
0.75
,
-
0.0625
,
0.125
,
-
0.125
,
-
0.5625
};
...
...
@@ -671,7 +671,7 @@ TEST_CASE(if_literal_test)
{
auto
run_prog
=
[](
bool
cond
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"if_literal_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s_data
{
migraphx
::
shape
::
bool_type
};
std
::
vector
<
char
>
data
=
{
static_cast
<
char
>
(
cond
)};
...
...
@@ -704,7 +704,7 @@ TEST_CASE(if_then_else_multi_output_shapes_inlined_test)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"if_then_else_multi_output_shapes_inlined_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
x_data
{
migraphx
::
shape
::
float_type
,
{
2
,
3
,
1
}};
migraphx
::
shape
y_data
{
migraphx
::
shape
::
float_type
,
{
2
,
3
}};
std
::
vector
<
float
>
data
=
{
0.0625
,
0.75
,
-
0.0625
,
0.125
,
-
0.125
,
-
0.5625
};
...
...
@@ -733,7 +733,7 @@ TEST_CASE(if_then_else_multi_output_shapes_inlined_test)
TEST_CASE
(
if_then_else_multi_output_shapes_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"if_then_else_multi_output_shapes_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s_data
{
migraphx
::
shape
::
float_type
,
{
2
,
3
,
1
}};
std
::
vector
<
float
>
data
=
{
0.0625
,
0.75
,
-
0.0625
,
0.125
,
-
0.125
,
-
0.5625
};
migraphx
::
shape
bool_data
{
migraphx
::
shape
::
bool_type
,
{
1
}};
...
...
@@ -765,7 +765,7 @@ TEST_CASE(if_pl_test)
{
auto
run_prog
=
[](
bool
cond
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"if_pl_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
xs
{
migraphx
::
shape
::
float_type
,
{
2
,
3
}};
migraphx
::
shape
ys
{
migraphx
::
shape
::
float_type
,
{
3
,
3
}};
migraphx
::
shape
cond_s
{
migraphx
::
shape
::
bool_type
};
...
...
@@ -805,7 +805,7 @@ TEST_CASE(if_tuple_test)
{
auto
run_prog
=
[](
bool
cond
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"if_tuple_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
xs
{
migraphx
::
shape
::
float_type
,
{
1
,
4
}};
migraphx
::
shape
ys
{
migraphx
::
shape
::
float_type
,
{
3
,
4
}};
migraphx
::
shape
cond_s
{
migraphx
::
shape
::
bool_type
};
...
...
@@ -854,7 +854,7 @@ TEST_CASE(instance_norm_test)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"instance_norm_val_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
auto
result
=
p
.
eval
({}).
back
();
std
::
vector
<
float
>
result_vector
(
9
);
result
.
visit
([
&
](
auto
output
)
{
result_vector
.
assign
(
output
.
begin
(),
output
.
end
());
});
...
...
@@ -884,7 +884,7 @@ TEST_CASE(instance_norm_3d_test)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"instance_norm_val_3d_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
auto
result
=
p
.
eval
({}).
back
();
std
::
vector
<
float
>
result_vector
(
16
);
result
.
visit
([
&
](
auto
output
)
{
result_vector
.
assign
(
output
.
begin
(),
output
.
end
());
});
...
...
@@ -912,7 +912,7 @@ TEST_CASE(instance_norm_3d_test)
TEST_CASE
(
lessorequal_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"lessorequal_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s
{
migraphx
::
shape
::
float_type
,
{
3
}};
std
::
vector
<
float
>
data1
=
{
0.25
,
0.75
,
0.9375
};
...
...
@@ -933,7 +933,7 @@ TEST_CASE(lessorequal_test)
TEST_CASE
(
lpnormalization_1norm
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"lpnormalization_l1_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s
{
migraphx
::
shape
::
float_type
,
{
3
,
4
}};
std
::
vector
<
float
>
data
{
0.
f
,
2.
f
,
-
2.
f
,
1.
f
,
1.
f
,
-
5.
f
,
3.
f
,
-
1.
f
,
-
4.
f
,
3.
f
,
0.
f
,
0.
f
};
migraphx
::
parameter_map
pp
;
...
...
@@ -961,7 +961,7 @@ TEST_CASE(lpnormalization_1norm)
TEST_CASE
(
lpnormalization_2norm
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"lpnormalization_l2_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s
{
migraphx
::
shape
::
float_type
,
{
3
,
4
}};
std
::
vector
<
float
>
data
{
0.
f
,
2.
f
,
-
2.
f
,
1.
f
,
1.
f
,
-
5.
f
,
3.
f
,
-
1.
f
,
-
4.
f
,
3.
f
,
0.
f
,
0.
f
};
migraphx
::
parameter_map
pp
;
...
...
@@ -989,7 +989,7 @@ TEST_CASE(lpnormalization_2norm)
TEST_CASE
(
mean_broadcast_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"mean_broadcast_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s0
{
migraphx
::
shape
::
float_type
,
{
1
,
3
,
4
}};
std
::
vector
<
float
>
data0
(
12
,
1
);
...
...
@@ -1020,7 +1020,7 @@ TEST_CASE(mean_broadcast_test)
TEST_CASE
(
mean_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"mean_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s
{
migraphx
::
shape
::
double_type
,
{
2
,
2
,
2
}};
const
int
num_elms
=
8
;
...
...
@@ -1047,7 +1047,7 @@ TEST_CASE(mean_test)
TEST_CASE
(
mean_integral_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"mean_integral_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s
{
migraphx
::
shape
::
int32_type
,
{
2
,
2
,
2
}};
const
int
num_elms
=
8
;
...
...
@@ -1074,7 +1074,7 @@ TEST_CASE(mean_integral_test)
TEST_CASE
(
mod_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"mod_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s
{
migraphx
::
shape
::
int32_type
,
{
3
,
3
,
3
}};
...
...
@@ -1101,7 +1101,7 @@ TEST_CASE(mod_test)
TEST_CASE
(
mod_test_different_types
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"mod_test_different_dtypes.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s_int16
{
migraphx
::
shape
::
int16_type
,
{
3
,
3
,
3
}};
migraphx
::
shape
s_int32
{
migraphx
::
shape
::
int32_type
,
{
3
,
3
,
3
}};
...
...
@@ -1129,7 +1129,7 @@ TEST_CASE(mod_test_different_types)
TEST_CASE
(
mod_test_fmod
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"mod_test_fmod.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s
{
migraphx
::
shape
::
float_type
,
{
3
,
3
,
3
}};
...
...
@@ -1158,7 +1158,7 @@ TEST_CASE(mod_test_fmod)
TEST_CASE
(
mod_test_fmod_different_types
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"mod_test_fmod_different_dtypes.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s_float
{
migraphx
::
shape
::
float_type
,
{
3
,
3
,
3
}};
migraphx
::
shape
s_int
{
migraphx
::
shape
::
int32_type
,
{
3
,
3
,
3
}};
...
...
@@ -1188,7 +1188,7 @@ TEST_CASE(mod_test_fmod_different_types)
TEST_CASE
(
nonzero_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"nonzero_dynamic_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s
{
migraphx
::
shape
::
bool_type
,
{
2
,
2
}};
std
::
vector
<
char
>
data
=
{
1
,
1
,
1
,
0
};
...
...
@@ -1207,7 +1207,7 @@ TEST_CASE(nonzero_test)
TEST_CASE
(
resize_downsample_f_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"resize_downsample_f_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
sx
{
migraphx
::
shape
::
float_type
,
{
1
,
1
,
2
,
4
}};
std
::
vector
<
float
>
dx
(
sx
.
elements
());
...
...
@@ -1228,7 +1228,7 @@ TEST_CASE(resize_downsample_f_test)
TEST_CASE
(
resize_upsample_linear_ac_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"resize_upsample_linear_ac_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
sx
{
migraphx
::
shape
::
float_type
,
{
1
,
1
,
2
,
2
}};
std
::
vector
<
float
>
dx
=
{
1.0
f
,
2.0
f
,
3.0
f
,
4.0
f
};
...
...
@@ -1263,7 +1263,7 @@ TEST_CASE(resize_upsample_linear_ac_test)
TEST_CASE
(
resize_upsample_linear_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"resize_upsample_linear_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
sx
{
migraphx
::
shape
::
float_type
,
{
1
,
1
,
2
,
2
}};
std
::
vector
<
float
>
dx
=
{
1.0
f
,
2.0
f
,
3.0
f
,
4.0
f
};
...
...
@@ -1284,7 +1284,7 @@ TEST_CASE(resize_upsample_linear_test)
TEST_CASE
(
resize_upsample_pf_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"resize_upsample_pf_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
sx
{
migraphx
::
shape
::
float_type
,
{
1
,
1
,
2
,
2
}};
std
::
vector
<
float
>
dx
=
{
1.0
f
,
2.0
f
,
3.0
f
,
4.0
f
};
...
...
@@ -1305,7 +1305,7 @@ TEST_CASE(resize_upsample_pf_test)
TEST_CASE
(
reversesequence_4D_verify_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"reversesequence_4D_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
xs
{
migraphx
::
shape
::
float_type
,
{
2
,
2
,
2
,
2
}};
std
::
vector
<
float
>
x_data
=
{
...
...
@@ -1326,7 +1326,7 @@ TEST_CASE(reversesequence_4D_verify_test)
TEST_CASE
(
reversesequence_batch_verify_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"reversesequence_batch_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
xs
{
migraphx
::
shape
::
float_type
,
{
4
,
4
}};
std
::
vector
<
float
>
x_data
=
{
...
...
@@ -1347,7 +1347,7 @@ TEST_CASE(reversesequence_batch_verify_test)
TEST_CASE
(
reversesequence_time_verify_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"reversesequence_time_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
xs
{
migraphx
::
shape
::
float_type
,
{
4
,
4
}};
std
::
vector
<
float
>
x_data
=
{
...
...
@@ -1368,7 +1368,7 @@ TEST_CASE(reversesequence_time_verify_test)
TEST_CASE
(
selu_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"selu_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
xs
{
migraphx
::
shape
::
double_type
,
{
2
,
3
}};
std
::
vector
<
double
>
x_data
=
{
1.1
,
2.1
,
0.0
,
-
1.3
,
-
5.3
,
12.0
};
...
...
@@ -1388,7 +1388,7 @@ TEST_CASE(selu_test)
TEST_CASE
(
size_verify_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"size_verify_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s
{
migraphx
::
shape
::
float_type
,
{
2
,
5
,
3
}};
std
::
vector
<
float
>
data
(
30
,
1.
);
...
...
@@ -1403,7 +1403,7 @@ TEST_CASE(size_verify_test)
TEST_CASE
(
slice_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"slice_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
sh_data
{
migraphx
::
shape
::
float_type
,
{
3
,
2
}};
std
::
vector
<
float
>
data
=
{
0
,
1
,
2
,
3
,
4
,
5
};
...
...
@@ -1422,7 +1422,7 @@ TEST_CASE(slice_test)
TEST_CASE
(
slice_5arg_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"slice_5arg_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
sh_data
{
migraphx
::
shape
::
float_type
,
{
5
,
5
}};
// start
std
::
vector
<
float
>
data
=
{
0
,
1
,
2
,
3
,
4
,
5
,
6
,
7
,
8
,
9
,
10
,
11
,
12
,
...
...
@@ -1442,7 +1442,7 @@ TEST_CASE(slice_5arg_test)
TEST_CASE
(
slice_reverse_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"slice_5arg_reverse_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
sh_data
{
migraphx
::
shape
::
float_type
,
{
5
,
5
}};
// start
std
::
vector
<
float
>
data
=
{
0
,
1
,
2
,
3
,
4
,
5
,
6
,
7
,
8
,
9
,
10
,
11
,
12
,
...
...
@@ -1462,7 +1462,7 @@ TEST_CASE(slice_reverse_test)
TEST_CASE
(
slice_step_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"slice_5arg_step_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
sh_data
{
migraphx
::
shape
::
float_type
,
{
5
,
5
}};
// start
std
::
vector
<
float
>
data
=
{
0
,
1
,
2
,
3
,
4
,
5
,
6
,
7
,
8
,
9
,
10
,
11
,
12
,
...
...
@@ -1482,7 +1482,7 @@ TEST_CASE(slice_step_test)
TEST_CASE
(
softplus_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"softplus_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s
{
migraphx
::
shape
::
float_type
,
{
5
}};
std
::
vector
<
float
>
data
=
{
0
,
1
,
2
,
3
,
4
};
...
...
@@ -1503,7 +1503,7 @@ TEST_CASE(softplus_test)
TEST_CASE
(
softsign_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"softsign_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
s
{
migraphx
::
shape
::
float_type
,
{
5
}};
std
::
vector
<
float
>
data
=
{
0
,
1
,
2
,
3
,
4
};
...
...
@@ -1543,7 +1543,7 @@ TEST_CASE(upsample_test)
TEST_CASE
(
where_test
)
{
migraphx
::
program
p
=
migraphx
::
parse_onnx
(
"where_test.onnx"
);
p
.
compile
(
migraphx
::
ref
::
target
{}
);
p
.
compile
(
migraphx
::
make_
target
(
"ref"
)
);
migraphx
::
shape
c_shape
{
migraphx
::
shape
::
bool_type
,
{
2
}};
std
::
vector
<
int8_t
>
c_data
=
{
1
,
0
};
...
...
test/onnx/where_dyn_test.onnx
0 → 100644
View file @
baac1dab
File added
Prev
1
…
8
9
10
11
12
13
14
15
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment