gaoqiong / MIGraphX / Commits / 30c49503

Commit 30c49503, authored Mar 23, 2023 by Khalique Ahmed

manual merge

Parents: 870a396b, 09aaa63e
Changes: 202 in the merge; this page shows 20 changed files with 567 additions and 77 deletions (+567 -77).
test/run_loop_test.cpp                      +2   -2
test/serialize_program.cpp                  +2   -2
test/serialize_test.cpp                     +23  -3
test/shape_test.cpp                         +24  -0
test/simplify_algebra_test.cpp              +74  -18
test/simplify_qdq_test.cpp                  +5   -5
test/targets.cpp                            +4   -2
test/verify/main.cpp                        +7   -1
test/verify/run_verify.cpp                  +12  -3
test/verify/test_layernorm.cpp              +35  -10
test/verify/test_reduce_op_large.cpp        +26  -0
test/verify/test_select_module_add.cpp      +77  -0
test/verify/test_select_module_conv.cpp     +71  -0
test/verify/test_select_module_reduce.cpp   +69  -0
tools/accuracy/accuracy_checker.py          +115 -22
tools/api/api.cpp                           +5   -1
tools/build_and_test_onnxrt.sh              +7   -4
tools/download_models.sh                    +5   -4
tools/include/context.hpp                   +2   -0
tools/include/operation.hpp                 +2   -0
test/run_loop_test.cpp

@@ -27,7 +27,7 @@
 #include <migraphx/literal.hpp>
 #include <migraphx/instruction.hpp>
 #include <migraphx/quantization.hpp>
-#include <migraphx/ref/target.hpp>
+#include <migraphx/register_target.hpp>
 #include <migraphx/shape.hpp>
 #include <migraphx/verify.hpp>
 #include <migraphx/make_op.hpp>

@@ -207,7 +207,7 @@ static auto run_prog(migraphx::program p, int64_t iter_num, bool cond, int64_t i
     migraphx::shape s{migraphx::shape::int64_type, {1}};
     migraphx::shape sc{migraphx::shape::bool_type};
-    p.compile(migraphx::ref::target{});
+    p.compile(migraphx::make_target("ref"));
     migraphx::parameter_map pp;
     pp["iter_num"] = migraphx::argument(si, &iter_num);
     pp["ccond"]    = migraphx::argument(sc, &cond);
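The same substitution recurs throughout this commit: tests stop constructing the reference target directly and instead look it up by name through the target registry. A minimal sketch of the new pattern, assuming only the include and the make_target("ref") call shown in the diffs (the wrapper function itself is illustrative, not part of the commit):

// Sketch of the compile-call migration applied across these tests.
#include <migraphx/program.hpp>
#include <migraphx/register_target.hpp>

void compile_on_ref(migraphx::program& p)
{
    // Old style (removed in this commit): p.compile(migraphx::ref::target{});
    // New style: ask the target registry for the "ref" target by name.
    p.compile(migraphx::make_target("ref"));
}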
test/serialize_program.cpp

@@ -22,7 +22,7 @@
  * THE SOFTWARE.
  */
 #include <migraphx/program.hpp>
-#include <migraphx/ref/target.hpp>
+#include <migraphx/register_target.hpp>
 #include <migraphx/load_save.hpp>
 #include "test.hpp"
 #include <migraphx/make_op.hpp>

@@ -82,7 +82,7 @@ TEST_CASE(as_file)
 TEST_CASE(compiled)
 {
     migraphx::program p1 = create_program();
-    p1.compile(migraphx::ref::target{});
+    p1.compile(migraphx::make_target("ref"));
     std::vector<char> buffer = migraphx::save_buffer(p1);
     migraphx::program p2     = migraphx::load_buffer(buffer);
     EXPECT(p1.sort() == p2.sort());
test/serialize_test.cpp

@@ -60,7 +60,9 @@ struct reflectable_type
             return migraphx::pack(f(self.value, "value"));
         }
     };

     std::vector<nested_type> nested_types = {};
+    std::tuple<int, nested_type, std::string> tuple_items = std::make_tuple(0, nested_type{0}, "");
+    migraphx::optional<int> opt_value = migraphx::nullopt;

     template <class Self, class F>
     static auto reflect(Self& self, F f)

@@ -71,7 +73,8 @@ struct reflectable_type
                              f(self.et, "et"),
                              f(self.se, "se"),
                              f(self.ce, "ce"),
-                             f(self.nested_types, "nested_types"));
+                             f(self.nested_types, "nested_types"),
+                             f(self.tuple_items, "tuple_items"));
     }
 };

@@ -83,7 +86,9 @@ TEST_CASE(serialize_reflectable_type)
                          {},
                          reflectable_type::simple1,
                          reflectable_type::class_enum::class2,
-                         {{1}, {2}}};
+                         {{1}, {2}},
+                         {5, {4}, "hello"},
+                         {migraphx::nullopt}};
     migraphx::value v1  = migraphx::to_value(t1);
     reflectable_type t2 = migraphx::from_value<reflectable_type>(v1);
     migraphx::value v2  = migraphx::to_value(t2);

@@ -125,6 +130,21 @@ TEST_CASE(serialize_empty_struct)
     EXPECT(v.at("a").to<int>() == 1);
 }

+TEST_CASE(serialize_empty_optional)
+{
+    migraphx::optional<int> x{};
+    migraphx::value v = migraphx::to_value(x);
+    EXPECT(v.is_null());
+}
+
+TEST_CASE(serialize_optional)
+{
+    migraphx::optional<int> x{2};
+    migraphx::value v = migraphx::to_value(x);
+    EXPECT(v.is_int64());
+    EXPECT(v.to<int>() == 2);
+}
+
 TEST_CASE(from_value_binary)
 {
     std::vector<std::uint8_t> data(10);
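The new optional tests encode a simple round-trip rule: an empty optional serializes to a null value, an engaged one serializes as its contained value. A short sketch of that round trip, using the same to_value calls the tests use; the header locations are assumptions, not taken from this diff:

// Sketch only: round-tripping migraphx::optional<int> through migraphx::value,
// mirroring serialize_empty_optional and serialize_optional above.
#include <migraphx/optional.hpp>   // assumed location of migraphx::optional
#include <migraphx/serialize.hpp>  // assumed location of to_value

bool optional_round_trip()
{
    migraphx::optional<int> empty{};
    migraphx::optional<int> two{2};
    auto v_empty = migraphx::to_value(empty); // null value, per serialize_empty_optional
    auto v_two   = migraphx::to_value(two);   // int64 value holding 2, per serialize_optional
    return v_empty.is_null() and v_two.to<int>() == 2;
}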
test/shape_test.cpp

@@ -238,6 +238,30 @@ TEST_CASE(test_shape_dynamic_serialize)
     EXPECT(s3 != s4);
 }

+TEST_CASE(any_of_dynamic_true)
+{
+    std::vector<migraphx::shape> sub_shapes = {};
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
+    migraphx::shape s0{sub_shapes};
+    EXPECT(s0.any_of_dynamic());
+
+    sub_shapes = {};
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 1}, {4, 4}}});
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
+    migraphx::shape s1{sub_shapes};
+    EXPECT(s1.any_of_dynamic());
+}
+
+TEST_CASE(any_of_dynamic_false)
+{
+    std::vector<migraphx::shape> sub_shapes = {};
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {1, 4}});
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
+    migraphx::shape s{sub_shapes};
+    EXPECT(not s.any_of_dynamic());
+}
+
 TEST_CASE(test_shape_packed)
 {
     migraphx::shape s{migraphx::shape::float_type, {2, 2}, {2, 1}};
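In these tests a dimension written as a {min, max} pair is dynamic, while a plain integer is fixed, and any_of_dynamic() reports whether any sub-shape of a tuple shape carries such a pair. A small sketch restating that, built only from constructs that appear in the tests above:

// Sketch of what any_of_dynamic() checks, using the same shapes as the tests.
#include <migraphx/shape.hpp>
#include <vector>

bool tuple_shape_is_dynamic()
{
    std::vector<migraphx::shape> sub_shapes;
    // {1, 4} is a dynamic dimension ranging from 1 to 4; {4, 4} is pinned to 4.
    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
    // A plain list of integers is a fully static sub-shape.
    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
    migraphx::shape tuple_shape{sub_shapes};
    return tuple_shape.any_of_dynamic(); // true: the first sub-shape is dynamic
}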
test/simplify_algebra_test.cpp

@@ -559,6 +559,32 @@ TEST_CASE(simplify_inner_broadcast2)
     EXPECT(m1 == m2);
 }

+TEST_CASE(simplify_inner_broadcast_scalar)
+{
+    auto b = migraphx::op::multibroadcast{{32, 384}};
+    migraphx::module m1;
+    {
+        auto x   = m1.add_parameter("x", {migraphx::shape::int32_type, {1, 384}});
+        auto y   = m1.add_parameter("y", {migraphx::shape::int32_type, {1, 1}});
+        auto xb  = m1.add_instruction(b, x);
+        auto yb  = m1.add_instruction(b, y);
+        auto sum = m1.add_instruction(migraphx::make_op("add"), xb, yb);
+        m1.add_instruction(pass_op{}, sum);
+    }
+    run_pass(m1);
+
+    migraphx::module m2;
+    {
+        auto x    = m2.add_parameter("x", {migraphx::shape::int32_type, {1, 384}});
+        auto y    = m2.add_parameter("y", {migraphx::shape::int32_type, {1, 1}});
+        auto yb   = m2.add_instruction(migraphx::op::multibroadcast{{1, 384}}, y);
+        auto sum  = m2.add_instruction(migraphx::make_op("add"), x, yb);
+        auto sumb = m2.add_instruction(b, sum);
+        m2.add_instruction(pass_op{}, sumb);
+    }
+    EXPECT(m1 == m2);
+}
+
 TEST_CASE(simplify_add_conv1)
 {
     migraphx::module m;

@@ -1041,16 +1067,18 @@ TEST_CASE(simplify_neg_unit_mult_const)
 {
     migraphx::module m1;
     {
-        auto x    = m1.add_parameter("x", {migraphx::shape::int32_type, {1}});
-        auto unit = m1.add_literal(-1);
+        auto x    = m1.add_parameter("x", {migraphx::shape::int32_type, {1, 6}});
+        auto unit = m1.add_literal(
+            migraphx::literal{{migraphx::shape::int32_type, {1, 6}}, std::vector<int>(6, -1)});
         m1.add_instruction(migraphx::make_op("mul"), x, unit);
     }
     run_pass(m1);

     migraphx::module m2;
     {
-        auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
-        m2.add_instruction(migraphx::make_op("neg"), x);
+        auto x  = m2.add_parameter("x", {migraphx::shape::int32_type, {1, 6}});
+        auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
+        m2.add_instruction(migraphx::make_op("identity"), x2);
     }
     EXPECT((m1 == m2));

@@ -1068,8 +1096,30 @@ TEST_CASE(simplify_neg_unit_mult_const2)
     migraphx::module m2;
     {
-        auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
-        m2.add_instruction(migraphx::make_op("neg"), x);
+        auto x  = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
+        auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
+        m2.add_instruction(migraphx::make_op("identity"), x2);
     }
     EXPECT((m1 == m2));
 }
+
+TEST_CASE(simplify_neg_unit_mult_const_add)
+{
+    migraphx::module m1;
+    {
+        auto unit = m1.add_literal(-1);
+        auto x    = m1.add_parameter("x", {migraphx::shape::int32_type, {1}});
+        auto x2   = m1.add_instruction(migraphx::make_op("mul"), unit, x);
+        m1.add_instruction(migraphx::make_op("add"), x2, x2);
+    }
+    run_pass(m1);
+
+    migraphx::module m2;
+    {
+        auto x  = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
+        auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
+        m2.add_instruction(migraphx::make_op("add"), x2, x2);
+    }
+    EXPECT((m1 == m2));

@@ -1091,8 +1141,9 @@ TEST_CASE(simplify_neg_unit_mul_const_vec)
     migraphx::module m2;
     {
-        auto x = m2.add_parameter("x", x_shape);
-        m2.add_instruction(migraphx::make_op("neg"), x);
+        auto x  = m2.add_parameter("x", x_shape);
+        auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
+        m2.add_instruction(migraphx::make_op("identity"), x2);
     }
     EXPECT(m1 == m2);

@@ -1114,8 +1165,9 @@ TEST_CASE(simplify_neg_unit_mul_const_vec2)
     migraphx::module m2;
     {
-        auto x = m2.add_parameter("x", x_shape);
-        m2.add_instruction(migraphx::make_op("neg"), x);
+        auto x  = m2.add_parameter("x", x_shape);
+        auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
+        m2.add_instruction(migraphx::make_op("identity"), x2);
     }
     EXPECT(m1 == m2);

@@ -1133,8 +1185,9 @@ TEST_CASE(simplify_neg_unit_div_const)
     migraphx::module m2;
     {
-        auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
-        m2.add_instruction(migraphx::make_op("neg"), x);
+        auto x  = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
+        auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
+        m2.add_instruction(migraphx::make_op("identity"), x2);
     }
     EXPECT(m1 == m2);

@@ -1156,8 +1209,9 @@ TEST_CASE(simplify_neg_unit_div_const_vec)
     migraphx::module m2;
     {
-        auto x = m2.add_parameter("x", x_shape);
-        m2.add_instruction(migraphx::make_op("neg"), x);
+        auto x  = m2.add_parameter("x", x_shape);
+        auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
+        m2.add_instruction(migraphx::make_op("identity"), x2);
     }
     EXPECT(m1 == m2);

@@ -1216,8 +1270,9 @@ TEST_CASE(simplify_sub_neg_zero_const)
     migraphx::module m2;
     {
-        auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
-        m2.add_instruction(migraphx::make_op("neg"), x);
+        auto x  = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
+        auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
+        m2.add_instruction(migraphx::make_op("identity"), x2);
     }
     EXPECT(m1 == m2);
 }

@@ -1238,8 +1293,9 @@ TEST_CASE(simplify_sub_neg_zero_const_vec)
     migraphx::module m2;
     {
-        auto x = m2.add_parameter("x", x_shape);
-        m2.add_instruction(migraphx::make_op("neg"), x);
+        auto x  = m2.add_parameter("x", x_shape);
+        auto x2 = m2.add_instruction(migraphx::make_op("neg"), x);
+        m2.add_instruction(migraphx::make_op("identity"), x2);
     }
     EXPECT(m1 == m2);
test/simplify_qdq_test.cpp

@@ -23,7 +23,7 @@
  */
 #include <migraphx/simplify_qdq.hpp>
 #include <migraphx/program.hpp>
-#include <migraphx/ref/target.hpp>
+#include <migraphx/register_target.hpp>
 #include <migraphx/instruction.hpp>
 #include <test.hpp>
 #include <migraphx/make_op.hpp>

@@ -686,8 +686,8 @@ TEST_CASE(conv_correctness)
     auto input = migraphx::argument(si, iv.data());
     std::vector<float> wv(sw.elements(), 10);
     auto weights = migraphx::argument(sw, wv.data());
-    p1.compile(migraphx::target(migraphx::ref::target{}));
-    p2.compile(migraphx::target(migraphx::ref::target{}));
+    p1.compile(migraphx::target(migraphx::make_target("ref")));
+    p2.compile(migraphx::target(migraphx::make_target("ref")));
     auto result1 = p1.eval({{"input", input}, {"weights", weights}}).back();
     std::vector<float> rv1(16);

@@ -736,8 +736,8 @@ TEST_CASE(dot_correctness)
     auto a = migraphx::argument(sh1, av.data());
     std::vector<float> bv(sh2.elements(), 10);
     auto b = migraphx::argument(sh2, bv.data());
-    p1.compile(migraphx::target(migraphx::ref::target{}));
-    p2.compile(migraphx::target(migraphx::ref::target{}));
+    p1.compile(migraphx::target(migraphx::make_target("ref")));
+    p2.compile(migraphx::target(migraphx::make_target("ref")));
     auto result1 = p1.eval({{"a", a}, {"b", b}}).back();
     std::vector<float> rv1(sh3.elements());
test/targets.cpp

@@ -22,7 +22,6 @@
  * THE SOFTWARE.
  */
 #include <migraphx/register_target.hpp>
-#include <migraphx/ref/target.hpp>
 #include <migraphx/target.hpp>
 #include "test.hpp"

@@ -43,7 +42,10 @@ TEST_CASE(make_invalid_target)
 TEST_CASE(targets)
 {
     auto ts = migraphx::get_targets();
-    EXPECT(ts.size() > 0);
+    EXPECT(ts.size() == 0);
+
+    auto ref_t = migraphx::make_target("ref");
+    ts = migraphx::get_targets();
+    EXPECT(ts.size() == 1);
 }

 int main(int argc, const char* argv[]) { test::run(argc, argv); }
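The updated test encodes the registry behavior the rest of the commit relies on: targets are registered lazily, so get_targets() lists nothing until make_target() has loaded one. A hedged sketch of that check, assuming (as the test does) that no other target has been loaded in the process yet:

// Sketch of lazy target registration, following the updated TEST_CASE(targets).
#include <migraphx/register_target.hpp>
#include <migraphx/target.hpp>

bool ref_target_registers_lazily()
{
    auto before = migraphx::get_targets().size(); // 0 in a fresh process
    auto ref_t  = migraphx::make_target("ref");   // loads and registers "ref"
    auto after  = migraphx::get_targets().size(); // now 1
    return before == 0 and after == 1;
}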
test/verify/main.cpp

@@ -67,7 +67,13 @@ int main(int argc, const char* argv[])
 {
     run_verify rv;
     rv.add_validation_for("gpu", &validate_gpu);
-    rv.disable_test_for("cpu", {"test_if_lp", "test_if_param", "test_if_literal"});
+    rv.disable_test_for("cpu",
+                        {"test_if_lp",
+                         "test_if_param",
+                         "test_if_literal",
+                         "test_select_module_add",
+                         "test_select_module_reduce",
+                         "test_select_module_conv"});
     rv.disable_test_for("gpu", {"test_conv_bn_add"});
     rv.run(argc, argv);
 }
test/verify/run_verify.cpp

@@ -26,7 +26,7 @@
 #include "verify_program.hpp"
 #include "test.hpp"
 #include <migraphx/env.hpp>
-#include <migraphx/ref/target.hpp>
+#include <migraphx/register_target.hpp>
 #include <migraphx/ranges.hpp>
 #include <migraphx/generate.hpp>
 #include <migraphx/load_save.hpp>

@@ -117,7 +117,7 @@ void run_verify::validate(const migraphx::target& t,
 std::vector<migraphx::argument> run_verify::run_ref(migraphx::program p,
                                                     migraphx::parameter_map inputs) const
 {
-    migraphx::ref::target t{};
+    migraphx::target t = migraphx::make_target("ref");
     auto_print pp{p, t.name()};
     compile_check(p, t);
     return p.eval(std::move(inputs));

@@ -185,7 +185,16 @@ void run_verify::verify(const std::string& name, const migraphx::program& p) con
     migraphx::parameter_map m;
     for(auto&& x : p.get_parameter_shapes())
     {
-        m[x.first] = migraphx::generate_argument(x.second, get_hash(x.first));
+        if(x.second.dynamic())
+        {
+            // create static shape using maximum dimensions
+            migraphx::shape static_shape{x.second.type(), x.second.max_lens()};
+            m[x.first] = migraphx::generate_argument(static_shape, get_hash(x.first));
+        }
+        else
+        {
+            m[x.first] = migraphx::generate_argument(x.second, get_hash(x.first));
+        }
     }
     auto gold_f = detach_async([=] { return run_ref(p, m); });
test/verify/test_layernorm.cpp

@@ -24,31 +24,30 @@
 #include "verify_program.hpp"
 #include <migraphx/program.hpp>
 #include <migraphx/instruction.hpp>
 #include <migraphx/generate.hpp>
 #include <migraphx/make_op.hpp>
-#include <migraphx/op/reduce_mean.hpp>

 migraphx::instruction_ref add_layernorm(migraphx::module& m,
                                         migraphx::instruction_ref x,
                                         std::vector<size_t> dims,
                                         float eps = 1e-12f)
 {
-    auto scale = m.add_parameter("scale", migraphx::shape{migraphx::shape::float_type, {dims.back()}});
-    auto bias  = m.add_parameter("bias", migraphx::shape{migraphx::shape::float_type, {dims.back()}});
-    auto epsilon  = m.add_literal(eps);
-    auto exponent = m.add_literal(2.0f);
+    auto mgx_type = x->get_shape().type();
+    auto scale    = m.add_parameter("scale", migraphx::shape{mgx_type, {dims.back()}});
+    auto bias     = m.add_parameter("bias", migraphx::shape{mgx_type, {dims.back()}});
+    auto epsilon  = m.add_literal(migraphx::literal{migraphx::shape{mgx_type}, {eps}});
+    auto exponent = m.add_literal(migraphx::literal{migraphx::shape{mgx_type}, {2.0f}});

-    auto mean = m.add_instruction(migraphx::op::reduce_mean({2}), x);
+    auto mean = m.add_instruction(migraphx::make_op("reduce_mean", {{"axes", {2}}}), x);
     auto mean_mbcast =
         m.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", dims}}), mean);
     auto sub = m.add_instruction(migraphx::make_op("sub"), x, mean_mbcast);
     auto exponent_mbcast =
         m.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", dims}}), exponent);
     auto pow = m.add_instruction(migraphx::make_op("pow"), sub, exponent_mbcast);
-    auto var = m.add_instruction(migraphx::op::reduce_mean({2}), pow);
+    auto var = m.add_instruction(migraphx::make_op("reduce_mean", {{"axes", {2}}}), pow);
     auto epsilon_mbcast = m.add_instruction(
         migraphx::make_op("multibroadcast", {{"out_lens", {1, dims.at(1), 1}}}), epsilon);
     auto add_epsilon = m.add_instruction(migraphx::make_op("add"), var, epsilon_mbcast);

@@ -90,6 +89,32 @@ struct test_layernorm2 : verify_program<test_layernorm2>
     }
 };

+struct test_layernorm_large : verify_program<test_layernorm_large>
+{
+    migraphx::program create_program() const
+    {
+        migraphx::program p;
+        auto* mm = p.get_main_module();
+        std::vector<size_t> dims = {1, 32, 262144};
+        auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, dims});
+        add_layernorm(*mm, x, dims);
+        return p;
+    }
+};
+
+struct test_layernorm_fp16 : verify_program<test_layernorm_fp16>
+{
+    migraphx::program create_program() const
+    {
+        migraphx::program p;
+        auto* mm = p.get_main_module();
+        std::vector<size_t> dims = {1, 24, 64};
+        auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::half_type, dims});
+        add_layernorm(*mm, x, dims);
+        return p;
+    }
+};
+
 struct test_layernorm_eps : verify_program<test_layernorm_eps>
 {
     migraphx::program create_program() const
test/verify/test_reduce_op_large.cpp

@@ -76,3 +76,29 @@ struct test_reduce_mean_2 : verify_program<test_reduce_mean_2>
         return p;
     };
 };

+struct test_large_reduce_mean1 : verify_program<test_large_reduce_mean1>
+{
+    migraphx::program create_program() const
+    {
+        migraphx::program p;
+        auto* mm = p.get_main_module();
+        migraphx::shape s{migraphx::shape::float_type, {2, 256 * 256 * 16}};
+        auto x = mm->add_parameter("x", s);
+        mm->add_instruction(migraphx::op::reduce_mean{{1}}, x);
+        return p;
+    };
+};
+
+struct test_large_reduce_mean2 : verify_program<test_large_reduce_mean2>
+{
+    migraphx::program create_program() const
+    {
+        migraphx::program p;
+        auto* mm = p.get_main_module();
+        migraphx::shape s{migraphx::shape::float_type, {1, 32, 262144}};
+        auto x = mm->add_parameter("x", s);
+        mm->add_instruction(migraphx::op::reduce_mean{{2}}, x);
+        return p;
+    };
+};
test/verify/test_select_module_add.cpp (new file, mode 100644)

/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>

struct test_select_module_add : verify_program<test_select_module_add>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {1}}};
        auto literal_ins = mm->add_literal(migraphx::literal{lit_s, {6}});

        // create batch submodules
        auto create_submodule = [&](std::size_t batch_size, const std::string& module_name) {
            auto* submod = p.create_module(module_name);
            migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 4}};
            auto sm_input = submod->add_parameter("data", sm_shape);
            auto broadcast_lit =
                submod->add_instruction(migraphx::make_op("multibroadcast"), literal_ins, sm_input);
            auto add_ins0 =
                submod->add_instruction(migraphx::make_op("add"), sm_input, broadcast_lit);
            auto add_ins1 =
                submod->add_instruction(migraphx::make_op("add"), add_ins0, broadcast_lit);
            submod->add_return({add_ins0, add_ins1});
            return submod;
        };
        auto* batch1 = create_submodule(1, "batch_1");
        auto* batch2 = create_submodule(2, "batch_2");
        auto* batch3 = create_submodule(3, "batch_3");
        auto* batch4 = create_submodule(4, "batch_4");

        migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {4, 4}}};
        auto input = mm->add_parameter("data", s);
        std::vector<migraphx::shape> sub_shapes = {};
        sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
        sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
        migraphx::shape out_attr = migraphx::shape{sub_shapes};
        auto sm_ins = mm->add_instruction(
            migraphx::make_op("select_module", {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
            {input},
            {batch1, batch2, batch3, batch4});
        auto ret0 = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
        auto ret1 = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 1}}), sm_ins);
        mm->add_return({ret0, ret1});
        return p;
    }
};
test/verify/test_select_module_conv.cpp (new file, mode 100644)

/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>

struct test_select_module_conv : verify_program<test_select_module_conv>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();

        // create batch submodules
        auto create_submodule = [&](std::size_t batch_size, const std::string& module_name) {
            auto* submod = p.create_module(module_name);
            migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 3, 4, 4}};
            auto sm_input = submod->add_parameter("data", sm_shape);
            migraphx::shape weights_shape{migraphx::shape::float_type, {2, 3, 3, 3}};
            std::vector<float> weights_data(2 * 3 * 3 * 3, 2.0);
            auto weights = submod->add_literal(migraphx::literal{weights_shape, weights_data});
            auto conv_ins = submod->add_instruction(
                migraphx::make_op("convolution", {{"padding", {1, 1}}}), sm_input, weights);
            submod->add_return({conv_ins});
            return submod;
        };
        auto* batch1 = create_submodule(1, "batch_1");
        auto* batch2 = create_submodule(2, "batch_2");
        auto* batch3 = create_submodule(3, "batch_3");
        auto* batch4 = create_submodule(4, "batch_4");

        migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {3, 3}, {4, 4}, {4, 4}}};
        auto input = mm->add_parameter("data", s);
        std::vector<migraphx::shape> sub_shapes = {};
        sub_shapes.push_back(
            migraphx::shape{migraphx::shape::float_type, {{1, 4}, {2, 2}, {4, 4}, {4, 4}}});
        migraphx::shape out_attr = migraphx::shape{sub_shapes};
        auto sm_ins = mm->add_instruction(
            migraphx::make_op("select_module", {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
            {input},
            {batch1, batch2, batch3, batch4});
        auto ret = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
        mm->add_return({ret});
        return p;
    }
};
test/verify/test_select_module_reduce.cpp (new file, mode 100644)

/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>

struct test_select_module_reduce : verify_program<test_select_module_reduce>
{
    migraphx::program create_program() const
    {
        migraphx::program p;

        // create batch submodules
        auto create_submodule = [&](std::size_t batch_size, const std::string& module_name) {
            auto* submod = p.create_module(module_name);
            migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 2, 2}};
            auto sm_input = submod->add_parameter("data", sm_shape);
            auto reduce_ins =
                submod->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), sm_input);
            auto squeeze_ins =
                submod->add_instruction(migraphx::make_op("squeeze", {{"axes", {1}}}), reduce_ins);
            submod->add_return({squeeze_ins});
            return submod;
        };
        auto* batch1 = create_submodule(1, "batch_1");
        auto* batch2 = create_submodule(2, "batch_2");
        auto* batch3 = create_submodule(3, "batch_3");
        auto* batch4 = create_submodule(4, "batch_4");

        auto* mm = p.get_main_module();
        migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {2, 2}, {2, 2}}};
        auto input = mm->add_parameter("data", s);
        std::vector<migraphx::shape> sub_shapes = {};
        sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {2, 2}}});
        migraphx::shape out_attr = migraphx::shape{sub_shapes};
        auto sm_ins = mm->add_instruction(
            migraphx::make_op("select_module", {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
            {input},
            {batch1, batch2, batch3, batch4});
        auto ret = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
        mm->add_return({ret});
        return p;
    }
};
tools/accuracy/accuracy_checker.py

@@ -25,6 +25,7 @@ import argparse
 import numpy as np
 import migraphx
 import onnxruntime as ort
+import sys

 def parse_args():

@@ -33,15 +34,13 @@ def parse_args():
         'MIGraphX accuracy checker. Use to verify onnx files to ensure MIGraphX\'s output \
         is within tolerance of onnx runtime\'s expected output.')
-    req_args = parser.add_argument_group(title='required arguments')
-    req_args.add_argument('--onnx', type=str, required=True, help='path to onnx file')
-    req_args.add_argument('--provider', type=str, default='CPUExecutionProvider',
-                          help='execution provider for onnx runtime \
+    file_args = parser.add_argument_group(title='file type arguments')
+    file_args.add_argument('--onnx', type=str, help='path to onnx file')
+    file_args.add_argument('--tf', type=str, help='path to tf pb file')
+    parser.add_argument('--provider', type=str, default='CPUExecutionProvider',
+                        help='execution provider for onnx runtime \
                         (default = CPUExecutionProvider)')
     parser.add_argument('--batch', type=int,

@@ -50,6 +49,9 @@ def parse_args():
     parser.add_argument('--fill1', action='store_true',
                         help='fill all arguments with a value of 1')
+    parser.add_argument('--fill0', action='store_true',
+                        help='fill all arguments with a value of 0')
     parser.add_argument('--verbose', action='store_true',
                         help='show verbose information (for debugging)')

@@ -57,6 +59,12 @@ def parse_args():
                         type=float, default=1e-3,
                         help='accuracy tolerance (default = 1e-3)')
+    parser.add_argument('--input-dim', type=str, action='append',
+                        help='specify input parameter dimension \
+                              with the following format --input_dim input_name:dim0,dim1,dim2...')
     args = parser.parse_args()
     return args

@@ -111,42 +119,127 @@ def get_np_datatype(in_type):
 def main():
     args = parse_args()
+    use_onnx = True
+    if args.onnx == None:
+        use_onnx = False
+    if not use_onnx and args.tf == None:
+        print('Error: please specify either an onnx or tf pb file')
+        sys.exit(-1)
     model_name = args.onnx
     batch = args.batch
-    model = migraphx.parse_onnx(model_name, default_dim_value=batch)
+    custom_inputs = args.input_dim
+    input_dims = {}
+    if custom_inputs != None:
+        for input in custom_inputs:
+            input_dim = ''.join(input.split(':')[:-1])
+            dims = [int(dim) for dim in input.split(':')[-1].split(',')]
+            input_dims[input_dim] = dims
+    if use_onnx:
+        if not input_dims:
+            model = migraphx.parse_onnx(model_name, default_dim_value=batch)
+        else:
+            model = migraphx.parse_onnx(model_name, default_dim_value=batch, map_input_dims=input_dims)
+    else:
+        model_name = args.tf
+        if not input_dims:
+            model = migraphx.parse_tf(model_name, batch_size=batch)
+        else:
+            model = migraphx.parse_tf(model_name, batch_size=batch, map_input_dims=input_dims)

     if args.verbose:
         print(model)
-    model.compile(migraphx.get_target('gpu'), offload_copy=False)
+    model.compile(migraphx.get_target('gpu'))
     params = {}
     test_inputs = {}
     for name, shape in model.get_parameter_shapes().items():
         if args.verbose:
-            print('Parameter {} -> {}'.format(name, shape))
+            print(f'Parameter {name} -> {shape}')
         in_shape = shape.lens()
         in_type = shape.type_string()
-        if not args.fill1:
+        if not args.fill1 and not args.fill0:
             test_input = np.random.rand(*(in_shape)).astype(get_np_datatype(in_type))
-        else:
+        elif not args.fill0:
             test_input = np.ones(in_shape).astype(get_np_datatype(in_type))
+        else:
+            test_input = np.zeros(in_shape).astype(get_np_datatype(in_type))
         test_inputs[name] = test_input
-        params[name] = migraphx.to_gpu(migraphx.argument(test_input))
+        params[name] = migraphx.argument(test_input)

-    pred_migx = np.array(model.run(params)[-1])
+    pred_migx = np.array(migraphx.from_gpu(model.run(params)[-1]))

-    sess = ort.InferenceSession(model_name, providers=[args.provider])
-    ort_params = {}
-    for input in sess.get_inputs():
-        ort_params[input.name] = test_inputs[input.name]
-
-    pred_ort = sess.run(None, ort_params)[-1]
+    if use_onnx:
+        sess = ort.InferenceSession(model_name, providers=[args.provider])
+        ort_params = {}
+        for input in sess.get_inputs():
+            ort_params[input.name] = test_inputs[input.name]
+        try:
+            pred_fw = sess.run(None, ort_params)[-1]
+        except Exception as e:
+            if any(input_dims):
+                print('Error: custom input dim may not be compatible with onnx runtime')
+            raise e
+    else:
+        import tensorflow as tf
+
+        def load_tf_graph(model_name):
+            with tf.io.gfile.GFile(model_name, 'rb') as f:
+                graph_def = tf.compat.v1.GraphDef()
+                graph_def.ParseFromString(f.read())
+            with tf.compat.v1.Graph().as_default() as graph:
+                tf.graph_util.import_graph_def(graph_def)
+            return graph
+
+        graph = load_tf_graph(model_name)
+        is_nhwc = False
+        graph_ops = []
+        for op in graph.get_operations():
+            graph_ops.append(op.name)
+            if 'Conv' in op.node_def.op:
+                if 'NHWC' in op.get_attr('data_format').decode('utf-8'):
+                    is_nhwc = True
+        graph_ops_set = set(graph_ops)
+
+        tf_dict = {}
+        for name in test_inputs.keys():
+            # graph.get_operations() adds 'import/' to the op name
+            tf_name = f'import/{name}'
+            if tf_name not in graph_ops_set:
+                continue
+            x = graph.get_tensor_by_name(f'{tf_name}:0')
+            tf_input = test_inputs[name]
+            # transpose input for NHWC model
+            if tf_input.ndim == 4 and is_nhwc:
+                tf_dict[x] = np.transpose(tf_input, (0, 2, 3, 1))
+            else:
+                tf_dict[x] = tf_input
+
+        # assume last node in graph is output
+        # TODO: let user specify op name for output
+        y = graph.get_tensor_by_name(f'{graph_ops[-1]}:0')
+        with tf.compat.v1.Session(graph=graph) as sess:
+            y_out = sess.run(y, feed_dict=tf_dict)
+            pred_fw = y_out

-    is_correct = check_correctness(pred_ort, pred_migx, args.tolerance,
+    is_correct = check_correctness(pred_fw, pred_migx, args.tolerance,
                                    args.tolerance, args.verbose)
     verbose_string = ' Rerun with --verbose for detailed information.' \
         if not args.verbose else ''
tools/api/api.cpp

@@ -32,7 +32,6 @@
 #include <migraphx/register_target.hpp>
 #include <migraphx/generate.hpp>
 #include <migraphx/quantization.hpp>
-#include <migraphx/ref/target.hpp>
 #include <migraphx/load_save.hpp>
 #include <migraphx/make_op.hpp>
 #include <migraphx/register_op.hpp>

@@ -134,6 +133,11 @@ void set_offload_copy(compile_options& options, bool value) { options.offload_co
 void set_fast_math(compile_options& options, bool value) { options.fast_math = value; }

+void set_exhaustive_tune_flag(compile_options& options, bool value)
+{
+    options.exhaustive_tune = value;
+}
+
 void set_file_format(file_options& options, const char* format) { options.format = format; }

 void set_default_dim_value(onnx_options& options, size_t value)
tools/build_and_test_onnxrt.sh

@@ -22,9 +22,12 @@
 # THE SOFTWARE.
 #####################################################################################

 cd /onnxruntime
-pip3 install -r requirements.txt
+pip3 install -r requirements-dev.txt
+
+# Add newer cmake to the path
+export PATH="/opt/cmake/bin:$PATH"

-export CXXFLAGS="-D__HIP_PLATFORM_HCC__=1 -w"
-./build.sh --config Release --update --build --parallel --cmake_extra_defines ONNXRUNTIME_VERSION=$(cat ./VERSION_NUMBER) --test --use_migraphx
-# pip3 install /code/onnxruntime/build/Linux/Release/dist/*.whl
+export CXXFLAGS="-D__HIP_PLATFORM_AMD__=1 -w"
+./build.sh --config Release --cmake_extra_defines CMAKE_HIP_COMPILER=/opt/rocm/llvm/bin/clang++ --update --build --parallel --cmake_extra_defines ONNXRUNTIME_VERSION=$(cat ./VERSION_NUMBER) --skip_tests --rocm_home /opt/rocm --use_migraphx --migraphx_home /opt/rocm --rocm_version=`cat /opt/rocm/.info/version-dev`

 cd build/Linux/Release
 #Add test launcher for onnxrt tests
 ../../../tools/ci_build/github/pai/migraphx_test_launcher.sh
tools/download_models.sh

@@ -26,10 +26,12 @@
 if [ -z "$ONNX_HOME" ]
 then
-    ONNX_HOME=$HOME
+    # The onnx library uses ONNX_HOME, by default if it doesn't exist
+    # the path of " ~/.onnx " is used
+    ONNX_HOME=$HOME/.onnx
 fi

-model_dir=$ONNX_HOME/.onnx/models
+model_dir=$ONNX_HOME/models
 tmp_dir=$ONNX_HOME/tmp/

 mkdir -p $model_dir
 mkdir -p $tmp_dir

@@ -42,7 +44,6 @@ models="bvlc_alexnet \
 for name in $models
 do
-    curl https://s3.amazonaws.com/download.onnx/models/opset_9/$name.tar.gz --output $tmp_dir/$name.tar.gz
+    curl https://download.onnxruntime.ai/onnx/models/$name.tar.gz --output $tmp_dir/$name.tar.gz
     tar -xzvf $tmp_dir/$name.tar.gz --directory $model_dir && rm $tmp_dir/$name.tar.gz
 done
tools/include/context.hpp

@@ -66,6 +66,7 @@ any_ptr get_queue_context(T&)
 {
     return {};
 }
+
 template <class T>
 void wait_for_context(T&, any_ptr)
 {

@@ -87,6 +88,7 @@ void finish_on_context(T&, any_ptr){}
 {
     v = ctx.to_value();
 }
+
 inline void migraphx_from_value(const value& v, context& ctx) { ctx.from_value(v); }
 #endif
tools/include/operation.hpp

@@ -140,6 +140,8 @@ template <class T>
 auto compute_shape_op(rank<2>, const T& x, const std::vector<shape>& inputs)
     -> decltype(x.normalize_compute_shape(inputs))
 {
+    if(inputs.empty())
+        MIGRAPHX_THROW("At least one input is required for " + x.name());
     dependent_type<operation, T> y = x;
     normalize_attributes(y, inputs[0].max_lens());
     return any_cast<T>(y).normalize_compute_shape(inputs);