gaoqiong / MIGraphX / Commits
Commit 4ea39116, authored Nov 10, 2023 by Khalique Ahmed
manual merge
Parents: 20128cae, d8011adf
Changes: 315
Showing 20 changed files with 1740 additions and 122 deletions (+1740, −122)
test/onnx/tril_row_one_test.onnx        +0    −0
test/onnx/tril_test.onnx                +0    −0
test/onnx/triu_batch_diff_k_test.onnx   +15   −0
test/onnx/triu_neg_k_test.onnx          +13   −0
test/onnx/triu_out_k_test.onnx          +13   −0
test/onnx/triu_row_one_test.onnx        +13   −0
test/onnx/triu_test.onnx                +11   −0
test/onnx/upsample_ver7_test.onnx       +0    −0
test/onnx/verify_onnx.cpp               +859  −10
test/op_shape_test.cpp                  +302  −11
test/py/CMakeLists.txt                  +71   −19
test/py/onnx_backend_test.py            +0    −26
test/py/requirements-onnx.txt           +6    −20
test/py/test_gpu.py                     +21   −17
test/py/test_numpy.py                   +3    −6
test/ref/allocate.cpp                   +19   −1
test/ref/argmax.cpp                     +34   −0
test/ref/argmin.cpp                     +34   −0
test/ref/isinf.cpp                      +105  −0
test/ref/multinomial.cpp                +221  −12
test/onnx/tril_row_one_test.onnx (new file, mode 100644) — file added (binary ONNX model, no text preview)
test/onnx/trilu_lower_test.onnx → test/onnx/tril_test.onnx (renamed) — no preview for this file type
test/onnx/trilu_batch_diff_k_test.onnx → test/onnx/triu_batch_diff_k_test.onnx (renamed) — binary ONNX model; graph name changed from trilu_batch_diff_k_test to triu_batch_diff_k_test (Trilu node), no readable text diff
test/onnx/trilu_neg_k_test.onnx → test/onnx/triu_neg_k_test.onnx (renamed) — binary ONNX model; graph name changed from trilu_neg_k_test to triu_neg_k_test (Trilu node), no readable text diff
test/onnx/trilu_out_k_test.onnx → test/onnx/triu_out_k_test.onnx (renamed) — binary ONNX model; graph name changed from trilu_out_k_test to triu_out_k_test (Trilu node), no readable text diff
test/onnx/trilu_row_one_test.onnx → test/onnx/triu_row_one_test.onnx (renamed) — binary ONNX model; graph name changed from trilu_row_one_test to triu_row_one_test (Trilu node), no readable text diff
test/onnx/trilu_test.onnx → test/onnx/triu_test.onnx (renamed) — binary ONNX model; graph name changed from trilu_test to triu_test (Trilu node), no readable text diff
test/onnx/upsample_ver7_test.onnx (new file, mode 100644) — file added (binary ONNX model, no text preview)
test/onnx/verify_onnx.cpp

@@ -538,6 +538,70 @@ TEST_CASE(gemm_half_test)
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
template <typename T = float>
std::vector<T> norm_test(const std::vector<size_t>& x_dims,
                         std::vector<T>& scale,
                         std::vector<T>& bias,
                         const std::string& onnx_file)
{
    migraphx::program p = migraphx::parse_onnx(onnx_file);
    p.compile(migraphx::make_target("ref"));

    migraphx::shape s_x{migraphx::shape::get_type<T>{}, x_dims};
    migraphx::shape s_s{migraphx::shape::get_type<T>{}, {scale.size()}};
    migraphx::shape s_b{migraphx::shape::get_type<T>{}, {scale.size()}};

    std::vector<T> x(s_x.elements());
    std::iota(std::begin(x), std::end(x), 1);

    migraphx::parameter_map pp;
    pp["x"]     = migraphx::argument(s_x, x.data());
    pp["scale"] = migraphx::argument(s_s, scale.data());
    pp["bias"]  = migraphx::argument(s_b, bias.data());

    auto result = p.eval(pp).back();
    std::vector<T> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
    return result_vector;
}
TEST_CASE(group_norm_test)
{
    std::vector<float> scale{1.2, 0.8};
    std::vector<float> bias{0.5, 0.2};
    std::vector<float> result_vector =
        norm_test<float>({1, 4, 2}, scale, bias, "group_norm_3d_test.onnx");
    std::vector<float> gold = {-1.10996256,
                               -0.0366542,
                               1.0366542,
                               2.10996256,
                               -0.87330837,
                               -0.15776947,
                               0.55776947,
                               1.27330837};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}

TEST_CASE(group_norm_half_test)
{
    using migraphx::half;
    std::vector<half> scale{half{1.2}, half{0.8}};
    std::vector<half> bias{half{0.5}, half{0.2}};
    std::vector<half> result_vector =
        norm_test<half>({1, 4, 2}, scale, bias, "group_norm_3d_half_test.onnx");
    std::vector<half> gold = {half{-1.10996256},
                              half{-0.0366542},
                              half{1.0366542},
                              half{2.10996256},
                              half{-0.87330837},
                              half{-0.15776947},
                              half{0.55776947},
                              half{1.27330837}};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(greaterorequal_test)
{
    migraphx::program p = migraphx::parse_onnx("greaterorequal_test.onnx");

@@ -950,6 +1014,130 @@ TEST_CASE(instance_norm_3d_test)
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(isinf_half_test)
{
    migraphx::program p = migraphx::parse_onnx("isinf_half_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape s{migraphx::shape::half_type, {2, 3}};
    migraphx::parameter_map pp;
    migraphx::half nan      = std::numeric_limits<migraphx::half>::quiet_NaN();
    migraphx::half infinity = std::numeric_limits<migraphx::half>::infinity();
    migraphx::half max      = std::numeric_limits<migraphx::half>::max();
    migraphx::half min      = std::numeric_limits<migraphx::half>::min();
    migraphx::half val      = migraphx::half(3.6);
    std::vector<migraphx::half> data = {-infinity, nan, min, val, max, infinity};
    pp["t1"] = migraphx::argument(s, data.data());

    auto result = p.eval(pp).back();
    std::vector<float> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<float> gold = {1, 0, 0, 0, 0, 1};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}

TEST_CASE(isinf_neg_test)
{
    migraphx::program p = migraphx::parse_onnx("isinf_neg_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    migraphx::parameter_map pp;
    float nan      = std::numeric_limits<float>::quiet_NaN();
    float infinity = std::numeric_limits<float>::infinity();
    float max      = std::numeric_limits<float>::max();
    float min      = std::numeric_limits<float>::min();
    std::vector<float> data = {-infinity, nan, min, 3.6, max, infinity};
    pp["t1"] = migraphx::argument(s, data.data());

    auto result = p.eval(pp).back();
    std::vector<float> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<float> gold = {1, 0, 0, 0, 0, 0};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}

TEST_CASE(isinf_double_pos_test)
{
    migraphx::program p = migraphx::parse_onnx("isinf_double_pos_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape s{migraphx::shape::double_type, {2, 3}};
    migraphx::parameter_map pp;
    double nan      = std::numeric_limits<double>::quiet_NaN();
    double infinity = std::numeric_limits<double>::infinity();
    double max      = std::numeric_limits<double>::max();
    double min      = std::numeric_limits<double>::min();
    std::vector<double> data = {-infinity, nan, min, 3.6, max, infinity};
    pp["t1"] = migraphx::argument(s, data.data());

    auto result = p.eval(pp).back();
    std::vector<float> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<float> gold = {0, 0, 0, 0, 0, 1};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}

TEST_CASE(isinf_no_detect_test)
{
    migraphx::program p = migraphx::parse_onnx("isinf_no_detect_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    migraphx::parameter_map pp;
    float nan      = std::numeric_limits<float>::quiet_NaN();
    float infinity = std::numeric_limits<float>::infinity();
    float max      = std::numeric_limits<float>::max();
    float min      = std::numeric_limits<float>::min();
    std::vector<double> data = {-infinity, nan, min, 3.6, max, infinity};
    pp["t1"] = migraphx::argument(s, data.data());

    auto result = p.eval(pp).back();
    std::vector<float> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<float> gold = {0, 0, 0, 0, 0, 0};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(layer_norm_test)
{
    std::vector<float> scale{1.2, 0.8};
    std::vector<float> bias{0.5, 0.2};
    std::vector<float> result_vector =
        norm_test<float>({1, 4, 2}, scale, bias, "layer_norm_3d_test.onnx");
    std::vector<float> gold = {-0.69997597,
                               0.99998398,
                               -0.69997597,
                               0.99998398,
                               -0.69997597,
                               0.99998398,
                               -0.69997597,
                               0.99998398};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}

TEST_CASE(layer_norm_half_test)
{
    using migraphx::half;
    std::vector<half> scale{half{1.2}, half{0.8}};
    std::vector<half> bias{half{0.5}, half{0.2}};
    std::vector<half> result_vector =
        norm_test<half>({1, 4, 2}, scale, bias, "layer_norm_3d_half_test.onnx");
    std::vector<half> gold = {half{-0.69997597},
                              half{0.99998398},
                              half{-0.69997597},
                              half{0.99998398},
                              half{-0.69997597},
                              half{0.99998398},
                              half{-0.69997597},
                              half{0.99998398}};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(lessorequal_test)
{
    migraphx::program p = migraphx::parse_onnx("lessorequal_test.onnx");

@@ -1112,6 +1300,115 @@ TEST_CASE(mean_integral_test)
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
template <typename T = float>
std::vector<T> mvn_test(std::vector<size_t> data_lens, const std::string& test_file)
{
    migraphx::program p = migraphx::parse_onnx(test_file);
    p.compile(migraphx::make_target("ref"));

    migraphx::shape data_shape(migraphx::shape::get_type<T>{}, std::move(data_lens));
    std::vector<T> data(data_shape.elements());
    std::iota(begin(data), end(data), 0);

    migraphx::parameter_map pm;
    pm["data"] = migraphx::argument(data_shape, data.data());

    auto result = p.eval(pm).back();
    std::vector<T> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
    return result_vector;
}
TEST_CASE(mvn_default_axes_test)
{
    auto result = mvn_test({2, 2, 2, 2}, "mvn_default_axes_test.onnx");
    std::vector<float> gold{-1.32424438, -1.08347268, -0.84270097, -0.60192927,
                            -1.32424438, -1.08347268, -0.84270097, -0.60192927,
                            0.60192927,  0.84270097,  1.08347268,  1.32424438,
                            0.60192927,  0.84270097,  1.08347268,  1.32424438};
    EXPECT(migraphx::verify::verify_rms_range(result, gold));
}

TEST_CASE(mvn_default_axes_fp16_test)
{
    using migraphx::half;
    auto result = mvn_test<half>({2, 2, 2, 2}, "mvn_default_axes_fp16_test.onnx");
    std::vector<half> gold{half{-1.324}, half{-1.084}, half{-0.843}, half{-0.602},
                           half{-1.324}, half{-1.084}, half{-0.843}, half{-0.602},
                           half{0.602},  half{0.843},  half{1.084},  half{1.324},
                           half{0.602},  half{0.843},  half{1.084},  half{1.324}};
    EXPECT(migraphx::verify::verify_rms_range(result, gold));
}

TEST_CASE(mvn_rank_2_test)
{
    auto result = mvn_test({2, 2}, "mvn_rank_2_test.onnx");
    std::vector<float> gold{-1, 1, -1, 1};
    EXPECT(migraphx::verify::verify_rms_range(result, gold));
}

TEST_CASE(mvn_rank_2_fp16_test)
{
    using migraphx::half;
    auto result = mvn_test<migraphx::half>({2, 2}, "mvn_rank_2_fp16_test.onnx");
    std::vector<migraphx::half> gold{half{-1}, half{1}, half{-1}, half{1}};
    EXPECT(migraphx::verify::verify_rms_range(result, gold));
}

TEST_CASE(mvn_rank_3_test)
{
    auto result = mvn_test({2, 2, 2}, "mvn_rank_3_test.onnx");
    std::vector<float> gold{-1.34164079, -1.34164079, -0.4472136, -0.4472136,
                            0.4472136,   0.4472136,   1.34164079, 1.34164079};
    EXPECT(migraphx::verify::verify_rms_range(result, gold));
}

TEST_CASE(mvn_rank_3_fp16_test)
{
    using migraphx::half;
    auto result = mvn_test<half>({2, 2, 2}, "mvn_rank_3_fp16_test.onnx");
    std::vector<half> gold{half{-1.342}, half{-1.342}, half{-0.4473}, half{-0.4473},
                           half{0.4473}, half{0.4473}, half{1.342},   half{1.342}};
    EXPECT(migraphx::verify::verify_rms_range(result, gold));
}
TEST_CASE(mod_test)
{
    migraphx::program p = migraphx::parse_onnx("mod_test.onnx");

@@ -1226,6 +1523,77 @@ TEST_CASE(mod_test_fmod_different_types)
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(multinomial_dyn_test)
{
    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 4};
    auto p = migraphx::parse_onnx("multinomial_dyn_test.onnx", options);

    const size_t batch_size(2);
    const size_t categories(5);
    const size_t sample_size(100000);
    p.compile(migraphx::make_target("ref"));

    // Distribution function (2 distributions of 5 categories each)
    std::vector<int> dist{15, 25, 15, 25, 20, 20, 20, 10, 25, 25};
    EXPECT(dist.size() == categories * batch_size);
    std::vector<float> data(categories * batch_size);
    std::transform(dist.begin(), dist.end(), data.begin(), [&](auto d) { return log(d); });

    // Shape of the probability distribution, which also defines the number of categories
    migraphx::shape s{migraphx::shape::float_type, {batch_size, categories}};
    migraphx::parameter_map pp;
    pp["input"] = migraphx::argument(s, data.data());

    auto result = p.eval(pp).back();
    std::vector<int32_t> result_vec(batch_size * sample_size);
    result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });

    // Make a categorical histogram of output
    // for first result in batch
    std::vector<int> res_dist(categories, 0);
    size_t r = 0;
    for(r = 0; r < result_vec.size() / 2; r++)
        res_dist[result_vec[r]]++;

    // normalizing factors for original and measured distributions
    auto dist_sum     = std::accumulate(dist.begin(), dist.begin() + 5, 0);
    auto res_dist_sum = std::accumulate(res_dist.begin(), res_dist.end(), 0);

    // Values approximate the distribution in dist
    std::vector<float> norm(5);
    std::vector<float> res_norm(5);
    std::transform(dist.begin(), dist.begin() + 5, norm.begin(), [&](auto n) {
        return static_cast<double>(n) / dist_sum;
    });
    std::transform(res_dist.begin(), res_dist.end(), res_norm.begin(), [&](auto n) {
        return static_cast<double>(n) / res_dist_sum;
    });

    EXPECT(migraphx::verify::verify_range_with_tolerance(
        norm, migraphx::verify::expected{res_norm}, migraphx::verify::tolerance{0.01}));

    // Make a categorical histogram of output
    // for second result in batch
    std::fill(res_dist.begin(), res_dist.end(), 0);
    for(; r < result_vec.size(); r++)
        res_dist[result_vec[r]]++;

    dist_sum     = std::accumulate(dist.begin() + 5, dist.end(), 0);
    res_dist_sum = std::accumulate(res_dist.begin(), res_dist.end(), 0);
    std::transform(dist.begin() + 5, dist.end(), norm.begin(), [&](auto n) {
        return static_cast<double>(n) / dist_sum;
    });
    std::transform(res_dist.begin(), res_dist.end(), res_norm.begin(), [&](auto n) {
        return static_cast<double>(n) / res_dist_sum;
    });

    EXPECT(migraphx::verify::verify_range_with_tolerance(
        res_norm, migraphx::verify::expected{norm}, migraphx::verify::tolerance{0.01}));
}
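The check above reduces to normalizing each histogram by its sum and comparing the resulting proportions against the input distribution within a 0.01 tolerance. A minimal standalone sketch of that normalization step follows (standard C++ only, using the first distribution's weights from above; it is an illustration, not part of this commit):

// Hypothetical standalone illustration of the proportion check used in multinomial_dyn_test.
#include <algorithm>
#include <cassert>
#include <numeric>
#include <vector>

int main()
{
    std::vector<int> dist{15, 25, 15, 25, 20}; // weights of the first batch row above
    double sum = std::accumulate(dist.begin(), dist.end(), 0.0);
    std::vector<double> norm(dist.size());
    std::transform(dist.begin(), dist.end(), norm.begin(), [&](int n) { return n / sum; });
    // The weights sum to 100, so the expected proportions are 0.15, 0.25, 0.15, 0.25, 0.20;
    // the sampled histogram is normalized the same way before the tolerance comparison.
    assert(norm[1] == 0.25);
    return 0;
}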
TEST_CASE(nonzero_test)
{
    migraphx::program p = migraphx::parse_onnx("nonzero_dynamic_test.onnx");

@@ -1245,6 +1613,288 @@ TEST_CASE(nonzero_test)
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearadd_test)
{
    // github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md#com.microsoft.QLinearAdd
    migraphx::program p = migraphx::parse_onnx("qlinearadd_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape a{migraphx::shape::uint8_type, {64}};
    std::vector<uint8_t> data_a = {0,  2,  4,  6,  8,  10, 12, 14, 16, 18, 20,  22,  24,  26,  28,  30,
                                   32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52,  54,  56,  58,  60,  62,
                                   64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84,  86,  88,  90,  92,  94,
                                   96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126};

    migraphx::shape b{migraphx::shape::uint8_type, {64}};
    std::vector<uint8_t> data_b = {128, 126, 124, 122, 120, 118, 116, 114, 112, 110, 108, 106, 104, 102, 100, 98,
                                   96,  94,  92,  90,  88,  86,  84,  82,  80,  78,  76,  74,  72,  70,  68,  66,
                                   64,  62,  60,  58,  56,  54,  52,  50,  48,  46,  44,  42,  40,  38,  36,  34,
                                   32,  30,  28,  26,  24,  22,  20,  18,  16,  14,  12,  10,  8,   6,   4,   2};

    migraphx::parameter_map pp;
    pp["A"] = migraphx::argument(a, data_a.data());
    pp["B"] = migraphx::argument(b, data_b.data());

    auto result = p.eval(pp).back();
    std::vector<uint8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<uint8_t> gold = {64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
                                 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
                                 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
                                 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
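For context, QLinearAdd (per the ONNX Runtime contrib-operator description linked above) dequantizes both inputs, adds them, and requantizes into the output scale and zero point with saturation. The scales and zero points used by these tests are embedded in the .onnx files and are not visible in this diff, so the sketch below only illustrates the general arithmetic, not the test's actual parameters:

// Hedged sketch of the QLinearAdd arithmetic; the scale/zero-point values are illustrative assumptions.
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>

uint8_t qlinear_add(uint8_t a, float a_scale, uint8_t a_zp,
                    uint8_t b, float b_scale, uint8_t b_zp,
                    float c_scale, uint8_t c_zp)
{
    // dequantize, add in real space, then requantize and saturate to the uint8 range
    float sum = a_scale * (static_cast<float>(a) - a_zp) + b_scale * (static_cast<float>(b) - b_zp);
    float q   = std::round(sum / c_scale) + static_cast<float>(c_zp);
    return static_cast<uint8_t>(std::min(255.0f, std::max(0.0f, q)));
}

int main()
{
    // With unit scales and zero-valued zero points this degenerates to plain saturating addition.
    assert(qlinear_add(3, 1.0f, 0, 4, 1.0f, 0, 1.0f, 0) == 7);
    return 0;
}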
TEST_CASE(qlinearadd_bcast_test)
{
    // github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md#com.microsoft.QLinearAdd
    migraphx::program p = migraphx::parse_onnx("qlinearadd_bcast_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape a{migraphx::shape::int8_type, {64}};
    std::vector<int8_t> data_a = {-64, -62, -60, -58, -56, -54, -52, -50, -48, -46, -44, -42, -40, -38, -36, -34,
                                  -32, -30, -28, -26, -24, -22, -20, -18, -16, -14, -12, -10, -8,  -6,  -4,  -2,
                                  0,   2,   4,   6,   8,   10,  12,  14,  16,  18,  20,  22,  24,  26,  28,  30,
                                  32,  34,  36,  38,  40,  42,  44,  46,  48,  50,  52,  54,  56,  58,  60,  62};

    migraphx::shape b{migraphx::shape::int8_type, {1, 1, 64}};
    std::vector<int8_t> data_b = {96,  94,  92,  90,  88,  86,  84,  82,  80,  78,  76,  74,  72,  70,  68,  66,
                                  64,  62,  60,  58,  56,  54,  52,  50,  48,  46,  44,  42,  40,  38,  36,  34,
                                  32,  30,  28,  26,  24,  22,  20,  18,  16,  14,  12,  10,  8,   6,   4,   2,
                                  0,   -2,  -4,  -6,  -8,  -10, -12, -14, -16, -18, -20, -22, -24, -26, -28, -30};

    migraphx::parameter_map pp;
    pp["A"] = migraphx::argument(a, data_a.data());
    pp["B"] = migraphx::argument(b, data_b.data());

    auto result = p.eval(pp).back();
    std::vector<int8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<int8_t> gold = {-64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64,
                                -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64,
                                -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64,
                                -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64, -64};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearconv_test)
{
    // https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__QLinearConv.html
    migraphx::program p = migraphx::parse_onnx("qlinearconv_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape sx{migraphx::shape::uint8_type, {1, 1, 7, 7}};
    std::vector<uint8_t> x_data = {255, 174, 162, 25,  203, 168, 58,  15,  59,  237, 95,  129, 0,
                                   64,  56,  242, 153, 221, 168, 12,  166, 232, 178, 186, 195, 237,
                                   162, 237, 188, 39,  124, 77,  80,  102, 43,  127, 230, 21,  83,
                                   41,  40,  134, 255, 154, 92,  141, 42,  148, 247};

    migraphx::parameter_map pp;
    pp["X"] = migraphx::argument(sx, x_data.data());

    auto result = p.eval(pp).back();
    std::vector<uint8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<uint8_t> gold = {0,   81,  93,  230, 52,  87,  197, 240, 196, 18,  160, 126, 255,
                                 191, 199, 13,  102, 34,  87,  243, 89,  23,  77,  69,  60,  18,
                                 93,  18,  67,  216, 131, 178, 175, 153, 212, 128, 25,  234, 172,
                                 214, 215, 121, 0,   101, 163, 114, 213, 107, 8};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearconv_pad_0_test)
{
    // https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
    migraphx::program p = migraphx::parse_onnx("qlinearconv_pad_0_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape sx{migraphx::shape::uint8_type, {1, 1, 5, 5}};
    std::vector<uint8_t> x_data = {0,   11,  21,  32,  42,  53,  64,  74,  85,  96,  106, 117, 128,
                                   138, 149, 159, 170, 181, 191, 202, 212, 223, 234, 244, 255};

    migraphx::parameter_map pp;
    pp["X"] = migraphx::argument(sx, x_data.data());

    auto result = p.eval(pp).back();
    std::vector<int8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    // # (1, 1, 3, 3) output tensor
    std::vector<int8_t> gold = {-43, -29, -15, 28, 42, 56, 99, 113, 127};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}

TEST_CASE(qlinearconv_pad_1_test)
{
    // https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
    migraphx::program p = migraphx::parse_onnx("qlinearconv_pad_1_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape sx{migraphx::shape::uint8_type, {1, 1, 5, 5}};
    std::vector<uint8_t> x_data = {0,   11,  21,  32,  42,  53,  64,  74,  85,  96,  106, 117, 128,
                                   138, 149, 159, 170, 181, 191, 202, 212, 223, 234, 244, 255};

    migraphx::parameter_map pp;
    pp["X"] = migraphx::argument(sx, x_data.data());

    auto result = p.eval(pp).back();
    std::vector<uint8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    // # (1, 1, 5, 5) output tensor
    std::vector<uint8_t> gold = {19,  33,  43,  52,  38,  52,  85,  99,  113, 80,  99,  156, 170,
                                 184, 128, 146, 227, 241, 255, 175, 113, 175, 184, 194, 132};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}

TEST_CASE(qlinearconv_scale_1D_test)
{
    // https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
    migraphx::program p = migraphx::parse_onnx("qlinearconv_scale_1D_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape sx{migraphx::shape::uint8_type, {1, 1, 5, 5}};
    std::vector<uint8_t> x_data = {0,   11,  21,  32,  42,  53,  64,  74,  85,  96,  106, 117, 128,
                                   138, 149, 159, 170, 181, 191, 202, 212, 223, 234, 244, 255};

    migraphx::parameter_map pp;
    pp["X"] = migraphx::argument(sx, x_data.data());

    auto result = p.eval(pp).back();
    std::vector<int8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    // # (1, 2, 3, 3) output tensor
    std::vector<int8_t> gold = {-43, -29, -15, 28, 42, 56, 99, 113, 127,
                                -43, -29, -15, 28, 42, 56, 99, 113, 127};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearglobalavgpool_test)
{
    // github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md
    // #com.microsoft.QLinearGlobalAveragePool
    migraphx::program p = migraphx::parse_onnx("qlinearglobalavgpool_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape sh_x{migraphx::shape::uint8_type, {1, 3, 4, 4}};
    std::vector<uint8_t> data_x = {160, 156, 152, 148, 144, 140, 136, 132, 124, 120, 116, 112,
                                   108, 104, 100, 96,  64,  72,  80,  88,  96,  104, 112, 120,
                                   136, 144, 152, 160, 168, 176, 184, 192, 120, 121, 122, 123,
                                   124, 125, 126, 127, 129, 130, 131, 132, 133, 134, 135, 136};

    migraphx::parameter_map pp;
    pp["X"] = migraphx::argument(sh_x, data_x.data());

    auto result = p.eval(pp).back();
    std::vector<uint8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<uint8_t> gold = {64, 64, 64};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearmatmul_1D_test)
{
    migraphx::program p = migraphx::parse_onnx("qlinearmatmul_1D_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape a{migraphx::shape::uint8_type, {8}};
    std::vector<uint8_t> data_a = {2, 4, 6, 8, 10, 12, 14, 16};

    migraphx::shape b{migraphx::shape::uint8_type, {8}};
    std::vector<uint8_t> data_b = {126, 130, 124, 132, 122, 134, 120, 136};

    migraphx::parameter_map pp;
    pp["A"] = migraphx::argument(a, data_a.data());
    pp["B"] = migraphx::argument(b, data_b.data());

    auto result = p.eval(pp).back();
    std::vector<uint8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<uint8_t> gold = {66};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}

TEST_CASE(qlinearmatmul_2D_test)
{
    migraphx::program p = migraphx::parse_onnx("qlinearmatmul_2D_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape a{migraphx::shape::uint8_type, {1, 8}};
    std::vector<uint8_t> data_a = {2, 4, 6, 8, 10, 12, 14, 16};

    migraphx::shape b{migraphx::shape::uint8_type, {8, 1}};
    std::vector<uint8_t> data_b = {126, 130, 124, 132, 122, 134, 120, 136};

    migraphx::parameter_map pp;
    pp["A"] = migraphx::argument(a, data_a.data());
    pp["B"] = migraphx::argument(b, data_b.data());

    auto result = p.eval(pp).back();
    std::vector<uint8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<uint8_t> gold = {66};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}

TEST_CASE(qlinearmatmul_3D_test)
{
    // https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__QLinearMatMul.html
    migraphx::program p = migraphx::parse_onnx("qlinearmatmul_3D_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape a{migraphx::shape::uint8_type, {2, 2, 4}};
    std::vector<uint8_t> data_a = {208, 236, 0, 238, 3, 214, 255, 29,
                                   208, 236, 0, 238, 3, 214, 255, 29};

    migraphx::shape b{migraphx::shape::uint8_type, {2, 4, 3}};
    std::vector<uint8_t> data_b = {152, 51, 244, 60, 26, 255, 0, 127, 246, 127, 254, 247,
                                   152, 51, 244, 60, 26, 255, 0, 127, 246, 127, 254, 247};

    migraphx::parameter_map pp;
    pp["A"] = migraphx::argument(a, data_a.data());
    pp["B"] = migraphx::argument(b, data_b.data());

    auto result = p.eval(pp).back();
    std::vector<uint8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<uint8_t> gold = {168, 115, 255, 1, 66, 151, 168, 115, 255, 1, 66, 151};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(resize_downsample_f_test)
{
    migraphx::program p = migraphx::parse_onnx("resize_downsample_f_test.onnx");

@@ -1406,6 +2056,43 @@ TEST_CASE(reversesequence_time_verify_test)
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(round_half_test)
{
    migraphx::program p = migraphx::parse_onnx("round_half_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape xs{migraphx::shape::half_type, {4, 4}};
    std::vector<float> tmp = {-3.51, -3.5, -3.49, -2.51, -2.50, -2.49, -1.6, -1.5,
                              -0.51, -0.5, 0.5,   0.6,   2.4,   2.5,   3.5,  4.5};
    std::vector<migraphx::half> data{tmp.cbegin(), tmp.cend()};

    migraphx::parameter_map param_map;
    param_map["x"] = migraphx::argument(xs, data.data());

    auto result = p.eval(param_map).back();
    std::vector<migraphx::half> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    tmp = {-4.0, -4.0, -3.0, -3.0, -2.0, -2.0, -2.0, -2.0,
           -1.0, 0.0,  0.0,  1.0,  2.0,  2.0,  4.0,  4.0};
    std::vector<migraphx::half> gold{tmp.cbegin(), tmp.cend()};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
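Note that the gold values above follow round-half-to-even (for example -2.5 -> -2, 2.5 -> 2, 3.5 -> 4), which is the tie-breaking rule the ONNX Round operator specifies. A small standalone check of that convention, using only the standard library and not taken from this commit:

#include <cassert>
#include <cfenv>
#include <cmath>

int main()
{
    std::fesetround(FE_TONEAREST); // round to nearest, ties to even (the default mode)
    assert(std::nearbyint(2.5) == 2.0);
    assert(std::nearbyint(3.5) == 4.0);
    assert(std::nearbyint(-2.5) == -2.0);
    return 0;
}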
TEST_CASE(selu_test)
{
    migraphx::program p = migraphx::parse_onnx("selu_test.onnx");

@@ -1426,6 +2113,112 @@ TEST_CASE(selu_test)
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(shrink_hard_test)
{
    migraphx::program p = migraphx::parse_onnx("shrink_hard_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape s{migraphx::shape::float_type, {5}};
    std::vector<float> data{-2, -1, 0, 1, 2};

    migraphx::parameter_map pp;
    pp["x"] = migraphx::argument(s, data.data());

    auto result = p.eval(pp).back();
    std::vector<float> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<float> gold = {-2, 0, 0, 0, 2};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}

TEST_CASE(shrink_soft_test)
{
    migraphx::program p = migraphx::parse_onnx("shrink_soft_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape s{migraphx::shape::float_type, {5}};
    std::vector<float> data{-2, -1, 0, 1, 2};

    migraphx::parameter_map pp;
    pp["x"] = migraphx::argument(s, data.data());

    auto result = p.eval(pp).back();
    std::vector<float> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<float> gold = {-0.5, 0, 0, 0, 0.5};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
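For context, ONNX Shrink maps x to x + bias when x < -lambd, to x - bias when x > lambd, and to 0 otherwise. The lambd and bias attributes used by these tests live inside the referenced .onnx files and are not shown in this diff, so the values in the sketch below are illustrative assumptions only:

// Hedged sketch of the Shrink formula; lambd = 1.5 and bias = 0 are assumed values for illustration.
#include <cassert>

float shrink(float x, float lambd, float bias)
{
    if(x < -lambd)
        return x + bias;
    if(x > lambd)
        return x - bias;
    return 0.0f;
}

int main()
{
    assert(shrink(-2.0f, 1.5f, 0.0f) == -2.0f);
    assert(shrink(1.0f, 1.5f, 0.0f) == 0.0f);
    assert(shrink(2.0f, 1.5f, 0.0f) == 2.0f);
    return 0;
}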
TEST_CASE(shrink_verify_test)
{
    migraphx::program p = migraphx::parse_onnx("shrink_verify_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape s{migraphx::shape::half_type, {5}};
    std::vector<float> tmp = {-10.0, -5.0, 0.0, 5.0, 10.0};
    std::vector<migraphx::half> data{tmp.cbegin(), tmp.cend()};

    migraphx::parameter_map pp;
    pp["x"] = migraphx::argument(s, data.data());

    auto result = p.eval(pp).back();
    std::vector<migraphx::half> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    tmp = {-9.0, -4.0, 1.0, 4.0, 9.0};
    std::vector<migraphx::half> gold{tmp.cbegin(), tmp.cend()};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}

TEST_CASE(shrink_verify2_test)
{
    migraphx::program p = migraphx::parse_onnx("shrink_verify2_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape s{migraphx::shape::half_type, {5}};
    std::vector<float> tmp = {-10.0, -5.0, 0.0, 5.0, 10.0};
    std::vector<migraphx::half> data{tmp.cbegin(), tmp.cend()};

    migraphx::parameter_map pp;
    pp["x"] = migraphx::argument(s, data.data());

    auto result = p.eval(pp).back();
    std::vector<migraphx::half> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    tmp = {-5.0, 0.0, 5.0, 10.0, 5.0};
    std::vector<migraphx::half> gold{tmp.cbegin(), tmp.cend()};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(shrink_int8_test)
{
    migraphx::program p = migraphx::parse_onnx("shrink_int8_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape s{migraphx::shape::int8_type, {3, 3}};
    std::vector<int8_t> data{-4, -3, -2, -1, 0, 1, 2, 3, 4};

    migraphx::parameter_map pp;
    pp["x"] = migraphx::argument(s, data.data());

    auto result = p.eval(pp).back();
    std::vector<int8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<int8_t> gold = {-2, -1, 0, 0, 0, 0, 0, 1, 2};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}

TEST_CASE(shrink_uint8_test)
{
    migraphx::program p = migraphx::parse_onnx("shrink_uint8_test.onnx");
    p.compile(migraphx::make_target("ref"));

    migraphx::shape s{migraphx::shape::uint8_type, {3, 3}};
    std::vector<uint8_t> data{1, 2, 3, 4, 5, 6, 7, 8, 9};

    migraphx::parameter_map pp;
    pp["x"] = migraphx::argument(s, data.data());

    auto result = p.eval(pp).back();
    std::vector<uint8_t> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<uint8_t> gold = {0, 0, 0, 0, 0, 10, 11, 12, 13};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(size_verify_test)
{
    migraphx::program p = migraphx::parse_onnx("size_verify_test.onnx");

@@ -1637,9 +2430,10 @@ std::vector<float> gen_trilu_test(const migraphx::shape& s, const migraphx::prog
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
    return result_vector;
}
TEST_CASE(trilu_test)
TEST_CASE(triu_test)
{
    migraphx::program p = migraphx::parse_onnx("trilu_test.onnx");
    migraphx::program p = migraphx::parse_onnx("triu_test.onnx");
    std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);

@@ -1648,9 +2442,9 @@ TEST_CASE(trilu_test)
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(trilu_batch_diff_k_test)
TEST_CASE(triu_batch_diff_k_test)
{
    migraphx::program p = migraphx::parse_onnx("trilu_batch_diff_k_test.onnx");
    migraphx::program p = migraphx::parse_onnx("triu_batch_diff_k_test.onnx");
    std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {2, 2, 3}}, p);

@@ -1659,9 +2453,42 @@ TEST_CASE(trilu_batch_diff_k_test)
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(trilu_lower_test)
TEST_CASE(tril_test)
{
    migraphx::program p = migraphx::parse_onnx("tril_test.onnx");
    std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);
    std::vector<float> gold = {1, 0, 0, 0, 5, 6, 0, 0, 9, 10, 11, 0};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}

TEST_CASE(tril_batch_diff_k_test)
{
    migraphx::program p = migraphx::parse_onnx("tril_batch_diff_k_test.onnx");
    std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {2, 2, 3}}, p);
    std::vector<float> gold = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(triu_neg_k_test)
{
    migraphx::program p = migraphx::parse_onnx("trilu_lower_test.onnx");
    migraphx::program p = migraphx::parse_onnx("triu_neg_k_test.onnx");
    std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);
    std::vector<float> gold = {1, 2, 3, 4, 5, 6, 7, 8, 0, 10, 11, 12};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}

TEST_CASE(tril_neg_k_test)
{
    migraphx::program p = migraphx::parse_onnx("tril_neg_k_test.onnx");
    std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);

@@ -1670,9 +2497,9 @@ TEST_CASE(trilu_lower_test)
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(trilu_out_k_test)
TEST_CASE(triu_out_k_test)
{
    migraphx::program p = migraphx::parse_onnx("trilu_out_k_test.onnx");
    migraphx::program p = migraphx::parse_onnx("triu_out_k_test.onnx");
    std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);

@@ -1681,9 +2508,20 @@ TEST_CASE(trilu_out_k_test)
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(trilu_row_one_test)
TEST_CASE(tril_out_k_test)
{
    migraphx::program p = migraphx::parse_onnx("tril_out_k_test.onnx");
    std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);
    std::vector<float> gold = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(triu_row_one_test)
{
    migraphx::program p = migraphx::parse_onnx("trilu_row_one_test.onnx");
    migraphx::program p = migraphx::parse_onnx("triu_row_one_test.onnx");
    std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {1, 4}}, p);

@@ -1692,4 +2530,15 @@ TEST_CASE(trilu_row_one_test)
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(tril_row_one_test)
{
    migraphx::program p = migraphx::parse_onnx("tril_row_one_test.onnx");
    std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {1, 4}}, p);
    std::vector<float> gold = {1, 2, 0, 0};
    EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/op_shape_test.cpp

@@ -88,7 +88,7 @@ TEST_CASE(allocate_static)
    expect_shape(out_shape, migraphx::make_op("allocate", {{"shape", to_value(out_shape)}}));
}
TEST_CASE(allocate_static_input_error)
TEST_CASE(allocate_static_input)
{
    migraphx::shape input{migraphx::shape::int64_type, {3}};
    migraphx::shape out_shape{migraphx::shape::float_type, {2, 3, 4}};

@@ -116,7 +116,7 @@ TEST_CASE(allocate_dyn_with_shape_attr)
                 input);
}
TEST_CASE(allocate_dyn_no_input_error)
TEST_CASE(allocate_dyn_no_input)
{
    migraphx::shape shape_attr{migraphx::shape::float_type,
                               {{1, 4}, {3, 3}, {4, 8, {4, 6}}, {4, 8}, {4, 6}}};

@@ -124,6 +124,21 @@ TEST_CASE(allocate_dyn_no_input_error)
                 migraphx::make_op("allocate", {{"shape", migraphx::to_value(shape_attr)}}));
}

TEST_CASE(allocate_shape_and_buf_type_error)
{
    migraphx::shape shape_attr{migraphx::shape::float_type,
                               {{1, 4}, {3, 3}, {4, 8, {4, 6}}, {4, 8}, {4, 6}}};
    throws_shape(migraphx::make_op(
        "allocate",
        {{"shape", migraphx::to_value(shape_attr)}, {"buf_type", migraphx::shape::half_type}}));
}

TEST_CASE(allocate_no_attr_error)
{
    migraphx::shape input{migraphx::shape::int64_type, {4}};
    throws_shape(migraphx::make_op("allocate"), input);
}
TEST_CASE(argmax_axis0)
{
    migraphx::shape input{migraphx::shape::half_type, {2, 3, 4, 5}};

@@ -1942,12 +1957,42 @@ TEST_CASE(multibroadcast_3in_dyn_dyn)
    expect_shape(expected_shape, migraphx::make_op("multibroadcast"), c_shape, a_shape, b_shape);
}
TEST_CASE(multinomial)
TEST_CASE(multinomial_bool_type)
{
    migraphx::shape s{migraphx::shape::float_type, {2, 5}};
    migraphx::shape s1{migraphx::shape::float_type, {1, 2}};
    migraphx::shape s2{migraphx::shape::float_type, {3, 4}};
    int dtype = 0;
    throws_shape(migraphx::make_op("multinomial", {{"dtype", dtype}}), s, s);
    throws_shape(migraphx::make_op("multinomial", {{"dtype", dtype}}), s1, s2);
}

TEST_CASE(multinomial)
{
    migraphx::shape s1{migraphx::shape::float_type, {1, 2}};
    migraphx::shape s2{migraphx::shape::float_type, {3, 4}};
    migraphx::shape s3{migraphx::shape::float_type, {1, 4}};
    int dtype = 2;
    expect_shape(s3, migraphx::make_op("multinomial", {{"dtype", dtype}}), s1, s2);
}

TEST_CASE(multinomial_0size_input)
{
    migraphx::shape s1{migraphx::shape::float_type, {1, 2}};
    migraphx::shape s2{migraphx::shape::float_type, {}};
    int dtype = 2;
    throws_shape(migraphx::make_op("multinomial", {{"dtype", dtype}}), s1, s2);
}

TEST_CASE(multinomial_dyn)
{
    migraphx::shape s1{migraphx::shape::int32_type, {{2, 3}, {5, 6}}};
    migraphx::shape s2{migraphx::shape::int32_type, {{7, 8}, {9, 10}}};
    migraphx::shape s3{migraphx::shape::int32_type, {{2, 3}, {9, 10}}};
    expect_shape(s3, migraphx::make_op("multinomial", {{"dtype", migraphx::shape::int32_type}}), s1, s2);
}
TEST_CASE(nms_shape)

@@ -2684,7 +2729,7 @@ TEST_CASE(reshape_broadcast_squeeze_memlayout_change)
    expect_shape(output, migraphx::make_op("reshape", {{"dims", output.lens()}}), input);
}
TEST_CASE(reshape_dyn_shape)
TEST_CASE(reshape_dyn_1in)
{
    migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {24, 24}, {1, 1}, {1, 1}}};
    for(auto&& new_shape : std::vector<std::vector<int64_t>>{

@@ -2708,6 +2753,27 @@ TEST_CASE(reshape_dyn_shape)
    }
}
TEST_CASE(reshape_dyn_2in_0)
{
    migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {24, 24}, {1, 1}, {1, 1}}};
    migraphx::shape output{migraphx::shape::float_type, {{1, 4}, {8, 8}, {3, 3}, {1, 1}}};
    expect_shape(output, migraphx::make_op("reshape"), input, output);
}

TEST_CASE(reshape_dyn_2in_1)
{
    migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {24, 24}, {1, 1}, {1, 1}}};
    migraphx::shape output{migraphx::shape::float_type, {{12, 12}, {2, 2}, {1, 1}, {1, 4}}};
    expect_shape(output, migraphx::make_op("reshape"), input, output);
}

TEST_CASE(reshape_dyn_2in_2)
{
    migraphx::shape input{migraphx::shape::float_type, {2, 24, 1, 1}};
    migraphx::shape output{migraphx::shape::float_type, {{1, 2}, {6, 12}, {1, 1}, {4, 4}}};
    expect_shape(output, migraphx::make_op("reshape"), input, output);
}
TEST_CASE(reshape_multiple_non_fixed_error)
{
    migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {24, 24}, {10, 20}, {1, 1}}};

@@ -3166,7 +3232,65 @@ TEST_CASE(slice_static_shape)
}
TEST_CASE(slice_var_inputs_static_shape0)
{
    // attr ends and axes set; inputs are (data, input_starts)
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{3, 3}, {0, 4}, {0, 4}}},
                 migraphx::make_op("slice", {{"ends", {2, 3}}, {"axes", {1, 2}}}),
                 input,
                 starts);
}

TEST_CASE(slice_var_inputs_static_mismatch_error0)
{
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    throws_shape(migraphx::make_op("slice", {{"ends", {2, 3, 4}}, {"axes", {0, 1, 2}}}), input, starts);
}

TEST_CASE(slice_var_inputs_static_shape1)
{
    // attr starts and axes set; inputs are (data, input_ends)
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{3, 3}, {0, 4}, {0, 4}}},
                 migraphx::make_op("slice", {{"starts", {0, 1}}, {"axes", {1, 2}}}),
                 input,
                 ends);
}

TEST_CASE(slice_var_inputs_static_mismatch_error1)
{
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    throws_shape(migraphx::make_op("slice", {{"starts", {0, 1, 2}}, {"axes", {0, 1, 2}}}), input, ends);
}

TEST_CASE(slice_var_inputs_static_shape2)
{
    // attr starts and ends set; inputs are (data, input_axes)
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 3}, {0, 4}, {0, 4}}},
                 migraphx::make_op("slice", {{"starts", {0, 1}}, {"ends", {1, 2}}}),
                 input,
                 axes);
}

TEST_CASE(slice_var_inputs_static_mismatch_error2)
{
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    throws_shape(migraphx::make_op("slice", {{"starts", {0, 1, 2}}, {"ends", {3, 4, 4}}}), input, axes);
}
TEST_CASE(slice_var_inputs_static_shape3)
{
    // attr axes set; inputs are (data, input_starts, input_ends)
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};

@@ -3177,7 +3301,57 @@ TEST_CASE(slice_var_inputs_static_shape0)
                 ends);
}
TEST_CASE(slice_var_inputs_static_shape1)
TEST_CASE(slice_var_inputs_static_mismatch_error3)
{
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    throws_shape(migraphx::make_op("slice", {{"axes", {0, 1, 2}}}), input, starts, ends);
}

TEST_CASE(slice_var_inputs_static_shape4)
{
    // attr ends set; inputs are (data, input_starts, input_axes)
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 3}, {0, 4}, {0, 4}}},
                 migraphx::make_op("slice", {{"ends", {3, 4}}}),
                 input,
                 starts,
                 axes);
}

TEST_CASE(slice_var_inputs_static_mismatch_error4)
{
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    throws_shape(migraphx::make_op("slice", {{"ends", {3, 3, 3}}}), input, starts, axes);
}

TEST_CASE(slice_var_inputs_static_shape5)
{
    // attr starts set; inputs are (data, input_ends, input_axes)
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 3}, {0, 4}, {0, 4}}},
                 migraphx::make_op("slice", {{"starts", {0, 2}}}),
                 input,
                 ends,
                 axes);
}

TEST_CASE(slice_var_inputs_static_mismatch_error5)
{
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    throws_shape(migraphx::make_op("slice", {{"starts", {0, 1, 2}}}), input, ends, axes);
}
TEST_CASE(slice_var_inputs_static_shape6)
{
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};

@@ -3191,7 +3365,7 @@ TEST_CASE(slice_var_inputs_static_shape1)
                 axes);
}
TEST_CASE(slice_var_inputs_static_error0)
TEST_CASE(slice_var_inputs_static_mismatch_error6)
{
    migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};

@@ -3202,17 +3376,125 @@ TEST_CASE(slice_var_inputs_static_error0)
TEST_CASE(slice_var_inputs_dyn_shape0)
{
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {2, 4, {2, 4}}, {2, 4, {2, 4}}}};
    // attr ends and axes set; inputs are (data, input_starts)
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{3, 6}, {0, 6}, {0, 6}}},
                 migraphx::make_op("slice", {{"ends", {2, 3}}, {"axes", {1, 2}}}),
                 input,
                 starts);
}

TEST_CASE(slice_var_inputs_dyn_mismatch_error0)
{
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    throws_shape(migraphx::make_op("slice", {{"ends", {2, 3, 4}}, {"axes", {0, 1, 2}}}), input, starts);
}

TEST_CASE(slice_var_inputs_dyn_shape1)
{
    // attr starts and axes set; inputs are (data, input_ends)
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{3, 6}, {0, 6}, {0, 6}}},
                 migraphx::make_op("slice", {{"starts", {0, 1}}, {"axes", {1, 2}}}),
                 input,
                 ends);
}

TEST_CASE(slice_var_inputs_dyn_mismatch_error1)
{
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    throws_shape(migraphx::make_op("slice", {{"starts", {0, 1, 2}}, {"axes", {0, 1, 2}}}), input, ends);
}

TEST_CASE(slice_var_inputs_dyn_shape2)
{
    // attr starts and ends set; inputs are (data, input_axes)
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 6}, {0, 6}, {0, 6}}},
                 migraphx::make_op("slice", {{"starts", {0, 1}}, {"ends", {8, 8}}}),
                 input,
                 axes);
}

TEST_CASE(slice_var_inputs_dyn_mismatch_error2)
{
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    throws_shape(migraphx::make_op("slice", {{"starts", {0, 1, 2}}, {"ends", {3, 4, 4}}}), input, axes);
}
TEST_CASE(slice_var_inputs_dyn_shape3)
{
    // attr axes set; inputs are (data, input_starts, input_ends)
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{3, 6}, {0, 4}, {0, 4}}},
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{3, 6}, {0, 6}, {0, 6}}},
                 migraphx::make_op("slice", {{"axes", {1, 2}}}),
                 input,
                 starts,
                 ends);
}
TEST_CASE(slice_var_inputs_dyn_shape1)
TEST_CASE(slice_var_inputs_dyn_mismatch_error3)
{
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    throws_shape(migraphx::make_op("slice", {{"axes", {0, 1, 2}}}), input, starts, ends);
}

TEST_CASE(slice_var_inputs_dyn_shape4)
{
    // attr ends set; inputs are (data, input_starts, input_axes)
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 6}, {0, 6}, {0, 6}}},
                 migraphx::make_op("slice", {{"ends", {3, 4}}}),
                 input,
                 starts,
                 axes);
}

TEST_CASE(slice_var_inputs_dyn_mismatch_error4)
{
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    throws_shape(migraphx::make_op("slice", {{"ends", {3, 3, 3}}}), input, starts, axes);
}

TEST_CASE(slice_var_inputs_dyn_shape5)
{
    // attr starts set; inputs are (data, input_ends, input_axes)
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 6}, {0, 6}, {0, 6}}},
                 migraphx::make_op("slice", {{"starts", {0, 2}}}),
                 input,
                 ends,
                 axes);
}

TEST_CASE(slice_var_inputs_dyn_mismatch_error5)
{
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape ends{migraphx::shape::int64_type, {2}};
    migraphx::shape axes{migraphx::shape::int64_type, {2}};
    throws_shape(migraphx::make_op("slice", {{"starts", {0, 1, 2}}}), input, ends, axes);
}
TEST_CASE(slice_var_inputs_dyn_shape6)
{
    migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {2, 4, {2, 4}}, {2, 4, {2, 4}}}};
    migraphx::shape starts{migraphx::shape::int64_type, {2}};

@@ -3226,6 +3508,15 @@ TEST_CASE(slice_var_inputs_dyn_shape1)
                 axes);
}
TEST_CASE
(
slice_var_inputs_dyn_mismatch_error6
)
{
migraphx
::
shape
input
{
migraphx
::
shape
::
float_type
,
{{
3
,
6
},
{
4
,
6
},
{
4
,
6
}}};
migraphx
::
shape
starts
{
migraphx
::
shape
::
int64_type
,
{
2
}};
migraphx
::
shape
ends
{
migraphx
::
shape
::
int64_type
,
{
2
}};
migraphx
::
shape
axes
{
migraphx
::
shape
::
int64_type
,
{
3
}};
throws_shape
(
migraphx
::
make_op
(
"slice"
),
input
,
starts
,
ends
,
axes
);
}
TEST_CASE
(
slice_dyn_shape0
)
{
migraphx
::
shape
input
{
migraphx
::
shape
::
int32_type
,
{{
2
,
3
},
{
7
,
7
},
{
2
,
3
}}};
...
...
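The slice tests above exercise the variable-input form of the operator, where the starts and ends arrive as runtime tensors rather than attributes. As an orientation only (not part of this commit), here is a minimal C++ sketch of how such a program might be assembled, assuming the same MIGraphX API the tests use (add_parameter, make_op, add_instruction); the function name is made up for illustration:

```cpp
#include <migraphx/program.hpp>
#include <migraphx/make_op.hpp>

// Sketch: mirrors the shapes used in slice_var_inputs_dyn_shape3 above.
// "axes" is fixed as an attribute; starts/ends arrive as int64 tensors at runtime,
// so the sliced dimensions stay dynamic until evaluation.
migraphx::program make_var_input_slice()
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape data{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
    migraphx::shape bounds{migraphx::shape::int64_type, {2}};
    auto x      = mm->add_parameter("x", data);
    auto starts = mm->add_parameter("starts", bounds);
    auto ends   = mm->add_parameter("ends", bounds);
    // Two axes in the attribute, so starts/ends must each carry two values;
    // the *_mismatch_error* cases above exercise the failing combinations.
    mm->add_instruction(migraphx::make_op("slice", {{"axes", {1, 2}}}), x, starts, ends);
    return p;
}
```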
test/py/CMakeLists.txt
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
...
@@ -23,8 +23,15 @@
#####################################################################################
include(PythonModules)
set(VENV ${CMAKE_BINARY_DIR}/test/py/venv)
set(VENV_ONNX ${CMAKE_BINARY_DIR}/test/py/venv-onnx)
set(REQUIREMENTS ${CMAKE_CURRENT_SOURCE_DIR}/requirements.txt)
set(REQUIREMENTS_ONNX ${CMAKE_CURRENT_SOURCE_DIR}/requirements-onnx.txt)
set(PYTHON_VERSION_TO_DISABLE_ONNX 3.6)
option(MIGRAPHX_DISABLE_VIRTUAL_ENV "Disable python virtual environments" OFF)
function(add_py_test NAME SCRIPT)
function(add_py_venv_fixture FIXTURE_NAME VIRTUAL_ENV_DIR REQUIREMENTS_FILE)
    foreach(PYTHON_VERSION ${PYTHON_VERSIONS})
        set(ENV_COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=$<TARGET_FILE_DIR:migraphx_pybind_${PYTHON_VERSION}>"
...
@@ -32,28 +39,73 @@ function(add_py_test NAME SCRIPT)
            "MALLOC_CHECK_=3")
        set(PYTHON_EXECUTABLE ${PYTHON_${PYTHON_VERSION}_EXECUTABLE})
        if(NOT TEST py_${PYTHON_VERSION}_${FIXTURE_NAME}_initialize_env)
            if(NOT (${FIXTURE_NAME} STREQUAL "onnx" AND ${PYTHON_VERSION} STREQUAL ${PYTHON_VERSION_TO_DISABLE_ONNX}))
                add_test(NAME py_${PYTHON_VERSION}_${FIXTURE_NAME}_initialize_env
                         COMMAND ${PYTHON_EXECUTABLE} -m venv ${VIRTUAL_ENV_DIR}/${PYTHON_VERSION} --clear)
                set_tests_properties(py_${PYTHON_VERSION}_${FIXTURE_NAME}_initialize_env
                                     PROPERTIES FIXTURES_SETUP ${FIXTURE_NAME}_${PYTHON_VERSION}_INIT_VENV)
                set(PYTHON_EXECUTABLE ${VIRTUAL_ENV_DIR}/${PYTHON_VERSION}/bin/python)
                if(EXISTS ${REQUIREMENTS_FILE})
                    add_test(NAME py_${PYTHON_VERSION}_${FIXTURE_NAME}_setup_env
                             COMMAND ${PYTHON_EXECUTABLE} -m pip install -r ${REQUIREMENTS_FILE})
                else()
                    # If there is no requirements file, then there are no packages to install in the virtual env.
                    # Just create a placeholder test for setting up the required fixture for running the tests.
                    add_test(NAME py_${PYTHON_VERSION}_${FIXTURE_NAME}_setup_env
                             COMMAND ${PYTHON_EXECUTABLE} -m pip install --help)
                endif()
                set_tests_properties(py_${PYTHON_VERSION}_${FIXTURE_NAME}_setup_env
                                     PROPERTIES FIXTURES_REQUIRED ${FIXTURE_NAME}_${PYTHON_VERSION}_INIT_VENV)
                set_tests_properties(py_${PYTHON_VERSION}_${FIXTURE_NAME}_setup_env
                                     PROPERTIES FIXTURES_SETUP ${FIXTURE_NAME}_${PYTHON_VERSION}_VENV)
            endif()
        endif()
    endforeach()
endfunction()

function(add_py_test NAME SCRIPT FIXTURE_NAME VENV_DIR)
    foreach(PYTHON_VERSION ${PYTHON_VERSIONS})
        set(ENV_COMMAND ${CMAKE_COMMAND} -E env
            "PYTHONPATH=$<TARGET_FILE_DIR:migraphx_pybind_${PYTHON_VERSION}>"
            "PYTHONMALLOC=debug"
            "MALLOC_CHECK_=3")
        if(MIGRAPHX_DISABLE_VIRTUAL_ENV)
            set(PYTHON_EXECUTABLE ${PYTHON_${PYTHON_VERSION}_EXECUTABLE})
        else()
            set(PYTHON_EXECUTABLE ${VENV_DIR}/${PYTHON_VERSION}/bin/python)
        endif()
        if(NOT (${FIXTURE_NAME} STREQUAL "onnx" AND ${PYTHON_VERSION} STREQUAL ${PYTHON_VERSION_TO_DISABLE_ONNX}))
            add_test(NAME test_py_${PYTHON_VERSION}_${NAME}
                     COMMAND ${ENV_COMMAND} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/${SCRIPT} ${ARGN})
            add_custom_target(test_py_${PYTHON_VERSION}_${NAME}
                              COMMAND ${ENV_COMMAND} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/${SCRIPT} ${ARGN}
                              COMMENT "${PYTHON_EXECUTABLE} ${SCRIPT}")
            if(NOT MIGRAPHX_DISABLE_VIRTUAL_ENV)
                set_tests_properties(test_py_${PYTHON_VERSION}_${NAME}
                                     PROPERTIES FIXTURES_REQUIRED ${FIXTURE_NAME}_${PYTHON_VERSION}_VENV)
            endif()
        endif()
    endforeach()
endfunction()

add_dependencies(tests migraphx_py)
add_dependencies(check migraphx_py)
add_py_test(ref test_cpu.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(save_load test_save_load.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(op test_op.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(shape test_shape.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(module_construct test_module_construct.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(literal test_literal.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
if(NOT MIGRAPHX_DISABLE_VIRTUAL_ENV)
    add_py_venv_fixture(common ${VENV} ${REQUIREMENTS})
    add_py_venv_fixture(onnx ${VENV_ONNX} ${REQUIREMENTS_ONNX})
endif()
add_py_test(ref test_cpu.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(save_load test_save_load.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(op test_op.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(shape test_shape.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(module_construct test_module_construct.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(literal test_literal.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
if(MIGRAPHX_ENABLE_GPU)
    add_py_test(gpu_offload test_gpu_offload.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
    add_py_test(gpu test_gpu.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
    add_py_test(array test_array.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
    add_py_test(backend onnx_backend_test.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
    add_py_test(gpu_async test_gpu_async.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
    add_py_test(gpu_offload test_gpu_offload.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
    add_py_test(gpu test_gpu.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
    add_py_test(array test_array.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
    add_py_test(backend onnx_backend_test.py onnx ${VENV_ONNX} WORKING_DIRECTORY ${TEST_ONNX_DIR})
    add_py_test(gpu_async test_gpu_async.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
endif()
test/py/onnx_backend_test.py
...
@@ -66,16 +66,6 @@ class MIGraphXBackendTest(onnx.backend.test.BackendTest):
def disabled_tests_onnx_1_7_0(backend_test):
    # fails
    # from OnnxBackendNodeModelTest
    backend_test.exclude(r'test_argmax_keepdims_example_select_last_index_cpu')
    backend_test.exclude(r'test_argmax_negative_axis_keepdims_example_select_last_index_cpu')
    backend_test.exclude(r'test_argmax_no_keepdims_example_select_last_index_cpu')
    backend_test.exclude(r'test_argmin_keepdims_example_select_last_index_cpu')
    backend_test.exclude(r'test_argmin_negative_axis_keepdims_example_select_last_index_cpu')
    backend_test.exclude(r'test_argmin_no_keepdims_example_select_last_index_cpu')
    backend_test.exclude(r'test_logsoftmax_axis_0_cpu')
    backend_test.exclude(r'test_logsoftmax_axis_1_cpu')
    backend_test.exclude(r'test_logsoftmax_default_axis_cpu')
...
@@ -93,7 +83,6 @@ def disabled_tests_onnx_1_7_0(backend_test):
    backend_test.exclude(r'test_nonmaxsuppression_two_batches_cpu')
    backend_test.exclude(r'test_nonmaxsuppression_two_classes_cpu')
    backend_test.exclude(r'test_nonzero_example_cpu')
    backend_test.exclude(r'test_round_cpu')
    backend_test.exclude(r'test_softmax_axis_0_cpu')
    backend_test.exclude(r'test_softmax_axis_1_cpu')
    backend_test.exclude(r'test_softmax_default_axis_cpu')
...
@@ -145,16 +134,12 @@ def disabled_tests_onnx_1_7_0(backend_test):
    backend_test.exclude(r'test_hardmax_example_cpu')
    backend_test.exclude(r'test_hardmax_negative_axis_cpu')
    backend_test.exclude(r'test_hardmax_one_hot_cpu')
    backend_test.exclude(r'test_isinf_cpu')
    backend_test.exclude(r'test_isinf_negative_cpu')
    backend_test.exclude(r'test_isinf_positive_cpu')
    backend_test.exclude(r'test_matmulinteger_cpu')
    backend_test.exclude(r'test_maxpool_2d_uint8_cpu')
    backend_test.exclude(r'test_maxunpool_export_with_output_shape_cpu')
    backend_test.exclude(r'test_maxunpool_export_without_output_shape_cpu')
    backend_test.exclude(r'test_mod_mixed_sign_int32_cpu')
    backend_test.exclude(r'test_mod_mixed_sign_int8_cpu')
    backend_test.exclude(r'test_mvn_cpu')
    backend_test.exclude(r'test_negative_log_likelihood_loss_iinput_shape_is_NCd1_weight_ignore_index_cpu')
...
@@ -249,8 +234,6 @@ def disabled_tests_onnx_1_7_0(backend_test):
    backend_test.exclude(r'test_reversesequence_time_cpu')
    backend_test.exclude(r'test_scan9_sum_cpu')
    backend_test.exclude(r'test_scan_sum_cpu')
    backend_test.exclude(r'test_shrink_hard_cpu')
    backend_test.exclude(r'test_shrink_soft_cpu')
    backend_test.exclude(r'test_slice_cpu')
    backend_test.exclude(r'test_slice_default_axes_cpu')
    backend_test.exclude(r'test_slice_default_steps_cpu')
...
@@ -463,7 +446,6 @@ def disabled_tests_onnx_1_7_0(backend_test):
    backend_test.exclude(r'test_sequence_model6_cpu')
    backend_test.exclude(r'test_sequence_model7_cpu')
    backend_test.exclude(r'test_sequence_model8_cpu')
    backend_test.exclude(r'test_shrink_cpu')
    backend_test.exclude(r'test_strnorm_model_monday_casesensintive_lower_cpu')
    backend_test.exclude(r'test_strnorm_model_monday_casesensintive_nochangecase_cpu')
...
@@ -594,9 +576,6 @@ def disabled_tests_onnx_1_9_0(backend_test):
    backend_test.exclude(r'test_gru_batchwise_cpu')
    backend_test.exclude(r'test_lstm_batchwise_cpu')
    backend_test.exclude(r'test_simple_rnn_batchwise_cpu')
    backend_test.exclude(r'test_tril_cpu')
    backend_test.exclude(r'test_tril_one_row_neg_cpu')
    backend_test.exclude(r'test_tril_square_cpu')
    # from OnnxBackendPyTorchConvertedModelTest
    backend_test.exclude(r'test_MaxPool1d_stride_padding_dilation_cpu')
    backend_test.exclude(r'test_MaxPool2d_stride_padding_dilation_cpu')
...
@@ -806,7 +785,6 @@ def disabled_tests_onnx_1_13_0(backend_test):
    backend_test.exclude(r'test_group_normalization_example_cpu')
    backend_test.exclude(r'test_group_normalization_example_expanded_cpu')
    backend_test.exclude(r'test_mish_cpu')
    backend_test.exclude(r'test_mvn_expanded_ver18_cpu')
    backend_test.exclude(r'test_optional_get_element_optional_sequence_cpu')
    backend_test.exclude(r'test_optional_get_element_optional_tensor_cpu')
    backend_test.exclude(r'test_optional_get_element_tensor_cpu')
...
@@ -853,10 +831,6 @@ def disabled_tests_onnx_1_13_0(backend_test):
    backend_test.exclude(r'test_scatter_elements_with_reduction_max_cpu')
    backend_test.exclude(r'test_scatter_elements_with_reduction_min_cpu')
    # The following tests fail due to the CastLike operator being unsupported
    backend_test.exclude(r'test_split_1d_uneven_split_opset18_cpu')
    backend_test.exclude(r'test_split_2d_uneven_split_opset18_cpu')

def disabled_tests_onnx_1_14_0(backend_test):
    # fails
...
tools/generate.sh → test/py/requirements-onnx.txt 100755 → 100644
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
...
@@ -21,23 +21,9 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CLANG_FORMAT=/opt/rocm/llvm/bin/clang-format
SRC_DIR=$DIR/../src
PYTHON=python3
if type -p python3.6 > /dev/null; then
    PYTHON=python3.6
fi
if type -p python3.8 > /dev/null; then
    PYTHON=python3.8
fi
ls -1 $DIR/include/ | xargs -n 1 -P $(nproc) -I{} -t bash -c "$PYTHON $DIR/te.py $DIR/include/{} | $CLANG_FORMAT -style=file > $SRC_DIR/include/migraphx/{}"
function api {
    $PYTHON $DIR/api.py $SRC_DIR/api/migraphx.py $1 | $CLANG_FORMAT -style=file > $2
}
api $DIR/api/migraphx.h $SRC_DIR/api/include/migraphx/migraphx.h
echo "Finished generating header migraphx.h"
api $DIR/api/api.cpp $SRC_DIR/api/api.cpp
echo "Finished generating source api.cpp "
onnx==1.14.1
protobuf==3.20.2
numpy==1.21.6
packaging==23.0
pytest==6.0.1
test/py/test_gpu.py
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
...
@@ -21,12 +21,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
import sys
import migraphx
try:
    import numpy as np
except:
    sys.exit()


def test_conv_relu():
...
@@ -55,8 +50,12 @@ def test_sub_uint64():
    params = {}
    shapes = p.get_parameter_shapes()
    params["0"] = np.arange(120).reshape(shapes["0"].lens()).astype(np.uint64)
    params["1"] = np.arange(20).reshape(shapes["1"].lens()).astype(np.uint64)
    params["0"] = migraphx.create_argument(
        migraphx.shape(type='uint64_type', lens=shapes["0"].lens()), list(range(120)))
    params["1"] = migraphx.create_argument(
        migraphx.shape(type='uint64_type', lens=shapes["1"].lens()), list(range(20)))
    r = p.run(params)
    print(r)
...
@@ -71,7 +70,9 @@ def test_neg_int64():
    params = {}
    shapes = p.get_parameter_shapes()
    params["0"] = np.arange(6).reshape(shapes["0"].lens()).astype(np.int64)
    params["0"] = migraphx.create_argument(
        migraphx.shape(type='int64_type', lens=shapes["0"].lens()), list(range(6)))
    r = p.run(params)
    print(r)
...
@@ -86,8 +87,9 @@ def test_nonzero():
    params = {}
    shapes = p.get_parameter_shapes()
    params["data"] = np.array([1, 1, 0, 1]).reshape(shapes["data"].lens()).astype(bool)
    params["data"] = migraphx.create_argument(
        migraphx.shape(type='bool_type', lens=shapes["data"].lens()), [1, 1, 0, 1])
    r = p.run(params)
    print(r)
...
@@ -105,8 +107,8 @@ def test_fp16_imagescaler():
    params = {}
    shapes = p.get_parameter_shapes()
    params["0"] = np.random.randn(768).reshape(shapes["0"].lens()).astype(np.float16)
    params["0"] = migraphx.generate_argument(
        migraphx.shape(type='half_type', lens=shapes["0"].lens()), 768)
    r = p.run(params)[-1]
    print(r)
...
@@ -124,10 +126,12 @@ def test_if_pl():
    params = {}
    shapes = p.get_parameter_shapes()
    params["x"] = np.ones(6).reshape(shapes["x"].lens()).astype(np.float32)
    params["y"] = np.array([2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]).reshape(shapes["y"].lens()).astype(np.float32)
    params["cond"] = np.array([1]).reshape(()).astype(bool)
    params["x"] = migraphx.fill_argument(
        migraphx.shape(type='float_type', lens=shapes["x"].lens()), 1)
    params["y"] = migraphx.fill_argument(
        migraphx.shape(type='float_type', lens=shapes["y"].lens()), 2.0)
    params["cond"] = migraphx.fill_argument(
        migraphx.shape(type="bool", lens=[1], strides=[0]), 1)
    r = p.run(params)[-1]
    print(r)
...
test/py/test_numpy.py
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
...
@@ -21,11 +21,8 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
import migraphx, sys
try:
    import numpy as np
except:
    sys.exit()
import migraphx
import numpy as np


def test_add_op():
...
test/ref/allocate.cpp
...
@@ -30,7 +30,7 @@
#include <test.hpp>

TEST_CASE(allocate_dyn)
TEST_CASE(allocate_dyn0)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
...
@@ -47,3 +47,21 @@ TEST_CASE(allocate_dyn)
    migraphx::shape sresult{migraphx::shape::float_type, {2, 3, 4, 4}};
    result.visit([&](auto output) { EXPECT(output.get_shape() == sresult); });
}

TEST_CASE(allocate_dyn1)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::int64_type, {4}};
    migraphx::shape out_shape{migraphx::shape::float_type, {2, 3, 4, 4}};
    auto out_dims = mm->add_parameter("out_dims", s);
    mm->add_instruction(migraphx::make_op("allocate", {{"shape", migraphx::to_value(out_shape)}}), out_dims);
    p.compile(migraphx::make_target("ref"));

    migraphx::parameter_map params;
    std::vector<int64_t> data = {2, 3, 4, 4};
    params["out_dims"] = migraphx::argument(s, data.data());
    auto result = p.eval(params).back();
    result.visit([&](auto output) { EXPECT(output.get_shape() == out_shape); });
}
test/ref/argmax.cpp
...
@@ -147,3 +147,37 @@ TEST_CASE(argmax_test_nonstd_shape)
    res_gold.visit([&](auto output) { res_gold_vec.assign(output.begin(), output.end()); });
    EXPECT(migraphx::verify::verify_rms_range(result_vec, res_gold_vec));
}

TEST_CASE(argmax_test_select_last_index_0)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    std::vector<float> data      = {2.0305, -1.853, 2.0305, -1.5706, 0.7545, 0.7545};
    std::vector<int64_t> res_gold = {2, 2};
    migraphx::shape data_shape{migraphx::shape::float_type, {2, 3}};
    auto dl = mm->add_literal(migraphx::literal{data_shape, data});
    mm->add_instruction(migraphx::make_op("argmax", {{"axis", 1}, {"select_last_index", true}}), dl);
    p.compile(migraphx::make_target("ref"));
    auto result = p.eval({}).back();
    std::vector<int64_t> result_vec;
    result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });
    EXPECT(migraphx::verify::verify_rms_range(result_vec, res_gold));
}

TEST_CASE(argmax_test_select_last_index_1)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    std::vector<float> data      = {2.0305, -1.853, 2.0305, -1.5706, 0.7545, 0.7545};
    std::vector<int64_t> res_gold = {0, 1};
    migraphx::shape data_shape{migraphx::shape::float_type, {2, 3}};
    auto dl = mm->add_literal(migraphx::literal{data_shape, data});
    mm->add_instruction(migraphx::make_op("argmax", {{"axis", 1}, {"select_last_index", false}}), dl);
    p.compile(migraphx::make_target("ref"));
    auto result = p.eval({}).back();
    std::vector<int64_t> result_vec;
    result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });
    EXPECT(migraphx::verify::verify_rms_range(result_vec, res_gold));
}
test/ref/argmin.cpp
...
@@ -125,3 +125,37 @@ TEST_CASE(argmin_test_nonstd_shape)
    res_gold.visit([&](auto output) { res_gold_vec.assign(output.begin(), output.end()); });
    EXPECT(migraphx::verify::verify_rms_range(result_vec, res_gold_vec));
}

TEST_CASE(argmin_test_select_last_index_0)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    std::vector<float> data      = {-2.0305, 0.853, -2.0305, 1.5706, 0.7545, 0.7545};
    std::vector<int64_t> res_gold = {2, 2};
    migraphx::shape data_shape{migraphx::shape::float_type, {2, 3}};
    auto dl = mm->add_literal(migraphx::literal{data_shape, data});
    mm->add_instruction(migraphx::make_op("argmin", {{"axis", 1}, {"select_last_index", true}}), dl);
    p.compile(migraphx::make_target("ref"));
    auto result = p.eval({}).back();
    std::vector<int64_t> result_vec;
    result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });
    EXPECT(migraphx::verify::verify_rms_range(result_vec, res_gold));
}

TEST_CASE(argmin_test_select_last_index_1)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    std::vector<float> data      = {-2.0305, 0.853, -2.0305, 1.5706, 0.7545, 0.7545};
    std::vector<int64_t> res_gold = {0, 1};
    migraphx::shape data_shape{migraphx::shape::float_type, {2, 3}};
    auto dl = mm->add_literal(migraphx::literal{data_shape, data});
    mm->add_instruction(migraphx::make_op("argmin", {{"axis", 1}, {"select_last_index", false}}), dl);
    p.compile(migraphx::make_target("ref"));
    auto result = p.eval({}).back();
    std::vector<int64_t> result_vec;
    result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });
    EXPECT(migraphx::verify::verify_rms_range(result_vec, res_gold));
}
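The select_last_index cases added to argmax and argmin above differ only in how ties are broken: for the row {2.0305, -1.853, 2.0305}, the maximum occurs at indices 0 and 2, and the two expected outputs follow from the flag. A small host-side sketch of that tie-breaking rule, independent of MIGraphX (the helper name is made up for illustration):

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Returns the index of the largest element; when 'last' is set, ties resolve
// to the last occurrence instead of the first (the select_last_index behaviour).
std::size_t argmax_1d(const std::vector<float>& v, bool last)
{
    std::size_t best = 0;
    for(std::size_t i = 1; i < v.size(); ++i)
    {
        if(v[i] > v[best] || (last && v[i] == v[best]))
            best = i;
    }
    return best;
}

int main()
{
    std::vector<float> row = {2.0305f, -1.853f, 2.0305f};
    std::cout << argmax_1d(row, false) << "\n"; // 0, matches argmax_test_select_last_index_1
    std::cout << argmax_1d(row, true) << "\n";  // 2, matches argmax_test_select_last_index_0
}
```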
test/ref/isinf.cpp
0 → 100644
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/instruction.hpp>
#include <migraphx/literal.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/program.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/verify.hpp>
#include <test.hpp>

TEST_CASE(isinf_double_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::double_type, {2, 3}};
    auto inf_val = std::numeric_limits<double>::infinity();
    std::vector<double> data0 = {1.2, 5.2, inf_val, -inf_val, 0., 100.};
    auto l1 = mm->add_literal(migraphx::literal{s, data0});
    mm->add_instruction(migraphx::make_op("isinf"), l1);
    p.compile(migraphx::make_target("ref"));
    auto result = p.eval({}).back();
    std::vector<double> results_vector;
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    std::vector<double> gold = {0, 0, 1, 1, 0, 0};
    EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}

TEST_CASE(isinf_float_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    auto inf_val = std::numeric_limits<float>::infinity();
    std::vector<float> data0 = {1.2, 5.2, inf_val, -inf_val, 0., 100.};
    auto l1 = mm->add_literal(migraphx::literal{s, data0});
    mm->add_instruction(migraphx::make_op("isinf"), l1);
    p.compile(migraphx::make_target("ref"));
    auto result = p.eval({}).back();
    std::vector<float> results_vector;
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold = {0, 0, 1, 1, 0, 0};
    EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}

TEST_CASE(isinf_half_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::half_type, {2, 3}};
    auto inf_val = std::numeric_limits<migraphx::half>::infinity();
    migraphx::half a{1.2};
    migraphx::half b{5.2};
    std::vector<migraphx::half> data0 = {a, b, inf_val, -inf_val, b, a};
    auto l1 = mm->add_literal(migraphx::literal{s, data0});
    mm->add_instruction(migraphx::make_op("isinf"), l1);
    p.compile(migraphx::make_target("ref"));
    auto result = p.eval({}).back();
    std::vector<float> results_vector;
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold = {0, 0, 1, 1, 0, 0};
    EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}

TEST_CASE(isinf_dyn_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {{2, 2}, {3, 8}}};
    auto input   = mm->add_parameter("X", s);
    auto inf_val = std::numeric_limits<migraphx::half>::infinity();
    mm->add_instruction(migraphx::make_op("isinf"), input);
    p.compile(migraphx::make_target("ref"));

    std::vector<float> input_data = {1.2, 5.2, inf_val, -inf_val, 0., 100.};
    migraphx::parameter_map params0;
    migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {2, 3}};
    params0["X"] = migraphx::argument(input_fixed_shape0, input_data.data());
    auto result  = p.eval(params0).back();
    std::vector<float> results_vector;
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold = {0, 0, 1, 1, 0, 0};
    EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}
test/ref/multinomial.cpp
...
@@ -24,9 +24,10 @@
#include <migraphx/instruction.hpp>
#include <migraphx/literal.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/program.hpp>
#include <migraphx/onnx.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/verify.hpp>
#include <numeric>
#include <random>
#include <test.hpp>
...
@@ -48,27 +49,37 @@ TEST_CASE(multinomial_test)
    migraphx::shape s{migraphx::shape::float_type, {1, 5}};
    std::vector<int> dist{15, 25, 15, 25, 20};
    std::vector<float> data(5);
    std::transform(dist.begin(), dist.end(), data.begin(), [&](auto d) { return std::log(d); });
    auto input = mm->add_literal(migraphx::literal(s, data));
    std::vector<float> sum(5);
    // convert to float
    std::transform(dist.begin(), dist.end(), data.begin(), [&](auto d) { return d; });
    // take cumulative sum
    std::partial_sum(data.begin(), data.end(), sum.begin(), std::plus<float>());
    // scale probabilities arbitrarily
    float odd_scale = 10000.;
    std::transform(sum.begin(), sum.end(), data.begin(), [&](auto d) { return d * odd_scale; });
    auto maxes    = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {1}}}), input);
    auto mb_maxes = mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 5}}}), maxes);
    auto cdf      = mm->add_instruction(migraphx::make_op("sub"), input, mb_maxes);
    cdf           = mm->add_instruction(migraphx::make_op("exp"), cdf);
    cdf           = mm->add_instruction(migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", false}}), cdf);
    auto input = mm->add_literal(migraphx::literal(s, data));
    mm->add_instruction(migraphx::make_op("multinomial"), cdf, rs_lit);
    mm->add_instruction(migraphx::make_op("multinomial"), input, rs_lit);
    p.compile(migraphx::make_target("ref"));
    auto result = p.eval({}).back();

    // result_vec contains an index, or category label, for each random input value
    std::vector<int32_t> result_vec(sample_size);
    result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });
    // res_dist is a count, or histogram, of the number of samples in each category. This is the
    // sampled distribution.
    std::vector<int> res_dist(5, 0);
    for(const auto& r : result_vec)
        res_dist[r]++;
    // To check the result, normalize the original probability distribution dist
    // and the sampling result res_dist; they should be close
    // Total the unnormalized probabilities
    auto dist_sum = std::accumulate(dist.begin(), dist.end(), 0);
    // Total the number of values returned
    auto res_dist_sum = std::accumulate(res_dist.begin(), res_dist.end(), 0);
    std::vector<float> norm(5);
    std::vector<float> res_norm(5);
...
@@ -78,6 +89,204 @@ TEST_CASE(multinomial_test)
    std::transform(res_dist.begin(), res_dist.end(), res_norm.begin(), [&](auto n) { return static_cast<double>(n) / res_dist_sum; });
    EXPECT(migraphx::verify::verify_range_with_tolerance(
        res_norm, migraphx::verify::expected{norm}, migraphx::verify::tolerance{0.01}));
}

TEST_CASE(multinomial_dyn_test)
{
    // Invokes random_uniform and multinomial ops together, to verify the interface
    // Dynamic Batch dimension input of 2 means there are 2 different probability
    // distribution functions contained in Input_2
    migraphx::program p;
    auto* mm = p.get_main_module();
    size_t sample_size = 100000;
    size_t batch_size  = 2;

    // Shape of the random data
    migraphx::shape rs{migraphx::shape::float_type, {{1, 2}, {2, sample_size + 1}}};
    auto input = mm->add_parameter("Input_1", rs);

    // Runtime randomization seed
    // To seed the random_uniform, we can provide a value by literal or input,
    // or ask the system to auto-seed with random_seed op.
    migraphx::shape seed_shape{migraphx::shape::uint32_type, {migraphx::shape::dynamic_dimension{0, 1}}};
    auto seed_input = mm->add_parameter("Seed", seed_shape);

    // Shape of the probability distribution, which also defines the number of categories
    migraphx::shape s{migraphx::shape::float_type, {{2, 2}, {5, 6}}};
    // Unnormalized distributions for batch size 2:
    //   15, 25, 15, 15, 20
    //   20, 20, 10, 25, 25
    std::vector<int> dist{15, 25, 15, 25, 20, 20, 20, 10, 25, 25};
    // Hard-coded non-normalized, accumulated distribution follows:
    std::vector<float> data{.15f, .40f, .55f, .80f, 1.0f, 20.f, 40.f, 50.f, 75.f, 100.f};
    auto input2  = mm->add_parameter("Input_2", s);
    auto randoms = mm->add_instruction(migraphx::make_op("random_uniform"), seed_input, input);
    mm->add_instruction(migraphx::make_op("multinomial"), input2, randoms);
    p.compile(migraphx::make_target("ref"));

    // Create a dummy input in the shape we want for the random data
    std::vector<float> dummy(sample_size, 0);
    migraphx::shape input_fixed_shape1{migraphx::shape::float_type, {batch_size, sample_size}};
    migraphx::shape input_fixed_shape2{migraphx::shape::float_type, {batch_size, 5}};
    migraphx::parameter_map params0;
    params0["Input_1"] = migraphx::argument(input_fixed_shape1, dummy.data());
    migraphx::shape seed_fixed_shape{migraphx::shape::uint32_type, {1}};
    std::vector<uint32_t> seed_data = {4};
    params0["Seed"]    = migraphx::argument(seed_fixed_shape, seed_data.data());
    params0["Input_2"] = migraphx::argument(input_fixed_shape2, data.data());
    auto result = p.eval(params0).back();
    std::vector<float> result_vec(input_fixed_shape2.elements());
    result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });

    // Make a categorical histogram of output
    std::vector<int> res_dist(5, 0);
    size_t r = 0;
    for(r = 0; r < result_vec.size() / 2; r++)
        res_dist[result_vec[r]]++;
    // histogram for second set of batch
    std::vector<int> res_dist2(5, 0);
    for(; r < result_vec.size(); r++)
        res_dist2[result_vec[r]]++;

    // Rescale or normalize both the input probability distribution and the output
    // histogram, and compare.  Should be close but not identical.
    auto dist_sum     = std::accumulate(dist.begin(), dist.begin() + 5, 0);
    auto res_dist_sum = std::accumulate(res_dist.begin(), res_dist.end(), 0);
    std::vector<float> norm(5);
    std::vector<float> res_norm(5);
    std::transform(dist.begin(), dist.begin() + 5, norm.begin(), [&](auto n) { return static_cast<double>(n) / dist_sum; });
    std::transform(res_dist.begin(), res_dist.end(), res_norm.begin(), [&](auto n) { return static_cast<double>(n) / res_dist_sum; });
    EXPECT(migraphx::verify::verify_range_with_tolerance(
        res_norm, migraphx::verify::expected{norm}, migraphx::verify::tolerance{0.01}));

    // Do the same rescaling for the 2nd in batch, which has a different probability distribution
    dist_sum     = std::accumulate(dist.begin() + 5, dist.end(), 0);
    res_dist_sum = std::accumulate(res_dist2.begin(), res_dist2.end(), 0);
    std::transform(dist.begin() + 5, dist.end(), norm.begin(), [&](auto n) { return static_cast<double>(n) / dist_sum; });
    std::transform(res_dist2.begin(), res_dist2.end(), res_norm.begin(), [&](auto n) { return static_cast<double>(n) / res_dist_sum; });
    EXPECT(migraphx::verify::verify_range_with_tolerance(
        res_norm, migraphx::verify::expected{norm}, migraphx::verify::tolerance{0.01}));
}

TEST_CASE(multinomial_float_dyn_test)
{
    // int data type for random_uniform op and float data type for multinomial.
    migraphx::program p;
    auto* mm = p.get_main_module();
    size_t sample_size = 100000;
    size_t batch_size  = 2;

    // Shape of the random data
    migraphx::shape rs{migraphx::shape::int32_type, {{1, 2}, {2, sample_size + 1}}};
    auto input = mm->add_parameter("Input_1", rs);

    // Runtime randomization seed
    // To seed the random_uniform, we can provide a value by literal or input,
    // or ask the system to auto-seed with random_seed op.
    migraphx::shape seed_shape{migraphx::shape::uint32_type, {migraphx::shape::dynamic_dimension{0, 1}}};
    auto seed_input = mm->add_parameter("Seed", seed_shape);

    // Shape of the probability distribution, which also defines the number of categories
    migraphx::shape s{migraphx::shape::float_type, {{2, 2}, {5, 6}}};
    // Unnormalized distributions for batch size 2:
    //   15, 25, 15, 15, 20
    //   20, 20, 10, 25, 25
    std::vector<int> dist{15, 25, 15, 25, 20, 20, 20, 10, 25, 25};
    // Hard-coded normalized, accumulated distribution follows:
    std::vector<float> data{.15f, .40f, .55f, .80f, 1.0f, .20f, .40f, .50f, .75f, 1.0f};
    auto input2  = mm->add_parameter("Input_2", s);
    auto randoms = mm->add_instruction(migraphx::make_op("random_uniform"), seed_input, input);
    mm->add_instruction(
        migraphx::make_op("multinomial", {{"dtype", migraphx::shape::float_type}}), input2, randoms);
    p.compile(migraphx::make_target("ref"));

    // Create a dummy input in the shape we want for the random data
    std::vector<float> dummy(sample_size, 0);
    migraphx::shape input_fixed_shape1{migraphx::shape::float_type, {batch_size, sample_size}};
    migraphx::shape input_fixed_shape2{migraphx::shape::float_type, {batch_size, 5}};
    migraphx::parameter_map params0;
    params0["Input_1"] = migraphx::argument(input_fixed_shape1, dummy.data());
    migraphx::shape seed_fixed_shape{migraphx::shape::uint32_type, {1}};
    std::vector<uint32_t> seed_data = {4};
    params0["Seed"]    = migraphx::argument(seed_fixed_shape, seed_data.data());
    params0["Input_2"] = migraphx::argument(input_fixed_shape2, data.data());
    auto result = p.eval(params0).back();
    std::vector<float> result_vec(input_fixed_shape2.elements());
    result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });

    // Make a categorical histogram of output
    std::vector<int> res_dist(5, 0);
    size_t r = 0;
    for(r = 0; r < result_vec.size() / 2; r++)
        res_dist[result_vec[r]]++;
    // histogram for second set of batch
    std::vector<int> res_dist2(5, 0);
    for(; r < result_vec.size(); r++)
        res_dist2[result_vec[r]]++;

    // Rescale or normalize both the input probability distribution and the output
    // histogram, and compare.  Should be close but not identical.
    auto dist_sum     = std::accumulate(dist.begin(), dist.begin() + 5, 0);
    auto res_dist_sum = std::accumulate(res_dist.begin(), res_dist.end(), 0);
    std::vector<float> norm(5);
    std::vector<float> res_norm(5);
    std::transform(dist.begin(), dist.begin() + 5, norm.begin(), [&](auto n) { return static_cast<double>(n) / dist_sum; });
    std::transform(res_dist.begin(), res_dist.end(), res_norm.begin(), [&](auto n) { return static_cast<double>(n) / res_dist_sum; });
    EXPECT(migraphx::verify::verify_range_with_tolerance(
        res_norm, migraphx::verify::expected{norm}, migraphx::verify::tolerance{0.01}));

    // Do the same rescaling for the 2nd in batch, which has a different probability distribution
    dist_sum     = std::accumulate(dist.begin() + 5, dist.end(), 0);
    res_dist_sum = std::accumulate(res_dist2.begin(), res_dist2.end(), 0);
    std::transform(dist.begin() + 5, dist.end(), norm.begin(), [&](auto n) { return static_cast<double>(n) / dist_sum; });
    std::transform(res_dist2.begin(), res_dist2.end(), res_norm.begin(), [&](auto n) { return static_cast<double>(n) / res_dist_sum; });
    EXPECT(migraphx::verify::verify_range_with_tolerance(
        res_norm, migraphx::verify::expected{norm}, migraphx::verify::tolerance{0.01}));
}
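The comments in the dynamic multinomial tests note that random_uniform can be seeded from a literal, from an input parameter (as the "Seed" parameter does above), or auto-seeded via the random_seed op. As a minimal sketch only, and assuming the random_seed op takes no inputs as the comment implies (this variant is not part of this commit), the auto-seeded wiring might look like:

```cpp
#include <migraphx/program.hpp>
#include <migraphx/make_op.hpp>

// Sketch: the seed is generated inside the program instead of being passed in
// as the "Seed" parameter used by the tests above.
migraphx::program make_auto_seeded_multinomial()
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape random_shape{migraphx::shape::float_type, {2, 100000}};
    migraphx::shape cdf_shape{migraphx::shape::float_type, {2, 5}};
    auto cdf   = mm->add_parameter("cdf", cdf_shape);            // accumulated distribution
    auto alloc = mm->add_parameter("random_data", random_shape); // defines the sample shape
    auto seed  = mm->add_instruction(migraphx::make_op("random_seed"));
    auto randoms = mm->add_instruction(migraphx::make_op("random_uniform"), seed, alloc);
    mm->add_instruction(migraphx::make_op("multinomial"), cdf, randoms);
    return p;
}
```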