Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
gaoqiong
MIGraphX
Commits
0ef3d40d
Unverified
Commit
0ef3d40d
authored
Jun 17, 2023
by
Umang Yadav
Committed by
GitHub
Jun 17, 2023
Browse files
Merge branch 'develop' into ck-integration-tuning
parents
2ab23275
2d635f91
Changes
8
Hide whitespace changes
Inline
Side-by-side
Showing
8 changed files
with
313 additions
and
91 deletions
+313
-91
src/common.cpp
src/common.cpp
+46
-74
src/include/migraphx/common.hpp
src/include/migraphx/common.hpp
+46
-0
src/include/migraphx/op/broadcast.hpp
src/include/migraphx/op/broadcast.hpp
+7
-4
src/include/migraphx/op/convert.hpp
src/include/migraphx/op/convert.hpp
+11
-1
src/include/migraphx/op/multibroadcast.hpp
src/include/migraphx/op/multibroadcast.hpp
+11
-12
src/include/migraphx/shape.hpp
src/include/migraphx/shape.hpp
+2
-0
test/op_shape_test.cpp
test/op_shape_test.cpp
+83
-0
test/ref_ops_test.cpp
test/ref_ops_test.cpp
+107
-0
No files found.
src/common.cpp
View file @
0ef3d40d
...
...
@@ -31,20 +31,6 @@
namespace
migraphx
{
inline
namespace
MIGRAPHX_INLINE_NS
{
// Example:
// s0 = (3,2,4,5) and s1 = (2,1,1)
//
// In this case we need to broadcast (:,1,1) portion of
// s1 plus broadcast the 1st dimension of s1
// giving output_lens = (3,2,4,5)
//
// Another example:
// s0 = (3,2,1,5) and s1 = (2,7,5)
// In this case we need to broadcast the (:,:,1:,:) axis
// of s0 plus the 1st dimension of s1 giving
// output_lens = (3,2,7,5)
//
std
::
vector
<
std
::
size_t
>
compute_broadcasted_lens
(
std
::
vector
<
std
::
size_t
>
s0
,
std
::
vector
<
std
::
size_t
>
s1
)
{
...
...
@@ -77,32 +63,38 @@ std::vector<shape::dynamic_dimension> compute_broadcasted_dyn_dims(shape s0, sha
}
auto
offset
=
s1
.
ndim
()
-
s0
.
ndim
();
std
::
vector
<
shape
::
dynamic_dimension
>
out_dims
(
s1
.
dyn_dims
());
std
::
transform
(
s0
.
dyn_dims
().
cbegin
(),
s0
.
dyn_dims
().
cend
(),
s1
.
dyn_dims
().
cbegin
()
+
offset
,
out_dims
.
begin
()
+
offset
,
[
&
](
auto
a
,
auto
b
)
{
if
(
a
==
b
)
{
return
a
;
}
else
if
(
a
==
1
or
b
==
1
)
{
// setting optimals to empty, may need to be changed
return
shape
::
dynamic_dimension
{
std
::
max
(
a
.
min
,
b
.
min
),
std
::
max
(
a
.
max
,
b
.
max
)};
}
else
{
MIGRAPHX_THROW
(
"COMPUTE_BROADCASTED_DYN_DIMS: dynamic shapes {"
+
migraphx
::
to_string_range
(
s0
.
dyn_dims
())
+
"} and {"
+
migraphx
::
to_string_range
(
s1
.
dyn_dims
())
+
"} mismatch!"
);
}
});
std
::
transform
(
s0
.
dyn_dims
().
cbegin
(),
s0
.
dyn_dims
().
cend
(),
s1
.
dyn_dims
().
cbegin
()
+
offset
,
out_dims
.
begin
()
+
offset
,
[
&
](
auto
a
,
auto
b
)
{
if
(
a
==
b
or
b
==
1
)
{
return
a
;
}
else
if
(
a
==
1
)
{
return
b
;
}
else
{
MIGRAPHX_THROW
(
"COMPUTE_BROADCASTED_DYN_DIMS: dynamic shapes {"
+
migraphx
::
to_string_range
(
s0
.
dyn_dims
())
+
"} and {"
+
migraphx
::
to_string_range
(
s1
.
dyn_dims
())
+
"} mismatch!"
);
}
});
return
out_dims
;
}
// Compute the common (broadcasted) dimensions of a list of fixed shapes
std
::
vector
<
shape
::
dynamic_dimension
>
compute_common_dyn_dims
(
const
std
::
vector
<
shape
>&
shapes
)
{
auto
ret_shape
=
shapes
.
at
(
0
);
std
::
for_each
(
shapes
.
cbegin
()
+
1
,
shapes
.
cend
(),
[
&
](
auto
s
)
{
ret_shape
=
shape
{
ret_shape
.
type
(),
compute_broadcasted_dyn_dims
(
ret_shape
,
s
)};
});
return
ret_shape
.
dyn_dims
();
}
std
::
vector
<
std
::
size_t
>
compute_common_lens
(
const
std
::
vector
<
shape
>&
shapes
)
{
assert
(
not
shapes
.
empty
());
...
...
@@ -148,52 +140,35 @@ shape common_shape(const std::vector<shape>& shapes)
return
{
compute_common_types
(
shapes
),
compute_common_lens
(
shapes
)};
}
/**
* @brief Creates and adds instructions to convert input arguments to common shapes and types
* by adding multi-broadcast and type convert operations. This is a utility function for creating
* operations where the shape and type of inputs need to match. It supports both dynamic and
* static-shaped arguments.
*
* @param m containing module for instruction
* @param ins insertion location in instruction list
* @param inputs instructions to use as argument list; also, the shapes
* attached to each instruction_ref are considered for broadcasting
* @return std::vector<instruction_ref> a modified argument list
*/
std
::
vector
<
instruction_ref
>
insert_common_args
(
module
&
m
,
instruction_ref
ins
,
std
::
vector
<
instruction_ref
>
inputs
)
{
if
(
std
::
any_of
(
inputs
.
cbegin
(),
inputs
.
cend
(),
[](
auto
input
)
{
return
input
->
get_shape
().
dynamic
();
}))
{
// currently only handles the binary case
if
(
inputs
.
size
()
!=
2
)
{
MIGRAPHX_THROW
(
"INSERT_COMMON_OP: not handled; "
+
migraphx
::
to_string
(
inputs
.
size
())
+
" inputs. Requires exactly two inputs if any are dynamic shape"
);
}
auto
c_type
=
compute_common_types
(
to_shapes
(
inputs
));
auto
c_dyn_dims
=
compute_broadcasted_dyn_dims
(
inputs
[
0
]
->
get_shape
(),
inputs
[
1
]
->
get_shape
());
auto
input_shapes
=
to_shapes
(
inputs
);
auto
c_type
=
compute_common_types
(
input_shapes
);
auto
c_dyn_dims
=
compute_common_dyn_dims
(
input_shapes
);
// following should work for a static or dynamic shape
if
(
inputs
[
0
]
->
get_shape
().
dyn_dims
()
!=
c_dyn_dims
)
{
inputs
[
0
]
=
m
.
insert_instruction
(
ins
,
make_op
(
"multibroadcast"
,
{{
"out_dyn_dims"
,
to_value
(
c_dyn_dims
)}}),
inputs
[
0
],
inputs
[
1
]);
}
if
(
inputs
[
1
]
->
get_shape
().
dyn_dims
()
!=
c_dyn_dims
)
{
inputs
[
1
]
=
m
.
insert_instruction
(
ins
,
make_op
(
"multibroadcast"
,
{{
"out_dyn_dims"
,
to_value
(
c_dyn_dims
)}}),
inputs
[
1
],
inputs
[
0
]);
ins
,
make_op
(
"multibroadcast"
,
{{
"out_dyn_dims"
,
to_value
(
c_dyn_dims
)}}),
inputs
);
}
std
::
transform
(
inputs
.
begin
()
+
1
,
inputs
.
end
(),
inputs
.
begin
()
+
1
,
[
&
](
auto
input
)
{
// uses previous multibroadcast to avoid recalculating the common shape from the
// full set of input shapes at runtime
if
(
input
->
get_shape
().
dyn_dims
()
!=
c_dyn_dims
)
{
return
m
.
insert_instruction
(
ins
,
make_op
(
"multibroadcast"
,
{{
"out_dyn_dims"
,
to_value
(
c_dyn_dims
)}}),
input
,
inputs
[
0
]);
}
return
input
;
});
std
::
transform
(
inputs
.
begin
(),
inputs
.
end
(),
inputs
.
begin
(),
[
&
](
auto
input
)
{
if
(
input
->
get_shape
().
type
()
!=
c_type
)
{
...
...
@@ -236,9 +211,6 @@ instruction_ref insert_common_op(module& m,
return
m
.
insert_instruction
(
ins
,
op
,
insert_common_args
(
m
,
ins
,
std
::
move
(
inputs
)));
}
/**
* Wrapper for insert_common_args() which inserts operation at the end of the module.
*/
instruction_ref
add_common_op
(
module
&
m
,
const
operation
&
op
,
std
::
vector
<
instruction_ref
>
inputs
)
{
return
insert_common_op
(
m
,
m
.
end
(),
op
,
std
::
move
(
inputs
));
...
...
src/include/migraphx/common.hpp
View file @
0ef3d40d
...
...
@@ -34,6 +34,26 @@ inline namespace MIGRAPHX_INLINE_NS {
struct
module
;
struct
operation
;
/**
* Broadcasting works by comparing the shapes element-wise starting with
* the trailing (right-most) dimensions and working leftwards. This is equivalent
* to what is done in NumPy.
* example 1:
* s0 = (3,2,4,5) and s1 = (2,1,1)
* In this case we need to broadcast (:,1,1) portion of
* s1 plus broadcast the 1st dimension of s0
* giving output_lens = (3,2,4,5)
*
* example 2:
* s0 = (3,2,1,5) and s1 = (2,7,5)
* In this case we need to broadcast the (:,:,1:,:) axis
* of s0 plus the 1st dimension of s1 giving
* output_lens = (3,2,7,5)
*
* example 3:
* s0 = (4, 1, 1) and s1 = (3, 4)
* output_lens = (4, 3, 4)
*/
std
::
vector
<
std
::
size_t
>
compute_broadcasted_lens
(
std
::
vector
<
std
::
size_t
>
s0
,
std
::
vector
<
std
::
size_t
>
s1
);
...
...
@@ -41,6 +61,28 @@ std::vector<shape::dynamic_dimension> compute_broadcasted_dyn_dims(shape s0, sha
shape
common_shape
(
const
std
::
vector
<
shape
>&
shapes
);
/**
* @brief Compute the common (broadcasted) dimensions of a list of fixed shapes
*/
std
::
vector
<
std
::
size_t
>
compute_common_lens
(
const
std
::
vector
<
shape
>&
shapes
);
/**
* @brief Compute the common (broadcasted) dynamic dimensions of a list of dynamic shapes
*/
std
::
vector
<
shape
::
dynamic_dimension
>
compute_common_dyn_dims
(
const
std
::
vector
<
shape
>&
shapes
);
/**
* @brief Creates and adds instructions to convert input arguments to common shapes and types
* by adding multi-broadcast and type convert operations. This is a utility function for creating
* operations where the shape and type of inputs need to match. It supports both dynamic and
* static-shaped arguments.
*
* @param m containing module for instruction
* @param ins insertion location in instruction list
* @param inputs instructions to use as argument list; also, the shapes
* attached to each instruction_ref are considered for broadcasting
* @return std::vector<instruction_ref> a modified argument list
*/
std
::
vector
<
instruction_ref
>
insert_common_args
(
module
&
m
,
instruction_ref
ins
,
std
::
vector
<
instruction_ref
>
inputs
);
...
...
@@ -50,6 +92,10 @@ instruction_ref insert_common_op(module& m,
instruction_ref
ins
,
const
operation
&
op
,
std
::
vector
<
instruction_ref
>
inputs
);
/**
* @brief Wrapper for insert_common_args() which inserts operation at the end of the module.
*/
instruction_ref
add_common_op
(
module
&
m
,
const
operation
&
op
,
std
::
vector
<
instruction_ref
>
inputs
);
}
// namespace MIGRAPHX_INLINE_NS
...
...
src/include/migraphx/op/broadcast.hpp
View file @
0ef3d40d
...
...
@@ -37,10 +37,13 @@ namespace op {
* 1 input version:
* Broadcasts a tensor from the original shape to the broadcast_lens by setting the stride of
* broadcasted dimensions to zero. `axis` attribute for a 1D input shape is the output dimension
* that stays the same. ex: broadcasting shape [1024] -> [4, 1024, 3] has axis = 1 For higher rank
* input shapes, axis is an offset parameter for the broadcasting. Such that this operator would
* work in the opposite direction of NumPy broadcasting. ex: broadcasting shape [2, 2] -> [2, 2, 3]
* with axis = 0
* that stays the same.
* ex: broadcasting shape [1024] -> [4, 1024, 3] has axis = 1.
*
* For higher rank input shapes, axis is an offset parameter for the broadcasting.
* Such that this operator would work in the opposite direction of NumPy broadcasting
* (left-most to rightwards element-wise comparison)
* ex: broadcasting shape [2, 2] -> [2, 2, 3] with axis = 0
*
* 2 input version:
* Broadcast the first input 1D shape into the second input shape based on the axis parameter.
...
...
src/include/migraphx/op/convert.hpp
View file @
0ef3d40d
...
...
@@ -66,7 +66,17 @@ struct convert : unary<convert>
auto
type
=
target_type
;
return
[
type
](
auto
x
)
{
auto
y
=
x
;
shape
::
visit
(
type
,
[
&
](
auto
as
)
{
y
=
std
::
min
(
std
::
max
(
as
(
x
),
as
.
min
()),
as
.
max
());
});
shape
::
visit
(
type
,
[
&
](
auto
as
)
{
// clamping value between target_type's max and min doesn't work for NaNs,
if
(
std
::
isnan
(
x
))
{
y
=
as
.
nan
();
}
else
{
y
=
std
::
min
(
std
::
max
(
as
(
x
),
as
.
min
()),
as
.
max
());
}
});
return
y
;
};
}
...
...
src/include/migraphx/op/multibroadcast.hpp
View file @
0ef3d40d
...
...
@@ -36,11 +36,9 @@ namespace op {
/**
* Broadcast multiple dimensions between two tensors.
* Two versions of this operator: one input and two inputs.
* One input version uses output_lens attribute and broadcasts to it (does not support
* dynamic shape input).
*
* Two inputs version broadcasts the first input to the common shape of the two inputs.
* Two versions of this operator: 1 input and 2+ inputs.
* One input version uses output_lens attribute and broadcasts to it.
* 2+ inputs version broadcasts first input to the common shape at evaluation time.
*/
struct
multibroadcast
{
...
...
@@ -59,12 +57,12 @@ struct multibroadcast
shape
compute_shape
(
std
::
vector
<
shape
>
inputs
)
const
{
check_shapes
{
inputs
,
*
this
,
true
}.
has
(
1
,
2
);
check_shapes
{
inputs
,
*
this
,
true
}.
has
_at_least
(
1
);
auto
t
=
inputs
.
at
(
0
).
type
();
auto
s0
=
inputs
.
at
(
0
);
if
(
s0
.
max_lens
().
empty
()
)
if
(
s0
.
ndim
()
<
1
)
{
MIGRAPHX_THROW
(
"MULTIBROADCAST: input dimensions should be > 0"
);
}
...
...
@@ -107,19 +105,20 @@ struct multibroadcast
}
else
{
//
two
inputs
auto
s1
=
inputs
.
at
(
1
);
if
(
s0
.
dynamic
()
or
s1
.
dynamic
())
//
2+
inputs
if
(
std
::
any_of
(
inputs
.
cbegin
(),
inputs
.
cend
(),
[](
auto
input
)
{
return
input
.
dynamic
()
;
})
)
{
if
(
not
output_dyn_dims
.
empty
())
{
return
{
t
,
output_dyn_dims
};
}
return
{
t
,
compute_
broadcasted
_dyn_dims
(
s0
,
s1
)};
return
{
t
,
compute_
common
_dyn_dims
(
inputs
)};
}
else
{
auto
bcast_lens
=
compute_broadcasted_lens
(
s0
.
lens
(),
s1
.
lens
());
// output_lens will not be set for 2+ input version
auto
bcast_lens
=
compute_common_lens
(
inputs
);
auto
offset
=
bcast_lens
.
size
()
-
s0
.
lens
().
size
();
auto
bcast_strides
=
make_bcast_strides
(
bcast_lens
,
offset
);
return
{
t
,
std
::
move
(
bcast_lens
),
std
::
move
(
bcast_strides
)};
...
...
src/include/migraphx/shape.hpp
View file @
0ef3d40d
...
...
@@ -299,6 +299,8 @@ struct shape
// Lowest finite value representable by `type`. Uses numeric_limits::lowest()
// (most negative value) rather than numeric_limits::min(), which for floating
// point would be the smallest positive normal value.
type min() const { return std::numeric_limits<type>::lowest(); }
// Quiet NaN of `type`, used when converting NaN values across types.
// NOTE(review): for integral `type`, numeric_limits::quiet_NaN() yields 0 —
// callers presumably only use this for floating-point types; confirm.
type nan() const { return std::numeric_limits<type>::quiet_NaN(); }
template
<
class
U
>
type
operator
()(
U
u
)
const
{
...
...
test/op_shape_test.cpp
View file @
0ef3d40d
...
...
@@ -1657,6 +1657,89 @@ TEST_CASE(multibroadcast_2in_static_static_error0)
throws_shape
(
migraphx
::
make_op
(
"multibroadcast"
),
b_shape
,
a_shape
);
}
// Shape inference for the 3-input multibroadcast: the common (NumPy-style)
// broadcast of {3,6}, {1,2,3,6} and {5,1,1,1} is {5,2,3,6}; the first input's
// strides are kept (zero stride on broadcast axes), so the result depends on
// which shape is listed first.
TEST_CASE(multibroadcast_3in_static)
{
    migraphx::shape a_shape{migraphx::shape::float_type, {3, 6}};
    migraphx::shape b_shape{migraphx::shape::float_type, {1, 2, 3, 6}};
    migraphx::shape c_shape{migraphx::shape::float_type, {5, 1, 1, 1}};
    // a first: its {3,6} maps onto the trailing axes -> strides {0,0,6,1}
    expect_shape(migraphx::shape{migraphx::shape::float_type, {5, 2, 3, 6}, {0, 0, 6, 1}},
                 migraphx::make_op("multibroadcast"),
                 a_shape,
                 b_shape,
                 c_shape);
    // b first: {1,2,3,6} -> strides {0,18,6,1} (leading 1 broadcast to 5)
    expect_shape(migraphx::shape{migraphx::shape::float_type, {5, 2, 3, 6}, {0, 18, 6, 1}},
                 migraphx::make_op("multibroadcast"),
                 b_shape,
                 a_shape,
                 c_shape);
    // c first: {5,1,1,1} -> only the leading axis is non-broadcast
    expect_shape(migraphx::shape{migraphx::shape::float_type, {5, 2, 3, 6}, {1, 0, 0, 0}},
                 migraphx::make_op("multibroadcast"),
                 c_shape,
                 a_shape,
                 b_shape);
}
// Same as multibroadcast_3in_static but with four inputs, adding a 1-D {6}
// shape; the common broadcast shape stays {5,2,3,6} and the first input's
// layout determines the output strides.
TEST_CASE(multibroadcast_4in_static)
{
    migraphx::shape a_shape{migraphx::shape::float_type, {3, 6}};
    migraphx::shape b_shape{migraphx::shape::float_type, {2, 3, 6}};
    migraphx::shape c_shape{migraphx::shape::float_type, {5, 1, 1, 1}};
    migraphx::shape d_shape{migraphx::shape::float_type, {6}};
    expect_shape(migraphx::shape{migraphx::shape::float_type, {5, 2, 3, 6}, {0, 0, 6, 1}},
                 migraphx::make_op("multibroadcast"),
                 a_shape,
                 b_shape,
                 c_shape,
                 d_shape);
    expect_shape(migraphx::shape{migraphx::shape::float_type, {5, 2, 3, 6}, {0, 18, 6, 1}},
                 migraphx::make_op("multibroadcast"),
                 b_shape,
                 a_shape,
                 c_shape,
                 d_shape);
    expect_shape(migraphx::shape{migraphx::shape::float_type, {5, 2, 3, 6}, {1, 0, 0, 0}},
                 migraphx::make_op("multibroadcast"),
                 c_shape,
                 a_shape,
                 b_shape,
                 d_shape);
    // d first: {6} maps onto the last axis only -> strides {0,0,0,1}
    expect_shape(migraphx::shape{migraphx::shape::float_type, {5, 2, 3, 6}, {0, 0, 0, 1}},
                 migraphx::make_op("multibroadcast"),
                 d_shape,
                 a_shape,
                 b_shape,
                 c_shape);
}
// 3-input multibroadcast mixing two dynamic shapes with one static shape:
// the static {5,1,1,1} contributes the leading {5,5} dimension and the
// dynamic dimensions are broadcast element-wise from the right.
TEST_CASE(multibroadcast_3in_dyn_static)
{
    std::vector<migraphx::shape::dynamic_dimension> a{{1, 4}, {2, 4, {2}}, {2, 4}};
    migraphx::shape a_shape{migraphx::shape::float_type, a};
    std::vector<migraphx::shape::dynamic_dimension> b{{2, 4, {2}}, {2, 4}};
    migraphx::shape b_shape{migraphx::shape::float_type, b};
    migraphx::shape c_shape{migraphx::shape::float_type, {5, 1, 1, 1}};
    migraphx::shape expected_shape{migraphx::shape::float_type,
                                   {{5, 5}, {1, 4}, {2, 4, {2}}, {2, 4}}};
    // The common dynamic shape is the same regardless of input order.
    expect_shape(expected_shape, migraphx::make_op("multibroadcast"), a_shape, b_shape, c_shape);
    expect_shape(expected_shape, migraphx::make_op("multibroadcast"), b_shape, a_shape, c_shape);
    expect_shape(expected_shape, migraphx::make_op("multibroadcast"), c_shape, a_shape, b_shape);
}
// 3-input multibroadcast with all-dynamic shapes: dimensions of size {1,1}
// broadcast against the other input's range (e.g. {1,1} vs {1,4} -> {1,4}),
// and the extra leading {1,5,{1,5}} dimension of c is carried through.
TEST_CASE(multibroadcast_3in_dyn_dyn)
{
    std::vector<migraphx::shape::dynamic_dimension> a{{1, 4}, {2, 4, {2}}, {2, 4}};
    migraphx::shape a_shape{migraphx::shape::float_type, a};
    std::vector<migraphx::shape::dynamic_dimension> b{{2, 4, {2}}, {2, 4}};
    migraphx::shape b_shape{migraphx::shape::float_type, b};
    std::vector<migraphx::shape::dynamic_dimension> c{
        {1, 5, {1, 5}}, {1, 1}, {2, 4, {2}}, {2, 4}};
    migraphx::shape c_shape{migraphx::shape::float_type, c};
    migraphx::shape expected_shape{migraphx::shape::float_type,
                                   {{1, 5, {1, 5}}, {1, 4}, {2, 4, {2}}, {2, 4}}};
    // The common dynamic shape is the same regardless of input order.
    expect_shape(expected_shape, migraphx::make_op("multibroadcast"), a_shape, b_shape, c_shape);
    expect_shape(expected_shape, migraphx::make_op("multibroadcast"), b_shape, a_shape, c_shape);
    expect_shape(expected_shape, migraphx::make_op("multibroadcast"), c_shape, a_shape, b_shape);
}
TEST_CASE
(
multinomial
)
{
migraphx
::
shape
s
{
migraphx
::
shape
::
float_type
,
{
2
,
5
}};
...
...
test/ref_ops_test.cpp
View file @
0ef3d40d
...
...
@@ -1849,6 +1849,80 @@ TEST_CASE(cosh_dyn_test)
EXPECT(migraphx::verify_range(results_vector, gold));
}
// Converting to a wider float type (half -> float) must preserve NaN rather
// than clamping it to the target type's min/max (plain clamp on NaN is
// unspecified, which is what convert's NaN special-case guards against).
TEST_CASE(convert_nan_upcast_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::half_type, {2, 2}};
    // Fill all four elements with half-precision quiet NaN.
    std::vector<migraphx::half> data(4, std::numeric_limits<migraphx::half>::quiet_NaN());
    auto l = mm->add_literal(migraphx::literal{s, data});
    mm->add_instruction(
        migraphx::make_op("convert", {{"target_type", migraphx::shape::float_type}}), l);
    p.compile(migraphx::make_target("ref"));
    auto result = p.eval({}).back();
    std::vector<float> results_vector(4, -1);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    // Every converted element must still be NaN.
    EXPECT(std::all_of(
        results_vector.begin(), results_vector.end(), [](const auto& x) { return std::isnan(x); }));
}
// Converting to a narrower float type (double -> float) must preserve NaN;
// without the NaN special-case the min/max clamp would produce a finite value.
TEST_CASE(convert_nan_downcast_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::double_type, {2, 2}};
    std::vector<double> data(4, std::numeric_limits<double>::quiet_NaN());
    auto l = mm->add_literal(migraphx::literal{s, data});
    mm->add_instruction(
        migraphx::make_op("convert", {{"target_type", migraphx::shape::float_type}}), l);
    p.compile(migraphx::make_target("ref"));
    auto result = p.eval({}).back();
    std::vector<float> results_vector(4, -1);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    // Every converted element must still be NaN.
    EXPECT(std::all_of(
        results_vector.begin(), results_vector.end(), [](const auto& x) { return std::isnan(x); }));
}
// NaN must survive a chain of two conversions (double -> float -> half),
// i.e. the NaN special-case applies at every convert step, not just one.
TEST_CASE(convert_nan_double_convert_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::double_type, {2, 2}};
    std::vector<double> data(4, std::numeric_limits<double>::quiet_NaN());
    auto l = mm->add_literal(migraphx::literal{s, data});
    auto f_l = mm->add_instruction(
        migraphx::make_op("convert", {{"target_type", migraphx::shape::float_type}}), l);
    mm->add_instruction(migraphx::make_op("convert", {{"target_type", migraphx::shape::half_type}}),
                        f_l);
    p.compile(migraphx::make_target("ref"));
    auto result = p.eval({}).back();
    std::vector<migraphx::half> results_vector(4);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    // Every element of the half-precision result must still be NaN.
    EXPECT(std::all_of(
        results_vector.begin(), results_vector.end(), [](const auto& x) { return std::isnan(x); }));
}
// NaN must survive a round trip: float -> float (identity convert) ->
// half (downcast) -> float (upcast back).
TEST_CASE(convert_nan_convert_updown_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {2, 2}};
    std::vector<float> data(4, std::numeric_limits<float>::quiet_NaN());
    auto l = mm->add_literal(migraphx::literal{s, data});
    // Same-type convert (float -> float) — exercises the identity path.
    auto f_l = mm->add_instruction(
        migraphx::make_op("convert", {{"target_type", migraphx::shape::float_type}}), l);
    auto h_l = mm->add_instruction(
        migraphx::make_op("convert", {{"target_type", migraphx::shape::half_type}}), f_l);
    mm->add_instruction(
        migraphx::make_op("convert", {{"target_type", migraphx::shape::float_type}}), h_l);
    p.compile(migraphx::make_target("ref"));
    auto result = p.eval({}).back();
    std::vector<float> results_vector(4);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    // After the full up/down chain every element must still be NaN.
    EXPECT(std::all_of(
        results_vector.begin(), results_vector.end(), [](const auto& x) { return std::isnan(x); }));
}
TEST_CASE(deconv_1d_test)
{
migraphx::shape s{migraphx::shape::float_type, {1, 1, 3}};
...
...
@@ -4758,6 +4832,39 @@ TEST_CASE(multibroadcast_2in_dyn_test)
EXPECT(output(1, 1) == -3);
}
// Reference-evaluation test for the 2+ input multibroadcast with dynamic
// shapes: a static {2} literal is broadcast against two dynamic parameters;
// the common shape is computed at evaluation time from the runtime arguments.
TEST_CASE(multibroadcast_3in_dyn_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape a_shape{migraphx::shape::int32_type, {{2, 4}, {2, 2}}};
    migraphx::shape b_shape{migraphx::shape::int32_type, {2}};
    migraphx::shape c_shape{migraphx::shape::int32_type, {{1, 4, {2, 4}}, {2, 4}, {2, 2}}};
    auto l1 = mm->add_parameter("a", a_shape);
    std::vector<int32_t> b_data{-2, -3};
    auto l2 = mm->add_literal(migraphx::literal{b_shape, b_data});
    auto l3 = mm->add_parameter("c", c_shape);
    mm->add_instruction(migraphx::make_op("multibroadcast"), l2, l1, l3);
    p.compile(migraphx::make_target("ref"));
    std::vector<int32_t> a_data(4, 0);
    std::vector<int32_t> c_data(8, 0);
    migraphx::parameter_map params;
    // Fixed argument shapes must use the parameters' declared int32_type;
    // the original used float_type here, mismatching the int32 parameters
    // (it only worked because sizeof(float) == sizeof(int32_t)).
    migraphx::shape input_fixed_shape_a{migraphx::shape::int32_type, {2, 2}};
    migraphx::shape input_fixed_shape_c{migraphx::shape::int32_type, {2, 2, 2}};
    params["a"] = migraphx::argument(input_fixed_shape_a, a_data.data());
    params["c"] = migraphx::argument(input_fixed_shape_c, c_data.data());
    auto result = p.eval(params).back();
    auto output = result.get<int32_t>();
    // The {2} literal {-2,-3} is broadcast along the last axis of {2,2,2}.
    EXPECT(output(0, 0, 0) == -2);
    EXPECT(output(0, 0, 1) == -3);
    EXPECT(output(0, 1, 0) == -2);
    EXPECT(output(0, 1, 1) == -3);
    EXPECT(output(1, 0, 0) == -2);
    EXPECT(output(1, 0, 1) == -3);
    EXPECT(output(1, 1, 0) == -2);
    EXPECT(output(1, 1, 1) == -3);
}
TEST_CASE(multinomial_test)
{
migraphx::program p;
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment