Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
gaoqiong
MIGraphX
Commits
33a41ba0
Commit
33a41ba0
authored
Jun 14, 2019
by
Paul
Browse files
Merge branch 'develop' into batch-concat
parents
b092d017
a7bd5ded
Changes
210
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
2425 additions
and
400 deletions
+2425
-400
src/targets/gpu/softmax.cpp
src/targets/gpu/softmax.cpp
+14
-0
src/targets/gpu/tanh.cpp
src/targets/gpu/tanh.cpp
+2
-2
src/targets/gpu/target.cpp
src/targets/gpu/target.cpp
+16
-5
src/targets/gpu/write_literals.cpp
src/targets/gpu/write_literals.cpp
+7
-0
src/tf/tf.cpp
src/tf/tf.cpp
+251
-44
test/auto_contiguous_test.cpp
test/auto_contiguous_test.cpp
+4
-3
test/common_subexpression_elimination_test.cpp
test/common_subexpression_elimination_test.cpp
+1
-1
test/cpu_dot_op_test.cpp
test/cpu_dot_op_test.cpp
+1096
-0
test/cpu_ops_test.cpp
test/cpu_ops_test.cpp
+96
-306
test/cpu_rnn_ops_test.cpp
test/cpu_rnn_ops_test.cpp
+6
-1
test/dead_code_elimination_test.cpp
test/dead_code_elimination_test.cpp
+54
-1
test/eliminate_allocation_test.cpp
test/eliminate_allocation_test.cpp
+9
-1
test/eliminate_concat_test.cpp
test/eliminate_concat_test.cpp
+17
-1
test/eliminate_contiguous_test.cpp
test/eliminate_contiguous_test.cpp
+45
-1
test/eliminate_identity_test.cpp
test/eliminate_identity_test.cpp
+69
-0
test/eliminate_pad_test.cpp
test/eliminate_pad_test.cpp
+105
-0
test/fwd_conv_batchnorm_rewrite_test.cpp
test/fwd_conv_batchnorm_rewrite_test.cpp
+108
-1
test/gpu/adjust_allocation.cpp
test/gpu/adjust_allocation.cpp
+71
-0
test/gpu/miopen.cpp
test/gpu/miopen.cpp
+446
-32
test/memory_coloring_test.cpp
test/memory_coloring_test.cpp
+8
-1
No files found.
src/targets/gpu/softmax.cpp
View file @
33a41ba0
#include <migraphx/gpu/softmax.hpp>
#include <migraphx/gpu/device/softmax.hpp>
#include <migraphx/gpu/context.hpp>
namespace
migraphx
{
...
...
@@ -30,6 +31,19 @@ argument miopen_softmax::compute(context& ctx,
return
args
[
1
];
}
shape
hip_softmax
::
compute_shape
(
const
std
::
vector
<
shape
>&
inputs
)
const
{
check_shapes
{
inputs
,
*
this
}.
has
(
2
).
standard
();
return
op
.
compute_shape
({
inputs
.
at
(
0
)});
}
argument
hip_softmax
::
compute
(
context
&
ctx
,
const
shape
&
output_shape
,
const
std
::
vector
<
argument
>&
args
)
const
{
return
device
::
softmax
(
ctx
.
get_stream
().
get
(),
output_shape
,
args
,
op
.
axis
);
}
}
// namespace gpu
}
// namespace MIGRAPHX_INLINE_NS
}
// namespace migraphx
src/targets/gpu/tanh.cpp
View file @
33a41ba0
...
...
@@ -7,8 +7,8 @@ namespace gpu {
shape
miopen_tanh
::
compute_shape
(
const
std
::
vector
<
shape
>&
inputs
)
const
{
check_shapes
{
inputs
,
*
this
}.
has
(
2
).
not_broadcast
ed
();
return
inputs
.
at
(
1
);
check_shapes
{
inputs
,
*
this
}.
has
(
2
).
pack
ed
();
return
inputs
.
at
(
0
);
}
argument
miopen_tanh
::
compute
(
context
&
ctx
,
...
...
src/targets/gpu/target.cpp
View file @
33a41ba0
...
...
@@ -11,26 +11,34 @@
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/simplify_reshapes.hpp>
#include <migraphx/simplify_algebra.hpp>
#include <migraphx/
constant_
propagate.hpp>
#include <migraphx/propagate
_constant
.hpp>
#include <migraphx/eliminate_contiguous.hpp>
#include <migraphx/common_subexpression_elimination.hpp>
#include <migraphx/fwd_conv_batchnorm_rewrite.hpp>
#include <migraphx/rewrite_rnn.hpp>
#include <migraphx/eliminate_concat.hpp>
#include <migraphx/eliminate_identity.hpp>
#include <migraphx/gpu/concat_gpu_opt.hpp>
#include <migraphx/gpu/schedule_model.hpp>
#include <migraphx/gpu/adjust_allocation.hpp>
#include <migraphx/eliminate_pad.hpp>
#include <migraphx/schedule.hpp>
namespace
migraphx
{
inline
namespace
MIGRAPHX_INLINE_NS
{
namespace
gpu
{
MIGRAPHX_DECLARE_ENV_VAR
(
MIGRAPHX_ENABLE_SCHEDULE_PASS
)
std
::
vector
<
pass
>
target
::
get_passes
(
migraphx
::
context
&
gctx
)
const
{
auto
&
ctx
=
any_cast
<
context
>
(
gctx
);
// clang-format off
return
{
dead_code_elimination
{},
eliminate_identity
{},
eliminate_pad
{},
dead_code_elimination
{},
fwd_conv_batchnorm_rewrite
{},
dead_code_elimination
{},
...
...
@@ -40,26 +48,29 @@ std::vector<pass> target::get_passes(migraphx::context& gctx) const
//dead_code_elimination{},
simplify_algebra
{},
dead_code_elimination
{},
constant_
propagate
{},
propagate
_constant
{},
dead_code_elimination
{},
auto_contiguous
{},
//
simplify_reshapes{},
simplify_reshapes
{},
dead_code_elimination
{},
lowering
{
ctx
},
eliminate_concat
{
concat_gpu_optimization
{}},
dead_code_elimination
{},
eliminate_contiguous
{},
dead_code_elimination
{},
adjust_allocation
{},
dead_code_elimination
{},
fuse_ops
{
&
ctx
},
dead_code_elimination
{},
write_literals
{
&
ctx
},
schedule
{
gpu
::
schedule_model
{
ctx
.
get_current_device
().
nstreams
()}},
schedule
{
gpu
::
schedule_model
{
ctx
.
get_current_device
().
nstreams
()}
,
enabled
(
MIGRAPHX_ENABLE_SCHEDULE_PASS
{})
},
memory_coloring
{
"hip::allocate"
},
dead_code_elimination
{},
eliminate_workspace
{},
eliminate_allocation
{
"hip::allocate"
},
check_context
<
context
>
{},
dead_code_elimination
{}
dead_code_elimination
{},
eliminate_identity
{}
};
// clang-format on
}
...
...
src/targets/gpu/write_literals.cpp
View file @
33a41ba0
...
...
@@ -14,6 +14,13 @@ struct hip_load_literal
{
shape
s
;
std
::
size_t
n
=
0
;
template
<
class
Self
,
class
F
>
static
auto
reflect
(
Self
&
self
,
F
f
)
{
return
pack
(
f
(
self
.
s
,
"shape"
),
f
(
self
.
n
,
"id"
));
}
std
::
string
name
()
const
{
return
"hip::load_literal"
;
}
shape
compute_shape
(
const
std
::
vector
<
shape
>&
inputs
)
const
{
...
...
src/tf/tf.cpp
View file @
33a41ba0
...
...
@@ -17,6 +17,7 @@
#include <migraphx/instruction.hpp>
#include <migraphx/config.hpp>
#include <migraphx/tf.hpp>
#include <migraphx/pad_calc.hpp>
namespace
migraphx
{
inline
namespace
MIGRAPHX_INLINE_NS
{
...
...
@@ -24,7 +25,7 @@ inline namespace MIGRAPHX_INLINE_NS {
struct
tf_parser
{
using
attribute_map
=
std
::
unordered_map
<
std
::
string
,
tensorflow
::
AttrValue
>
;
using
node_map
=
std
::
unordered_
map
<
std
::
string
,
tensorflow
::
NodeDef
>
;
using
node_map
=
std
::
map
<
std
::
string
,
tensorflow
::
NodeDef
>
;
// using input_node_map = std::unordered_map<std::string, std::unordered_set<std::string>>;
using
op_func
=
std
::
function
<
instruction_ref
(
attribute_map
,
std
::
vector
<
instruction_ref
>
)
>
;
...
...
@@ -53,15 +54,16 @@ struct tf_parser
template
<
class
T
>
std
::
vector
<
T
>
parse_axes
(
std
::
vector
<
T
>
axes
)
const
{
std
::
vector
<
T
>
new_axes
;
if
(
is_nhwc
)
{
std
::
vector
<
T
>
new_axes
;
std
::
transform
(
axes
.
begin
(),
axes
.
end
(),
std
::
back_inserter
(
new_axes
),
[
&
](
size_t
axis
)
{
return
parse_axis
(
axis
);
});
return
new_axes
;
}
return
new_
axes
;
return
axes
;
}
// tf stores certain attributes such as strides, dilations, as a 4D input.
...
...
@@ -108,21 +110,27 @@ struct tf_parser
{
add_generic_op
(
"Identity"
,
op
::
identity
{});
add_generic_op
(
"Relu"
,
op
::
relu
{});
add_generic_op
(
"Relu6"
,
op
::
clip
{
6.0
,
0.0
});
add_binary_op
(
"Add"
,
op
::
add
{});
add_binary_op
(
"Mul"
,
op
::
mul
{});
add_mem_op
(
"AvgPool"
,
&
tf_parser
::
parse_pooling
);
add_mem_op
(
"BiasAdd"
,
&
tf_parser
::
parse_biasadd
);
add_mem_op
(
"ConcatV2"
,
&
tf_parser
::
parse_concat
);
add_mem_op
(
"Const"
,
&
tf_parser
::
parse_constant
);
add_mem_op
(
"Conv2D"
,
&
tf_parser
::
parse_conv
);
add_mem_op
(
"DepthwiseConv2dNative"
,
&
tf_parser
::
parse_depthwiseconv
);
add_mem_op
(
"FusedBatchNorm"
,
&
tf_parser
::
parse_batchnorm
);
add_mem_op
(
"MatMul"
,
&
tf_parser
::
parse_matmul
);
add_mem_op
(
"MaxPool"
,
&
tf_parser
::
parse_pooling
);
add_mem_op
(
"Mean"
,
&
tf_parser
::
parse_mean
);
add_mem_op
(
"Pack"
,
&
tf_parser
::
parse_pack
);
add_mem_op
(
"Pad"
,
&
tf_parser
::
parse_pad
);
add_mem_op
(
"Reshape"
,
&
tf_parser
::
parse_reshape
);
add_mem_op
(
"Softmax"
,
&
tf_parser
::
parse_softmax
);
add_mem_op
(
"Squeeze"
,
&
tf_parser
::
parse_squeeze
);
add_mem_op
(
"StridedSlice"
,
&
tf_parser
::
parse_stridedslice
);
}
template
<
class
F
>
...
...
@@ -149,7 +157,7 @@ struct tf_parser
template
<
class
T
>
void
add_binary_op
(
std
::
string
name
,
T
x
)
{
add_op
(
name
,
[
this
,
x
](
attribute_map
attributes
,
std
::
vector
<
instruction_ref
>
args
)
{
add_op
(
name
,
[
this
,
x
](
const
attribute_map
&
attributes
,
std
::
vector
<
instruction_ref
>
args
)
{
if
(
args
.
size
()
!=
2
)
MIGRAPHX_THROW
(
"binary operators should have 2 operands"
);
auto
l0
=
args
[
1
];
...
...
@@ -211,7 +219,7 @@ struct tf_parser
template
<
class
T
>
void
add_generic_op
(
std
::
string
name
,
T
x
)
{
add_op
(
name
,
[
this
,
x
](
attribute_map
,
std
::
vector
<
instruction_ref
>
args
)
{
add_op
(
name
,
[
this
,
x
](
const
attribute_map
&
,
std
::
vector
<
instruction_ref
>
args
)
{
return
prog
.
add_instruction
(
x
,
args
);
});
}
...
...
@@ -234,7 +242,7 @@ struct tf_parser
parse_biasadd
(
const
std
::
string
&
,
const
attribute_map
&
,
std
::
vector
<
instruction_ref
>
args
)
{
uint64_t
axis
=
1
;
// assume output of previous layer is in NCHW (broadcast on channel)
auto
l0
=
prog
.
add_instruction
(
op
::
broadcast
{
axis
,
args
[
0
]
->
get_shape
()},
args
[
1
]);
auto
l0
=
prog
.
add_instruction
(
op
::
broadcast
{
axis
,
args
[
0
]
->
get_shape
()
.
lens
()
},
args
[
1
]);
return
prog
.
add_instruction
(
op
::
add
{},
args
[
0
],
l0
);
}
...
...
@@ -270,12 +278,60 @@ struct tf_parser
parse_conv
(
const
std
::
string
&
,
attribute_map
attributes
,
std
::
vector
<
instruction_ref
>
args
)
{
op
::
convolution
op
;
if
(
contains
(
attributes
,
"strides"
))
{
std
::
vector
<
size_t
>
stride
;
copy
(
attributes
.
at
(
"strides"
).
list
().
i
(),
std
::
back_inserter
(
stride
));
reorder_data
(
stride
);
if
(
stride
.
size
()
!=
4
)
{
MIGRAPHX_THROW
(
"strides should have 4 values"
);
}
op
.
stride
[
0
]
=
stride
[
2
];
op
.
stride
[
1
]
=
stride
[
3
];
}
if
(
contains
(
attributes
,
"dilations"
))
{
std
::
vector
<
size_t
>
dilation
;
copy
(
attributes
.
at
(
"dilations"
).
list
().
i
(),
std
::
back_inserter
(
dilation
));
reorder_data
(
dilation
);
if
(
dilation
.
size
()
!=
4
)
{
MIGRAPHX_THROW
(
"dilation should have 4 values"
);
}
op
.
dilation
[
0
]
=
dilation
[
2
];
op
.
dilation
[
1
]
=
dilation
[
3
];
}
auto
weights
=
args
[
1
];
// check if weights are from a constant
if
(
weights
->
name
()
!=
"@param"
)
{
if
(
is_nhwc
)
{
weights
=
prog
.
add_instruction
(
op
::
transpose
{{
1
,
3
,
0
,
2
}},
args
[
1
]);
}
else
{
weights
=
prog
.
add_instruction
(
op
::
transpose
{{
3
,
2
,
0
,
1
}},
args
[
1
]);
}
}
if
(
contains
(
attributes
,
"padding"
))
{
const
std
::
string
&
pad_mode
=
attributes
.
at
(
"padding"
).
s
();
if
(
pad_mode
.
find
(
"SAME"
)
!=
std
::
string
::
npos
)
{
op
.
padding_mode
=
op
::
padding_mode_t
::
same
;
op
.
padding_mode
=
op
::
padding_mode_t
::
same
;
std
::
vector
<
size_t
>
weight_dims
=
weights
->
get_shape
().
lens
();
size_t
weight_h
=
weight_dims
[
2
];
size_t
weight_w
=
weight_dims
[
3
];
op
.
padding
[
0
]
=
calculate_padding
(
weight_h
,
op
.
dilation
[
0
]);
op
.
padding
[
1
]
=
calculate_padding
(
weight_w
,
op
.
dilation
[
1
]);
}
else
if
(
pad_mode
.
find
(
"VALID"
)
!=
std
::
string
::
npos
)
{
op
.
padding_mode
=
op
::
padding_mode_t
::
valid
;
}
else
if
(
pad_mode
.
find
(
"EXPLICIT"
)
!=
std
::
string
::
npos
)
{
...
...
@@ -293,6 +349,18 @@ struct tf_parser
op
.
padding
[
1
]
=
padding
[
1
];
}
}
return
prog
.
add_instruction
(
op
,
{
args
[
0
],
weights
});
}
instruction_ref
parse_depthwiseconv
(
const
std
::
string
&
,
attribute_map
attributes
,
std
::
vector
<
instruction_ref
>
args
)
{
op
::
convolution
op
;
size_t
num_channels
=
args
[
0
]
->
get_shape
().
lens
()[
1
];
op
.
group
=
num_channels
;
if
(
contains
(
attributes
,
"strides"
))
{
std
::
vector
<
size_t
>
stride
;
...
...
@@ -317,9 +385,9 @@ struct tf_parser
op
.
dilation
[
0
]
=
dilation
[
2
];
op
.
dilation
[
1
]
=
dilation
[
3
];
}
auto
weights
=
args
[
1
];
// check if weights are from a constant
if
(
weights
->
name
()
!=
"@param"
)
{
if
(
is_nhwc
)
...
...
@@ -332,27 +400,116 @@ struct tf_parser
}
}
return
prog
.
add_instruction
(
op
,
{
args
[
0
],
weights
});
if
(
contains
(
attributes
,
"padding"
))
{
const
std
::
string
&
pad_mode
=
attributes
.
at
(
"padding"
).
s
();
std
::
vector
<
size_t
>
weight_dims
=
weights
->
get_shape
().
lens
();
size_t
weight_h
=
weight_dims
[
2
];
size_t
weight_w
=
weight_dims
[
3
];
if
(
pad_mode
.
find
(
"SAME"
)
!=
std
::
string
::
npos
)
{
op
.
padding_mode
=
op
::
padding_mode_t
::
same
;
op
.
padding
[
0
]
=
calculate_padding
(
weight_h
,
op
.
dilation
[
0
]);
op
.
padding
[
1
]
=
calculate_padding
(
weight_w
,
op
.
dilation
[
1
]);
}
else
if
(
pad_mode
.
find
(
"VALID"
)
!=
std
::
string
::
npos
)
{
op
.
padding_mode
=
op
::
padding_mode_t
::
valid
;
}
}
std
::
vector
<
int64_t
>
new_weights_shape
;
copy
(
weights
->
get_shape
().
lens
(),
std
::
back_inserter
(
new_weights_shape
));
// weight format is (out_channels, in_channels, h, w), but in depthwise_conv,
// out_channels is equal to the multiplier. Adjust by inserting a reshape and
// setting in_channels to 1
int64_t
multiplier
=
new_weights_shape
[
0
];
int64_t
out_channels
=
num_channels
*
multiplier
;
new_weights_shape
[
0
]
=
out_channels
;
new_weights_shape
[
1
]
=
1
;
// Make sure weights are contiguous before doing reshape
auto
cweights
=
prog
.
add_instruction
(
op
::
contiguous
{},
weights
);
auto
new_weights
=
prog
.
add_instruction
(
op
::
reshape
{
new_weights_shape
},
cweights
);
return
prog
.
add_instruction
(
op
,
{
args
[
0
],
new_weights
});
}
instruction_ref
parse_m
ean
(
const
std
::
string
&
,
attribute_map
attributes
,
std
::
vector
<
instruction_ref
>
args
)
parse_m
atmul
(
const
std
::
string
&
,
attribute_map
attributes
,
std
::
vector
<
instruction_ref
>
args
)
{
bool
transa
=
false
;
bool
transb
=
false
;
if
(
contains
(
attributes
,
"transpose_a"
))
{
transa
=
attributes
.
at
(
"transpose_a"
).
b
();
}
if
(
contains
(
attributes
,
"transpose_b"
))
{
transb
=
attributes
.
at
(
"transpose_a"
).
b
();
}
std
::
vector
<
int64_t
>
perm
(
args
[
0
]
->
get_shape
().
lens
().
size
());
std
::
iota
(
perm
.
begin
(),
perm
.
end
(),
int64_t
{
0
});
// swap the last two elements
std
::
iter_swap
(
perm
.
end
()
-
1
,
perm
.
end
()
-
2
);
auto
l1
=
(
transa
)
?
prog
.
add_instruction
(
op
::
transpose
{
perm
},
args
[
0
])
:
args
[
0
];
auto
l2
=
(
transb
)
?
prog
.
add_instruction
(
op
::
transpose
{
perm
},
args
[
1
])
:
args
[
1
];
return
prog
.
add_instruction
(
op
::
dot
{},
l1
,
l2
);
}
instruction_ref
parse_mean
(
const
std
::
string
&
,
attribute_map
attributes
,
std
::
vector
<
instruction_ref
>
args
)
{
auto
axes
=
parse_axes
(
args
[
1
]
->
eval
().
get
<
int32_t
>
().
to_vector
());
bool
keep_dims
=
attributes
.
at
(
"keep_dims"
).
b
();
std
::
vector
<
int32_t
>
hw_axes
{
2
,
3
};
if
(
axes
==
hw_axes
and
keep_dims
)
// check if conditions for GlobalAvgPool are met
auto
lens
=
args
[
0
]
->
get_shape
().
lens
();
if
(
axes
==
hw_axes
and
lens
.
size
()
==
4
)
{
op
::
pooling
op
{
"average"
};
std
::
vector
<
size_t
>
input_dims
{
args
[
0
]
->
get_shape
().
lens
()};
op
.
lengths
[
0
]
=
input_dims
[
2
];
op
.
lengths
[
1
]
=
input_dims
[
3
];
return
prog
.
add_instruction
(
op
,
args
.
front
());
op
.
lengths
[
0
]
=
lens
[
2
];
op
.
lengths
[
1
]
=
lens
[
3
];
auto
l0
=
prog
.
add_instruction
(
op
,
args
.
front
());
if
(
keep_dims
)
return
l0
;
return
prog
.
add_instruction
(
op
::
squeeze
{
std
::
vector
<
int64_t
>
(
hw_axes
.
begin
(),
hw_axes
.
end
())},
l0
);
}
MIGRAPHX_THROW
(
"MIGraphX does not support mean outside of GlobalAvgPool transformation"
);
}
instruction_ref
parse_pack
(
const
std
::
string
&
,
const
attribute_map
&
attributes
,
std
::
vector
<
instruction_ref
>
args
)
{
// reinterpret as unsqueeze with concat
std
::
vector
<
instruction_ref
>
unsqueezed_args
;
int64_t
axis
=
0
;
if
(
contains
(
attributes
,
"axis"
))
axis
=
attributes
.
at
(
"axis"
).
i
();
size_t
input_size
=
args
.
front
()
->
get_shape
().
lens
().
size
();
if
(
axis
>
input_size
)
{
MIGRAPHX_THROW
(
"TF_PARSER: axis value of "
+
to_string
(
axis
)
+
" must be smaller than input size "
+
to_string
(
input_size
));
}
// check if input arg needs axis to be converted to NCHW
if
(
input_size
>=
4
)
axis
=
parse_axis
(
axis
);
std
::
transform
(
args
.
begin
(),
args
.
end
(),
std
::
back_inserter
(
unsqueezed_args
),
[
&
](
instruction_ref
arg
)
{
return
prog
.
add_instruction
(
op
::
unsqueeze
{{
axis
}},
arg
);
});
return
prog
.
add_instruction
(
op
::
concat
{
static_cast
<
size_t
>
(
axis
)},
unsqueezed_args
);
}
instruction_ref
parse_pad
(
const
std
::
string
&
,
const
attribute_map
&
,
std
::
vector
<
instruction_ref
>
args
)
{
...
...
@@ -386,18 +543,6 @@ struct tf_parser
{
op
::
pooling
op
{
starts_with
(
name
,
"Max"
)
?
"max"
:
"average"
};
if
(
contains
(
attributes
,
"padding"
))
{
const
std
::
string
&
pad_mode
=
attributes
.
at
(
"padding"
).
s
();
if
(
pad_mode
.
find
(
"SAME"
)
!=
std
::
string
::
npos
)
{
op
.
padding_mode
=
op
::
padding_mode_t
::
same
;
}
else
if
(
pad_mode
.
find
(
"VALID"
)
!=
std
::
string
::
npos
)
{
op
.
padding_mode
=
op
::
padding_mode_t
::
valid
;
}
}
if
(
contains
(
attributes
,
"strides"
))
{
std
::
vector
<
size_t
>
stride
;
...
...
@@ -422,6 +567,20 @@ struct tf_parser
op
.
lengths
[
0
]
=
ksize
[
2
];
op
.
lengths
[
1
]
=
ksize
[
3
];
}
if
(
contains
(
attributes
,
"padding"
))
{
const
std
::
string
&
pad_mode
=
attributes
.
at
(
"padding"
).
s
();
if
(
pad_mode
.
find
(
"SAME"
)
!=
std
::
string
::
npos
)
{
op
.
padding_mode
=
op
::
padding_mode_t
::
same
;
op
.
padding
[
0
]
=
calculate_padding
(
op
.
lengths
[
0
],
1
);
op
.
padding
[
1
]
=
calculate_padding
(
op
.
lengths
[
1
],
1
);
}
else
if
(
pad_mode
.
find
(
"VALID"
)
!=
std
::
string
::
npos
)
{
op
.
padding_mode
=
op
::
padding_mode_t
::
valid
;
}
}
return
prog
.
add_instruction
(
op
,
args
[
0
]);
}
...
...
@@ -480,6 +639,46 @@ struct tf_parser
return
prog
.
add_instruction
(
op
,
args
[
0
]);
}
instruction_ref
parse_stridedslice
(
const
std
::
string
&
,
const
attribute_map
&
attributes
,
std
::
vector
<
instruction_ref
>
args
)
{
op
::
slice
op
;
auto
starts
=
args
[
1
]
->
eval
().
get
<
int32_t
>
().
to_vector
();
auto
ends
=
args
[
2
]
->
eval
().
get
<
int32_t
>
().
to_vector
();
size_t
num_axes
=
args
[
0
]
->
get_shape
().
lens
().
size
();
if
(
num_axes
>=
4
)
{
reorder_data
(
starts
);
reorder_data
(
ends
);
}
op
.
starts
=
std
::
vector
<
int64_t
>
(
starts
.
begin
(),
starts
.
end
());
op
.
ends
=
std
::
vector
<
int64_t
>
(
ends
.
begin
(),
ends
.
end
());
op
.
axes
=
std
::
vector
<
int64_t
>
(
num_axes
);
std
::
iota
(
op
.
axes
.
begin
(),
op
.
axes
.
end
(),
0
);
uint32_t
shrink_axis_mask
=
0
;
uint32_t
bitwise_compare
=
1
;
std
::
vector
<
int64_t
>
squeeze_axes
;
if
(
contains
(
attributes
,
"shrink_axis_mask"
))
shrink_axis_mask
=
static_cast
<
uint32_t
>
(
attributes
.
at
(
"shrink_axis_mask"
).
i
());
for
(
size_t
i
=
0
;
i
<
num_axes
;
i
++
)
{
// the LSB corresponds to axis 0 when determining which axes to squeeze
if
(((
shrink_axis_mask
>>
i
)
&
bitwise_compare
)
==
1
)
squeeze_axes
.
push_back
(
i
);
}
if
(
num_axes
>=
4
)
{
squeeze_axes
=
parse_axes
(
squeeze_axes
);
}
auto
l0
=
prog
.
add_instruction
(
op
,
args
[
0
]);
return
prog
.
add_instruction
(
op
::
squeeze
{
squeeze_axes
},
l0
);
}
void
parse_graph
(
const
tensorflow
::
GraphDef
&
graph
)
{
nodes
=
get_nodes
(
graph
,
input_nodes
);
...
...
@@ -644,10 +843,6 @@ struct tf_parser
static
literal
parse_tensor
(
const
tensorflow
::
TensorProto
&
t
)
{
std
::
vector
<
size_t
>
dims
=
parse_dims
(
t
.
tensor_shape
());
if
(
dims
.
empty
())
{
dims
=
{
1
};
}
size_t
shape_size
=
std
::
accumulate
(
dims
.
begin
(),
dims
.
end
(),
1
,
std
::
multiplies
<
size_t
>
());
if
(
!
t
.
tensor_content
().
empty
())
// has raw data
{
...
...
@@ -658,17 +853,17 @@ struct tf_parser
case
tensorflow
::
DataType
::
DT_FLOAT
:
return
literal
{{
shape
::
float_type
,
dims
},
s
.
data
()};
case
tensorflow
::
DataType
::
DT_UINT8
:
throw
std
::
runtime_error
(
""
);
case
tensorflow
::
DataType
::
DT_INT8
:
return
literal
{{
shape
::
int
32
_type
,
dims
},
s
.
data
()};
case
tensorflow
::
DataType
::
DT_INT8
:
return
literal
{{
shape
::
int
8
_type
,
dims
},
s
.
data
()};
case
tensorflow
::
DataType
::
DT_UINT16
:
return
literal
{{
shape
::
int
32
_type
,
dims
},
s
.
data
()};
return
literal
{{
shape
::
u
int
16
_type
,
dims
},
s
.
data
()};
case
tensorflow
::
DataType
::
DT_INT16
:
return
literal
{{
shape
::
int
32
_type
,
dims
},
s
.
data
()};
return
literal
{{
shape
::
int
16
_type
,
dims
},
s
.
data
()};
case
tensorflow
::
DataType
::
DT_INT32
:
return
literal
{{
shape
::
int32_type
,
dims
},
s
.
data
()};
case
tensorflow
::
DataType
::
DT_INT64
:
return
literal
{{
shape
::
int64_type
,
dims
},
s
.
data
()};
case
tensorflow
::
DataType
::
DT_STRING
:
throw
std
::
runtime_error
(
""
);
case
tensorflow
::
DataType
::
DT_BOOL
:
return
literal
{{
shape
::
int
32
_type
,
dims
},
s
.
data
()};
case
tensorflow
::
DataType
::
DT_BOOL
:
return
literal
{{
shape
::
int
8
_type
,
dims
},
s
.
data
()};
case
tensorflow
::
DataType
::
DT_HALF
:
return
literal
{{
shape
::
half_type
,
dims
},
s
.
data
()};
case
tensorflow
::
DataType
::
DT_DOUBLE
:
return
literal
{{
shape
::
double_type
,
dims
},
s
.
data
()};
...
...
@@ -718,21 +913,23 @@ struct tf_parser
{
case
tensorflow
::
DataType
::
DT_INVALID
:
throw
std
::
runtime_error
(
""
);
case
tensorflow
::
DataType
::
DT_FLOAT
:
return
literal
{{
shape
::
float_type
,
dims
},
get_data_vals
(
t
.
float_val
(),
shape_size
)};
return
create_literal
(
shape
::
float_type
,
dims
,
get_data_vals
(
t
.
float_val
(),
shape_size
));
case
tensorflow
::
DataType
::
DT_UINT8
:
throw
std
::
runtime_error
(
""
);
case
tensorflow
::
DataType
::
DT_INT8
:
return
literal
{{
shape
::
int
32
_type
,
dims
}
,
get_data_vals
(
t
.
int_val
(),
shape_size
)
}
;
return
create_
literal
(
shape
::
int
8
_type
,
dims
,
get_data_vals
(
t
.
int_val
(),
shape_size
)
)
;
case
tensorflow
::
DataType
::
DT_UINT16
:
return
literal
{{
shape
::
int
32
_type
,
dims
}
,
get_data_vals
(
t
.
int_val
(),
shape_size
)
}
;
return
create_
literal
(
shape
::
u
int
16
_type
,
dims
,
get_data_vals
(
t
.
int_val
(),
shape_size
)
)
;
case
tensorflow
::
DataType
::
DT_INT16
:
return
literal
{{
shape
::
int
32
_type
,
dims
}
,
get_data_vals
(
t
.
int_val
(),
shape_size
)
}
;
return
create_
literal
(
shape
::
int
16
_type
,
dims
,
get_data_vals
(
t
.
int_val
(),
shape_size
)
)
;
case
tensorflow
::
DataType
::
DT_INT32
:
return
literal
{{
shape
::
int32_type
,
dims
}
,
get_data_vals
(
t
.
int_val
(),
shape_size
)
}
;
return
create_
literal
(
shape
::
int32_type
,
dims
,
get_data_vals
(
t
.
int_val
(),
shape_size
)
)
;
case
tensorflow
::
DataType
::
DT_INT64
:
return
literal
{{
shape
::
int64_type
,
dims
},
get_data_vals
(
t
.
int64_val
(),
shape_size
)};
return
create_literal
(
shape
::
int64_type
,
dims
,
get_data_vals
(
t
.
int64_val
(),
shape_size
));
case
tensorflow
::
DataType
::
DT_STRING
:
throw
std
::
runtime_error
(
""
);
case
tensorflow
::
DataType
::
DT_BOOL
:
return
literal
{{
shape
::
int32_type
,
dims
}
,
get_data_vals
(
t
.
bool_val
(),
shape_size
)
}
;
return
create_
literal
(
shape
::
int32_type
,
dims
,
get_data_vals
(
t
.
bool_val
(),
shape_size
)
)
;
case
tensorflow
::
DataType
::
DT_HALF
:
{
std
::
vector
<
int
>
data_int32
=
get_data_vals
(
t
.
half_val
(),
shape_size
);
...
...
@@ -742,7 +939,7 @@ struct tf_parser
data_uint16
.
end
(),
std
::
back_inserter
(
data_half
),
[](
uint16_t
raw_val
)
{
return
*
reinterpret_cast
<
half
*>
(
&
raw_val
);
});
return
literal
{{
shape
::
half_type
,
dims
}
,
data_half
}
;
return
create_
literal
(
shape
::
half_type
,
dims
,
data_half
)
;
}
case
tensorflow
::
DataType
::
DT_DOUBLE
:
return
literal
{{
shape
::
double_type
,
dims
},
get_data_vals
(
t
.
double_val
(),
shape_size
)};
...
...
@@ -811,9 +1008,19 @@ struct tf_parser
std
::
transform
(
input_dims
.
begin
(),
input_dims
.
end
(),
std
::
back_inserter
(
dims
),
[](
tensorflow
::
TensorShapeProto_Dim
dim
)
{
return
dim
.
size
();
});
[](
const
tensorflow
::
TensorShapeProto_Dim
&
dim
)
{
return
dim
.
size
();
});
return
dims
;
}
template
<
class
T
>
static
literal
create_literal
(
shape
::
type_t
shape_type
,
const
std
::
vector
<
size_t
>&
dims
,
std
::
vector
<
T
>
data
)
{
// assume if explicit value is mentioned in protobuf and dim size <= 1, treat as scalar
if
(
dims
.
empty
()
or
(
dims
.
size
()
==
1
and
dims
.
front
()
==
1
))
return
literal
{{
shape_type
},
data
};
return
literal
{{
shape_type
,
dims
},
data
};
}
};
program
parse_tf
(
const
std
::
string
&
name
,
bool
is_nhwc
)
...
...
test/auto_contiguous_test.cpp
View file @
33a41ba0
#include <migraphx/auto_contiguous.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/op/transpose.hpp>
#include <migraphx/op/broadcast.hpp>
#include <migraphx/instruction.hpp>
#include <basic_ops.hpp>
#include <test.hpp>
...
...
@@ -59,7 +60,7 @@ TEST_CASE(after_literal_broadcast)
auto
l2
=
p
.
add_literal
(
get_2
());
EXPECT
(
p
.
get_shape
().
standard
());
EXPECT
(
not
p
.
get_shape
().
broadcasted
());
auto
b
=
p
.
add_instruction
(
migraphx
::
op
::
broadcast
{
0
,
l1
->
get_shape
()},
l2
);
auto
b
=
p
.
add_instruction
(
migraphx
::
op
::
broadcast
{
0
,
l1
->
get_shape
()
.
lens
()
},
l2
);
p
.
add_instruction
(
pass_op
{},
b
);
EXPECT
(
not
p
.
get_shape
().
standard
());
EXPECT
(
p
.
get_shape
().
broadcasted
());
...
...
@@ -90,7 +91,7 @@ TEST_CASE(after_param_broadcast)
auto
l2
=
p
.
add_parameter
(
"2"
,
{
migraphx
::
shape
::
float_type
,
{
2
}});
EXPECT
(
p
.
get_shape
().
standard
());
EXPECT
(
not
p
.
get_shape
().
broadcasted
());
auto
b
=
p
.
add_instruction
(
migraphx
::
op
::
broadcast
{
0
,
l1
->
get_shape
()},
l2
);
auto
b
=
p
.
add_instruction
(
migraphx
::
op
::
broadcast
{
0
,
l1
->
get_shape
()
.
lens
()
},
l2
);
p
.
add_instruction
(
pass_op
{},
b
);
EXPECT
(
not
p
.
get_shape
().
standard
());
EXPECT
(
p
.
get_shape
().
broadcasted
());
...
...
test/common_subexpression_elimination_test.cpp
View file @
33a41ba0
#include <migraphx/common_subexpression_elimination.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/op
erators
.hpp>
#include <migraphx/op
/add
.hpp>
#include <basic_ops.hpp>
#include <test.hpp>
...
...
test/cpu_dot_op_test.cpp
0 → 100644
View file @
33a41ba0
#include <iostream>
#include <vector>
#include <migraphx/literal.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/cpu/target.hpp>
#include <migraphx/verify.hpp>
#include <migraphx/onnx.hpp>
#include "test.hpp"
#include <migraphx/half.hpp>
template
<
class
T
>
void
matmul_test
()
{
migraphx
::
program
p
;
std
::
vector
<
T
>
a
=
{
-
0.00925222
,
0.56250403
,
0.70107397
,
0.75402161
,
-
0.505885
,
1.33628943
,
-
0.11413
,
-
0.31270559
,
1.59336732
,
-
0.19361027
,
-
0.91620867
,
0.40108416
,
-
0.06969921
,
0.68483471
,
-
0.39906632
,
-
1.66423624
,
0.69040076
,
-
1.31490171
,
-
0.11282616
,
-
0.79391814
};
std
::
vector
<
float
>
b
=
{
6.09568541e-01
,
-
6.10527007e-01
,
3.66646462e-01
,
1.18951101e-01
,
5.58777432e-01
,
-
3.21296298e-01
,
-
5.95997198e-01
,
-
5.01425721e-01
,
-
2.84606807e-01
,
-
5.73673557e-01
,
-
8.99430260e-01
,
-
4.25103093e-01
,
1.53027987e+00
,
-
3.81407415e-04
,
-
3.29650255e-01
};
std
::
vector
<
float
>
c
=
{
-
1.56327541e+00
,
-
7.09570140e-01
,
-
5.37424982e-01
,
-
2.22994831e-01
,
-
2.15586437e+00
,
2.09177941e-03
,
-
1.47279677e+00
,
2.02627040e-01
,
-
6.04527691e-01
,
-
1.29885596e+00
,
2.16294914e+00
,
-
1.48101497e-01
};
migraphx
::
shape
a_shape
{
migraphx
::
shape
::
get_type
<
T
>
{},
{
4
,
5
}};
auto
al
=
p
.
add_literal
(
migraphx
::
literal
{
a_shape
,
a
});
migraphx
::
shape
b_shape
{
migraphx
::
shape
::
get_type
<
T
>
{},
{
5
,
3
}};
auto
bl
=
p
.
add_literal
(
migraphx
::
literal
{
b_shape
,
b
});
p
.
add_instruction
(
migraphx
::
op
::
dot
{},
al
,
bl
);
p
.
compile
(
migraphx
::
cpu
::
target
{});
auto
result
=
p
.
eval
({});
std
::
vector
<
T
>
results_vector
;
result
.
visit
([
&
](
auto
output
)
{
results_vector
.
assign
(
output
.
begin
(),
output
.
end
());
});
EXPECT
(
migraphx
::
verify_range
(
c
,
results_vector
));
}
TEST_CASE_REGISTER
(
matmul_test
<
float
>
)
TEST_CASE_REGISTER
(
matmul_test
<
double
>
)
template
<
class
T
>
void
matmul_test_ex
()
{
migraphx
::
program
p
;
std
::
vector
<
T
>
a
=
{
-
0.00925222
,
0.56250403
,
0.70107397
,
0.75402161
,
-
0.505885
,
1.33628943
,
-
0.11413
,
-
0.31270559
,
1.59336732
,
-
0.19361027
,
-
0.91620867
,
0.40108416
,
-
0.06969921
,
0.68483471
,
-
0.39906632
,
-
1.66423624
,
0.69040076
,
-
1.31490171
,
-
0.11282616
,
-
0.79391814
};
std
::
vector
<
float
>
b
=
{
6.09568541e-01
,
-
6.10527007e-01
,
3.66646462e-01
,
1.18951101e-01
,
5.58777432e-01
,
-
3.21296298e-01
,
-
5.95997198e-01
,
-
5.01425721e-01
,
-
2.84606807e-01
,
-
5.73673557e-01
,
-
8.99430260e-01
,
-
4.25103093e-01
,
1.53027987e+00
,
-
3.81407415e-04
,
-
3.29650255e-01
};
std
::
vector
<
float
>
c
=
{
-
1.56327541e+00
,
-
7.09570140e-01
,
-
5.37424982e-01
,
-
2.22994831e-01
,
-
2.15586437e+00
,
2.09177941e-03
,
-
1.47279677e+00
,
2.02627040e-01
,
-
6.04527691e-01
,
-
1.29885596e+00
,
2.16294914e+00
,
-
1.48101497e-01
};
migraphx
::
shape
a_shape
{
migraphx
::
shape
::
get_type
<
T
>
{},
{
1
,
1
,
4
,
5
}};
auto
al
=
p
.
add_literal
(
migraphx
::
literal
{
a_shape
,
a
});
migraphx
::
shape
b_shape
{
migraphx
::
shape
::
get_type
<
T
>
{},
{
1
,
1
,
5
,
3
}};
auto
bl
=
p
.
add_literal
(
migraphx
::
literal
{
b_shape
,
b
});
p
.
add_instruction
(
migraphx
::
op
::
dot
{},
al
,
bl
);
p
.
compile
(
migraphx
::
cpu
::
target
{});
auto
result
=
p
.
eval
({});
std
::
vector
<
T
>
results_vector
;
result
.
visit
([
&
](
auto
output
)
{
results_vector
.
assign
(
output
.
begin
(),
output
.
end
());
});
EXPECT
(
migraphx
::
verify_range
(
c
,
results_vector
));
}
TEST_CASE_REGISTER
(
matmul_test_ex
<
float
>
)
TEST_CASE_REGISTER
(
matmul_test_ex
<
double
>
)
TEST_CASE
(
matmul_mutli_dim_2
)
{
migraphx
::
program
p
;
std
::
vector
<
float
>
m1
=
{
-
0.76234141
,
0.01368910
,
-
0.86343423
,
-
0.99465282
,
0.76133268
,
0.96507140
,
-
0.55893585
,
0.02625652
,
0.75171776
,
0.23112578
,
0.25624787
,
-
1.50442161
};
migraphx
::
shape
m1_shape
{
migraphx
::
shape
::
float_type
,
{
2
,
2
,
3
}};
std
::
vector
<
float
>
m2
=
{
-
0.15933632
,
-
0.69594712
,
-
0.06198966
,
-
1.23905184
,
-
0.83672704
,
-
1.06971832
,
-
0.12272917
,
1.07094116
,
-
0.08346820
,
1.16820693
,
-
0.95700874
,
0.24059691
,
0.43326023
,
0.78305235
,
-
0.53506601
,
-
0.69359678
,
-
0.26334436
,
1.56292796
,
-
0.33629175
,
-
1.72693469
,
0.41435494
,
1.52136843
,
-
0.40699791
,
-
1.59839430
};
migraphx
::
shape
m2_shape
{
migraphx
::
shape
::
float_type
,
{
2
,
3
,
4
}};
auto
l1
=
p
.
add_literal
(
migraphx
::
literal
{
m1_shape
,
m1
});
auto
l2
=
p
.
add_literal
(
migraphx
::
literal
{
m2_shape
,
m2
});
p
.
add_instruction
(
migraphx
::
op
::
dot
{},
l1
,
l2
);
p
.
compile
(
migraphx
::
cpu
::
target
{});
auto
result
=
p
.
eval
({});
std
::
vector
<
float
>
m
;
result
.
visit
([
&
](
auto
output
)
{
m
.
assign
(
output
.
begin
(),
output
.
end
());
});
std
::
vector
<
float
>
m_res
=
{
0.18208394
,
-
0.49276402
,
0.87189133
,
0.75150114
,
-
0.55909610
,
1.00521735
,
-
0.95536130
,
2.27996211
,
0.06239879
,
0.74700068
,
-
0.01570983
,
-
0.85920856
,
-
0.59070835
,
-
1.70729902
,
0.40245487
,
1.80182751
};
EXPECT
(
migraphx
::
verify_range
(
m
,
m_res
));
}
TEST_CASE
(
gemm_mutli_dim_2_beta0
)
{
migraphx
::
program
p
;
std
::
vector
<
float
>
m1
=
{
-
0.76234141
,
0.01368910
,
-
0.86343423
,
-
0.99465282
,
0.76133268
,
0.96507140
,
-
0.55893585
,
0.02625652
,
0.75171776
,
0.23112578
,
0.25624787
,
-
1.50442161
};
migraphx
::
shape
m1_shape
{
migraphx
::
shape
::
float_type
,
{
2
,
2
,
3
}};
std
::
vector
<
float
>
m2
=
{
-
0.15933632
,
-
0.69594712
,
-
0.06198966
,
-
1.23905184
,
-
0.83672704
,
-
1.06971832
,
-
0.12272917
,
1.07094116
,
-
0.08346820
,
1.16820693
,
-
0.95700874
,
0.24059691
,
0.43326023
,
0.78305235
,
-
0.53506601
,
-
0.69359678
,
-
0.26334436
,
1.56292796
,
-
0.33629175
,
-
1.72693469
,
0.41435494
,
1.52136843
,
-
0.40699791
,
-
1.59839430
};
migraphx
::
shape
m2_shape
{
migraphx
::
shape
::
float_type
,
{
2
,
3
,
4
}};
std
::
vector
<
float
>
m3
=
{
0.18208394
,
-
0.49276402
,
0.87189133
,
0.75150114
,
-
0.55909610
,
1.00521735
,
-
0.95536130
,
2.27996211
,
0.06239879
,
0.74700068
,
-
0.01570983
,
-
0.85920856
,
-
0.59070835
,
-
1.70729902
,
0.40245487
,
1.80182751
};
migraphx
::
shape
m3_shape
{
migraphx
::
shape
::
float_type
,
{
2
,
2
,
4
}};
auto
l1
=
p
.
add_literal
(
migraphx
::
literal
{
m1_shape
,
m1
});
auto
l2
=
p
.
add_literal
(
migraphx
::
literal
{
m2_shape
,
m2
});
auto
l3
=
p
.
add_literal
(
migraphx
::
literal
{
m3_shape
,
m3
});
float
alpha
=
1.0
f
;
float
beta
=
0.0
f
;
p
.
add_instruction
(
migraphx
::
op
::
dot
{
alpha
,
beta
},
l1
,
l2
,
l3
);
p
.
compile
(
migraphx
::
cpu
::
target
{});
auto
result
=
p
.
eval
({});
std
::
vector
<
float
>
m
;
result
.
visit
([
&
](
auto
output
)
{
m
.
assign
(
output
.
begin
(),
output
.
end
());
});
std
::
vector
<
float
>
m_res
=
{
0.18208394
,
-
0.49276402
,
0.87189133
,
0.75150114
,
-
0.55909610
,
1.00521735
,
-
0.95536130
,
2.27996211
,
0.06239879
,
0.74700068
,
-
0.01570983
,
-
0.85920856
,
-
0.59070835
,
-
1.70729902
,
0.40245487
,
1.80182751
};
EXPECT
(
migraphx
::
verify_range
(
m
,
m_res
));
}
TEST_CASE
(
gemm_beta_0
)
{
migraphx
::
program
p
;
std
::
vector
<
float
>
m1
=
{
-
0.76234141
,
0.01368910
,
-
0.86343423
,
-
0.99465282
,
0.76133268
,
0.96507140
};
migraphx
::
shape
m1_shape
{
migraphx
::
shape
::
float_type
,
{
1
,
2
,
3
}};
std
::
vector
<
float
>
m2
=
{
-
0.15933632
,
-
0.69594712
,
-
0.06198966
,
-
1.23905184
,
-
0.83672704
,
-
1.06971832
,
-
0.12272917
,
1.07094116
,
-
0.08346820
,
1.16820693
,
-
0.95700874
,
0.24059691
};
migraphx
::
shape
m2_shape
{
migraphx
::
shape
::
float_type
,
{
1
,
3
,
4
}};
migraphx
::
shape
m3_shape
{
migraphx
::
shape
::
float_type
,
{
1
,
2
,
4
}};
std
::
vector
<
float
>
m3
=
{
0.18208394
,
-
0.49276402
,
0.87189133
,
0.75150114
,
-
0.55909610
,
1.00521735
,
-
0.95536130
,
2.27996211
};
auto
l1
=
p
.
add_literal
(
migraphx
::
literal
{
m1_shape
,
m1
});
auto
l2
=
p
.
add_literal
(
migraphx
::
literal
{
m2_shape
,
m2
});
auto
l3
=
p
.
add_literal
(
migraphx
::
literal
{
m3_shape
,
m3
});
float
alpha
=
1.0
f
;
float
beta
=
0.0
f
;
p
.
add_instruction
(
migraphx
::
op
::
dot
{
alpha
,
beta
},
l1
,
l2
,
l3
);
p
.
compile
(
migraphx
::
cpu
::
target
{});
auto
result
=
p
.
eval
({});
std
::
vector
<
float
>
m
;
result
.
visit
([
&
](
auto
output
)
{
m
.
assign
(
output
.
begin
(),
output
.
end
());
});
std
::
vector
<
float
>
m_res
=
{
0.18208394
,
-
0.49276402
,
0.87189133
,
0.75150114
,
-
0.55909610
,
1.00521735
,
-
0.95536130
,
2.27996211
};
EXPECT
(
migraphx
::
verify_range
(
m
,
m_res
));
}
TEST_CASE
(
matmul_mutli_dim_2_3
)
{
migraphx
::
program
p
;
std
::
vector
<
float
>
m1
=
{
-
1.93300070
,
0.33902698
,
-
0.45173527
,
-
0.72283069
,
-
0.17177134
,
1.62199882
,
0.87052847
,
0.14989811
,
-
0.88969184
,
-
0.18131398
,
0.72654339
,
-
0.57123693
,
0.03852506
,
-
0.72332085
,
-
1.81844083
,
-
0.33465167
,
-
0.71400352
,
0.36883161
,
0.08698452
,
0.94974586
,
0.40087323
,
-
0.05448534
,
0.03220677
,
-
1.22494296
,
0.97938472
,
-
1.43714454
,
-
0.80430904
,
-
0.08098728
,
0.31520301
,
0.49642169
,
-
1.63471091
,
0.34390096
,
2.81292176
,
-
0.22666528
,
1.54559556
,
-
1.51075762
};
migraphx
::
shape
m1_shape
{
migraphx
::
shape
::
float_type
,
{
2
,
3
,
2
,
3
}};
std
::
vector
<
float
>
m2
=
{
-
0.33170529
,
2.26325120
,
-
0.50639461
,
0.64802947
,
0.44748888
,
0.33768068
,
-
0.53621075
,
0.34341460
,
0.58742520
,
-
1.13995790
,
-
0.99322535
,
0.35447353
,
0.01977110
,
-
0.10155016
,
-
1.02288245
,
-
0.16575791
,
-
1.47870374
,
0.29300008
,
-
0.39112198
,
1.42303608
,
-
0.02853060
,
1.52610164
,
0.53540909
,
0.75618998
,
-
0.26877787
,
-
1.90886366
,
0.30622790
,
0.59794535
,
1.29795331
,
-
0.37805803
,
-
1.58167176
,
-
1.26966832
,
0.27435891
,
0.89430347
,
0.22854926
,
-
0.50317658
};
migraphx
::
shape
m2_shape
{
migraphx
::
shape
::
float_type
,
{
2
,
3
,
3
,
2
}};
auto
l1
=
p
.
add_literal
(
migraphx
::
literal
{
m1_shape
,
m1
});
auto
l2
=
p
.
add_literal
(
migraphx
::
literal
{
m2_shape
,
m2
});
p
.
add_instruction
(
migraphx
::
op
::
dot
{},
l1
,
l2
);
p
.
compile
(
migraphx
::
cpu
::
target
{});
auto
result
=
p
.
eval
({});
std
::
vector
<
float
>
m
;
result
.
visit
([
&
](
auto
output
)
{
m
.
assign
(
output
.
begin
(),
output
.
end
());
});
std
::
vector
<
float
>
m_res
=
{
0.26735861
,
-
4.30770895
,
1.05257728
,
-
1.19954265
,
0.50493170
,
-
0.18729756
,
1.09137941
,
-
1.09298312
,
3.42956915
,
-
0.41681939
,
0.17833257
,
0.26040336
,
0.15351280
,
1.87632715
,
-
0.63545406
,
-
0.95467340
,
-
1.74728628
,
-
2.42477030
,
0.76262372
,
0.15539164
,
3.32281958
,
0.96769613
,
0.43727545
,
2.43019906
};
EXPECT
(
migraphx
::
verify_range
(
m
,
m_res
));
}
TEST_CASE
(
gemm_mutli_dim1_2_3
)
{
migraphx
::
program
p
;
std
::
vector
<
float
>
m1
=
{
1.23636469
,
-
0.47041261
,
-
0.14375651
,
-
0.48371852
,
1.16479301
,
-
0.89361055
,
-
0.18569086
,
1.10700457
,
-
1.02632638
,
0.82277012
,
0.33525769
,
0.52825145
,
-
1.00141689
,
0.45510090
,
-
0.02675039
,
-
0.60454439
,
0.38551153
,
-
0.01658514
,
0.93059292
,
-
0.54595188
,
-
0.04911005
,
-
0.91397221
,
-
0.83127477
,
-
1.57685603
,
-
1.36200452
,
2.25822236
,
-
1.23416970
,
0.12312496
,
0.76232760
,
-
0.83594234
,
1.67418145
,
-
0.19412936
,
1.05261378
,
0.66246074
,
-
1.15233398
,
0.16429736
};
migraphx
::
shape
m1_shape
{
migraphx
::
shape
::
float_type
,
{
2
,
3
,
2
,
3
}};
std
::
vector
<
float
>
m2
=
{
-
0.87300530
,
-
0.07112838
,
0.19196860
,
-
1.04986840
,
1.20348200
,
0.31966893
,
1.04805440
,
-
2.04777729
,
-
0.67906052
,
-
1.17250760
,
0.34305044
,
-
1.01957785
,
-
1.12694862
,
0.18431338
,
-
1.63712290
,
0.27566931
,
-
1.11282021
,
1.41738919
,
0.47871283
,
-
1.01980420
,
1.00212436
,
-
0.78740444
,
-
1.65636133
,
1.51466547
,
-
0.12470397
,
0.70404393
,
-
0.15244797
,
0.74288871
,
0.07339926
,
-
1.45811623
,
0.27185845
,
0.08804596
,
0.99061977
,
-
1.61752428
,
0.29191159
,
0.87271953
};
migraphx
::
shape
m2_shape
{
migraphx
::
shape
::
float_type
,
{
2
,
3
,
3
,
2
}};
std
::
vector
<
float
>
m3
=
{
-
1.07692443
,
0.85223457
,
-
0.37266530
,
2.31511577
,
0.04227017
,
1.13229428
,
-
0.52769242
,
0.27307182
,
-
0.47779843
,
-
0.08023168
,
-
0.22862823
,
0.81489871
,
1.13139581
,
1.13860467
,
0.24309065
,
0.26533729
,
0.49106772
,
-
1.18860493
,
0.27842449
,
1.03568141
,
0.49759611
,
0.10021662
,
0.00592602
,
0.90862000
};
migraphx
::
shape
m3_shape
{
migraphx
::
shape
::
float_type
,
{
2
,
3
,
2
,
2
}};
auto
l1
=
p
.
add_literal
(
migraphx
::
literal
{
m1_shape
,
m1
});
auto
l2
=
p
.
add_literal
(
migraphx
::
literal
{
m2_shape
,
m2
});
auto
l3
=
p
.
add_literal
(
migraphx
::
literal
{
m3_shape
,
m3
});
float
alpha
=
0.35
;
float
beta
=
0.41
;
auto
m12_alpha
=
p
.
add_instruction
(
migraphx
::
op
::
dot
{
alpha
,
beta
},
l1
,
l2
);
auto
l_beta
=
p
.
add_literal
(
beta
);
auto
b_beta
=
p
.
add_instruction
(
migraphx
::
op
::
scalar
{
m12_alpha
->
get_shape
().
lens
()},
l_beta
);
auto
m3_beta
=
p
.
add_instruction
(
migraphx
::
op
::
mul
{},
b_beta
,
l3
);
p
.
add_instruction
(
migraphx
::
op
::
add
{},
m3_beta
,
m12_alpha
);
p
.
compile
(
migraphx
::
cpu
::
target
{});
auto
result
=
p
.
eval
({});
std
::
vector
<
float
>
m
;
result
.
visit
([
&
](
auto
output
)
{
m
.
assign
(
output
.
begin
(),
output
.
end
());
});
std
::
vector
<
float
>
m_res
=
{
-
0.91147203
,
0.47540785
,
-
0.30313587
,
0.43325099
,
-
0.43711586
,
0.50928632
,
0.06919868
,
-
0.80382802
,
-
0.05125718
,
-
0.06685650
,
-
0.06972163
,
0.32407764
,
0.45677396
,
0.25909489
,
0.56911252
,
-
0.17183724
,
0.10858734
,
0.39406289
,
0.04662959
,
1.07979824
,
0.40355016
,
0.52410648
,
-
0.31728447
,
1.09550845
};
EXPECT
(
migraphx
::
verify_range
(
m
,
m_res
));
}
TEST_CASE
(
gemm_mutli_3args
)
{
migraphx
::
program
p
;
std
::
vector
<
float
>
m1
=
{
1.23636469
,
-
0.47041261
,
-
0.14375651
,
-
0.48371852
,
1.16479301
,
-
0.89361055
,
-
0.18569086
,
1.10700457
,
-
1.02632638
,
0.82277012
,
0.33525769
,
0.52825145
,
-
1.00141689
,
0.45510090
,
-
0.02675039
,
-
0.60454439
,
0.38551153
,
-
0.01658514
,
0.93059292
,
-
0.54595188
,
-
0.04911005
,
-
0.91397221
,
-
0.83127477
,
-
1.57685603
,
-
1.36200452
,
2.25822236
,
-
1.23416970
,
0.12312496
,
0.76232760
,
-
0.83594234
,
1.67418145
,
-
0.19412936
,
1.05261378
,
0.66246074
,
-
1.15233398
,
0.16429736
};
migraphx
::
shape
m1_shape
{
migraphx
::
shape
::
float_type
,
{
2
,
3
,
2
,
3
}};
std
::
vector
<
float
>
m2
=
{
-
0.87300530
,
-
0.07112838
,
0.19196860
,
-
1.04986840
,
1.20348200
,
0.31966893
,
1.04805440
,
-
2.04777729
,
-
0.67906052
,
-
1.17250760
,
0.34305044
,
-
1.01957785
,
-
1.12694862
,
0.18431338
,
-
1.63712290
,
0.27566931
,
-
1.11282021
,
1.41738919
,
0.47871283
,
-
1.01980420
,
1.00212436
,
-
0.78740444
,
-
1.65636133
,
1.51466547
,
-
0.12470397
,
0.70404393
,
-
0.15244797
,
0.74288871
,
0.07339926
,
-
1.45811623
,
0.27185845
,
0.08804596
,
0.99061977
,
-
1.61752428
,
0.29191159
,
0.87271953
};
migraphx
::
shape
m2_shape
{
migraphx
::
shape
::
float_type
,
{
2
,
3
,
3
,
2
}};
std
::
vector
<
float
>
m3
=
{
-
1.07692443
,
0.85223457
,
-
0.37266530
,
2.31511577
,
0.04227017
,
1.13229428
,
-
0.52769242
,
0.27307182
,
-
0.47779843
,
-
0.08023168
,
-
0.22862823
,
0.81489871
,
1.13139581
,
1.13860467
,
0.24309065
,
0.26533729
,
0.49106772
,
-
1.18860493
,
0.27842449
,
1.03568141
,
0.49759611
,
0.10021662
,
0.00592602
,
0.90862000
};
migraphx
::
shape
m3_shape
{
migraphx
::
shape
::
float_type
,
{
2
,
3
,
2
,
2
}};
auto
l1
=
p
.
add_literal
(
migraphx
::
literal
{
m1_shape
,
m1
});
auto
l2
=
p
.
add_literal
(
migraphx
::
literal
{
m2_shape
,
m2
});
auto
l3
=
p
.
add_literal
(
migraphx
::
literal
{
m3_shape
,
m3
});
float
alpha
=
0.35
;
float
beta
=
0.41
;
p
.
add_instruction
(
migraphx
::
op
::
dot
{
alpha
,
beta
},
l1
,
l2
,
l3
);
p
.
compile
(
migraphx
::
cpu
::
target
{});
auto
result
=
p
.
eval
({});
std
::
vector
<
float
>
m
;
result
.
visit
([
&
](
auto
output
)
{
m
.
assign
(
output
.
begin
(),
output
.
end
());
});
std
::
vector
<
float
>
m_res
=
{
-
0.91147203
,
0.47540785
,
-
0.30313587
,
0.43325099
,
-
0.43711586
,
0.50928632
,
0.06919868
,
-
0.80382802
,
-
0.05125718
,
-
0.06685650
,
-
0.06972163
,
0.32407764
,
0.45677396
,
0.25909489
,
0.56911252
,
-
0.17183724
,
0.10858734
,
0.39406289
,
0.04662959
,
1.07979824
,
0.40355016
,
0.52410648
,
-
0.31728447
,
1.09550845
};
EXPECT
(
migraphx
::
verify_range
(
m
,
m_res
));
}
TEST_CASE
(
gemm_3args
)
{
{
migraphx
::
program
p
;
std
::
vector
<
float
>
a
=
{
-
0.86217194
,
-
1.04129542
,
-
0.64850364
,
-
0.97078327
,
-
0.40516386
,
0.83136927
,
0.37717502
,
0.42271939
,
1.10062165
,
-
0.92239359
,
0.40403076
,
-
0.43935377
};
std
::
vector
<
float
>
b
=
{
0.76084386
,
1.89201125
,
1.73218067
,
0.7148568
,
-
0.55578914
,
0.05799101
,
-
1.24090721
,
-
0.51151978
,
1.13255803
,
0.21540723
,
-
1.10459009
,
0.45580331
};
std
::
vector
<
float
>
c
=
{
-
0.80473623
,
0.35154171
,
-
2.73077756
,
-
0.09093885
,
-
1.88850472
,
-
0.03375556
,
-
0.41798276
,
2.87368099
,
2.11031439
};
migraphx
::
shape
a_shape
{
migraphx
::
shape
::
float_type
,
{
3
,
4
}};
auto
al
=
p
.
add_literal
(
migraphx
::
literal
{
a_shape
,
a
});
migraphx
::
shape
b_shape
{
migraphx
::
shape
::
float_type
,
{
4
,
3
}};
auto
bl
=
p
.
add_literal
(
migraphx
::
literal
{
b_shape
,
b
});
migraphx
::
shape
c_shape
{
migraphx
::
shape
::
float_type
,
{
3
,
3
}};
auto
cl
=
p
.
add_literal
(
migraphx
::
literal
{
c_shape
,
c
});
p
.
add_instruction
(
migraphx
::
op
::
dot
{},
al
,
bl
,
cl
);
std
::
vector
<
float
>
gold
=
{
-
1.60947
,
0.703083
,
-
5.46156
,
-
0.181878
,
-
3.77701
,
-
0.0675112
,
-
0.835966
,
5.74736
,
4.22063
};
p
.
compile
(
migraphx
::
cpu
::
target
{});
auto
result
=
p
.
eval
({});
std
::
vector
<
float
>
m
;
result
.
visit
([
&
](
auto
output
)
{
m
.
assign
(
output
.
begin
(),
output
.
end
());
});
EXPECT
(
migraphx
::
verify_range
(
m
,
gold
));
}
}
TEST_CASE
(
matmul_vv_inner_product
)
{
{
migraphx
::
program
p
;
std
::
vector
<
float
>
a
=
{
0.7481789
,
0.02906279
,
1.01193836
,
1.60222907
,
1.89135978
,
0.30054158
,
-
0.4892588
,
-
0.27027533
};
std
::
vector
<
float
>
b
=
{
-
0.25829116
,
0.27908929
,
-
1.27888957
,
0.21152361
,
0.08593658
,
0.52163899
,
1.38343824
,
-
0.2342857
};
migraphx
::
shape
a_shape
{
migraphx
::
shape
::
float_type
,
{
8
}};
migraphx
::
shape
b_shape
{
migraphx
::
shape
::
float_type
,
{
8
}};
auto
al
=
p
.
add_literal
(
migraphx
::
literal
{
a_shape
,
a
});
auto
bl
=
p
.
add_literal
(
migraphx
::
literal
{
b_shape
,
b
});
auto
ual
=
p
.
add_instruction
(
migraphx
::
op
::
unsqueeze
{{
0
}},
al
);
auto
ubl
=
p
.
add_instruction
(
migraphx
::
op
::
unsqueeze
{{
1
}},
bl
);
p
.
add_instruction
(
migraphx
::
op
::
dot
{},
ual
,
ubl
);
std
::
vector
<
float
>
gold
=
{
-
1.43461
};
p
.
compile
(
migraphx
::
cpu
::
target
{});
auto
result
=
p
.
eval
({});
std
::
vector
<
float
>
m
;
result
.
visit
([
&
](
auto
output
)
{
m
.
assign
(
output
.
begin
(),
output
.
end
());
});
EXPECT
(
migraphx
::
verify_range
(
m
,
gold
));
}
{
migraphx
::
program
p
;
std
::
vector
<
float
>
a
=
{
0.7481789
,
0.02906279
,
1.01193836
,
1.60222907
,
1.89135978
,
0.30054158
,
-
0.4892588
,
-
0.27027533
};
std
::
vector
<
float
>
b
=
{
-
0.25829116
,
0.27908929
,
-
1.27888957
,
0.21152361
,
0.08593658
,
0.52163899
,
1.38343824
,
-
0.2342857
};
migraphx
::
shape
a_shape
{
migraphx
::
shape
::
float_type
,
{
8
}};
migraphx
::
shape
b_shape
{
migraphx
::
shape
::
float_type
,
{
8
}};
auto
al
=
p
.
add_literal
(
migraphx
::
literal
{
a_shape
,
a
});
auto
bl
=
p
.
add_literal
(
migraphx
::
literal
{
b_shape
,
b
});
auto
ual
=
p
.
add_instruction
(
migraphx
::
op
::
unsqueeze
{{
0
}},
al
);
auto
ubl
=
p
.
add_instruction
(
migraphx
::
op
::
unsqueeze
{{
1
}},
bl
);
float
alpha
=
0.32
f
;
p
.
add_instruction
(
migraphx
::
op
::
dot
{
alpha
},
ual
,
ubl
);
std
::
vector
<
float
>
gold
=
{
-
0.4590752
};
p
.
compile
(
migraphx
::
cpu
::
target
{});
auto
result
=
p
.
eval
({});
std
::
vector
<
float
>
m
;
result
.
visit
([
&
](
auto
output
)
{
m
.
assign
(
output
.
begin
(),
output
.
end
());
});
EXPECT
(
migraphx
::
verify_range
(
m
,
gold
));
}
}
TEST_CASE
(
matmul_vm
)
{
{
migraphx
::
program
p
;
std
::
vector
<
float
>
a
=
{
1.49530002
,
-
0.07181969
,
0.44593846
,
-
0.8645019
,
0.52992304
,
-
0.4910338
,
-
2.12179422
,
-
0.45962977
};
std
::
vector
<
float
>
b
=
{
-
0.06210242
,
0.0187149
,
1.47482984
,
-
1.19590602
,
-
0.45601701
,
0.36934488
,
-
0.83913193
,
0.75350964
,
0.80707019
,
0.35923582
,
-
2.18480722
,
-
0.85608682
,
0.75849199
,
0.49103473
,
-
0.91329477
,
-
0.36364322
,
-
0.69688937
,
0.07165814
,
-
0.15505523
,
0.52221663
,
-
0.98631192
,
-
0.37353654
,
-
1.89818706
,
-
0.87209739
,
-
0.33942003
,
0.11390353
,
0.78181162
,
-
0.18395337
,
-
0.34743419
,
-
0.08091231
,
1.21119765
,
1.23869861
,
1.42169414
,
0.86412382
,
1.05898002
,
-
0.31918307
,
1.08546695
,
1.50682711
,
-
0.66083538
,
-
0.32683929
};
migraphx
::
shape
a_shape
{
migraphx
::
shape
::
float_type
,
{
8
}};
auto
al
=
p
.
add_literal
(
migraphx
::
literal
{
a_shape
,
a
});
auto
ual
=
p
.
add_instruction
(
migraphx
::
op
::
unsqueeze
{{
0
}},
al
);
migraphx
::
shape
b_shape
{
migraphx
::
shape
::
float_type
,
{
8
,
5
}};
auto
bl
=
p
.
add_literal
(
migraphx
::
literal
{
b_shape
,
b
});
p
.
add_instruction
(
migraphx
::
op
::
dot
{},
ual
,
bl
);
std
::
vector
<
float
>
gold
=
{
-
3.78111
,
-
3.40007
,
-
2.1972
,
-
3.31448
,
-
3.80326
};
p
.
compile
(
migraphx
::
cpu
::
target
{});
auto
result
=
p
.
eval
({});
std
::
vector
<
float
>
m
;
result
.
visit
([
&
](
auto
output
)
{
m
.
assign
(
output
.
begin
(),
output
.
end
());
});
EXPECT
(
migraphx
::
verify_range
(
m
,
gold
));
}
{
migraphx
::
program
p
;
std
::
vector
<
float
>
a
=
{
1.49530002
,
-
0.07181969
,
0.44593846
,
-
0.8645019
,
0.52992304
,
-
0.4910338
,
-
2.12179422
,
-
0.45962977
};
std
::
vector
<
float
>
b
=
{
-
0.06210242
,
0.0187149
,
1.47482984
,
-
1.19590602
,
-
0.45601701
,
0.36934488
,
-
0.83913193
,
0.75350964
,
0.80707019
,
0.35923582
,
-
2.18480722
,
-
0.85608682
,
0.75849199
,
0.49103473
,
-
0.91329477
,
-
0.36364322
,
-
0.69688937
,
0.07165814
,
-
0.15505523
,
0.52221663
,
-
0.98631192
,
-
0.37353654
,
-
1.89818706
,
-
0.87209739
,
-
0.33942003
,
0.11390353
,
0.78181162
,
-
0.18395337
,
-
0.34743419
,
-
0.08091231
,
1.21119765
,
1.23869861
,
1.42169414
,
0.86412382
,
1.05898002
,
-
0.31918307
,
1.08546695
,
1.50682711
,
-
0.66083538
,
-
0.32683929
};
migraphx
::
shape
a_shape
{
migraphx
::
shape
::
float_type
,
{
8
}};
auto
al
=
p
.
add_literal
(
migraphx
::
literal
{
a_shape
,
a
});
auto
ual
=
p
.
add_instruction
(
migraphx
::
op
::
unsqueeze
{{
0
}},
al
);
migraphx
::
shape
b_shape
{
migraphx
::
shape
::
float_type
,
{
8
,
5
}};
auto
bl
=
p
.
add_literal
(
migraphx
::
literal
{
b_shape
,
b
});
float
alpha
=
0.5
f
;
p
.
add_instruction
(
migraphx
::
op
::
dot
{
alpha
},
ual
,
bl
);
std
::
vector
<
float
>
gold
=
{
-
1.89056
,
-
1.70003
,
-
1.0986
,
-
1.65724
,
-
1.90163
};
p
.
compile
(
migraphx
::
cpu
::
target
{});
auto
result
=
p
.
eval
({});
std
::
vector
<
float
>
m
;
result
.
visit
([
&
](
auto
output
)
{
m
.
assign
(
output
.
begin
(),
output
.
end
());
});
EXPECT
(
migraphx
::
verify_range
(
m
,
gold
));
}
{
migraphx
::
program
p
;
std
::
vector
<
float
>
a
=
{
-
1.7468318
,
-
0.38900251
,
1.00183915
,
0.06016438
,
0.08295905
,
1.5830535
};
std
::
vector
<
float
>
b
=
{
1.2459538
,
0.39586199
,
-
0.77035574
,
0.22689828
,
0.3289835
,
1.02804361
,
-
0.22941113
,
-
0.33940324
,
0.80078249
,
1.0319152
,
0.80034948
,
-
0.11631159
,
0.36899208
,
-
0.28506697
,
-
1.2211584
,
-
0.55678377
,
-
0.3618498
,
0.34857264
,
-
0.38700147
,
-
0.43434611
,
1.73029783
,
-
0.71578372
,
0.09777723
,
0.06616614
,
-
1.66721186
,
-
0.16046032
,
-
1.64581663
,
1.09373609
,
-
0.14127692
,
-
0.01938473
,
-
0.67310303
,
-
1.56154787
,
-
1.0665462
,
0.68538535
,
-
1.53920085
,
-
0.35710272
,
0.06887234
,
0.17474616
,
1.08194804
,
-
0.19990148
,
-
0.91149488
,
0.95303646
,
0.95448717
,
-
0.49332393
,
-
1.762213
,
-
0.56571194
,
-
1.69704968
,
-
0.82798066
,
0.65531872
,
1.5007798
,
0.99877355
,
0.53386114
,
-
0.88150609
,
-
1.0756985
,
0.50962511
,
-
0.68019002
,
0.1583068
,
2.83988407
,
-
1.10292457
,
0.02126969
,
0.21129951
,
0.25690146
,
-
1.6490316
,
0.55261771
,
-
1.70504303
,
-
0.02870394
,
-
0.18205627
,
0.29446203
,
-
1.91360924
,
0.46102174
,
0.44977568
,
-
0.48113321
};
migraphx
::
shape
a_shape
{
migraphx
::
shape
::
float_type
,
{
6
}};
auto
al
=
p
.
add_literal
(
migraphx
::
literal
{
a_shape
,
a
});
auto
ual
=
p
.
add_instruction
(
migraphx
::
op
::
unsqueeze
{{
0
}},
al
);
auto
bual
=
p
.
add_instruction
(
migraphx
::
op
::
multibroadcast
{{
3
,
1
,
6
}},
ual
);
migraphx
::
shape
b_shape
{
migraphx
::
shape
::
float_type
,
{
3
,
6
,
4
}};
auto
bl
=
p
.
add_literal
(
migraphx
::
literal
{
b_shape
,
b
});
p
.
add_instruction
(
migraphx
::
op
::
dot
{},
bual
,
bl
);
std
::
vector
<
float
>
gold
=
{
1.22914
,
-
1.17896
,
2.28596
,
-
0.345637
,
-
0.962362
,
0.168508
,
-
0.947471
,
-
3.02458
,
-
3.80131
,
1.38484
,
-
2.45019
,
-
1.35064
};
p
.
compile
(
migraphx
::
cpu
::
target
{});
auto
result
=
p
.
eval
({});
std
::
vector
<
float
>
m
;
result
.
visit
([
&
](
auto
output
)
{
m
.
assign
(
output
.
begin
(),
output
.
end
());
});
EXPECT
(
migraphx
::
verify_range
(
m
,
gold
));
}
{
migraphx
::
program
p
;
std
::
vector
<
float
>
a
=
{
-
1.7468318
,
-
0.38900251
,
1.00183915
,
0.06016438
,
0.08295905
,
1.5830535
};
std
::
vector
<
float
>
b
=
{
1.2459538
,
0.39586199
,
-
0.77035574
,
0.22689828
,
0.3289835
,
1.02804361
,
-
0.22941113
,
-
0.33940324
,
0.80078249
,
1.0319152
,
0.80034948
,
-
0.11631159
,
0.36899208
,
-
0.28506697
,
-
1.2211584
,
-
0.55678377
,
-
0.3618498
,
0.34857264
,
-
0.38700147
,
-
0.43434611
,
1.73029783
,
-
0.71578372
,
0.09777723
,
0.06616614
,
-
1.66721186
,
-
0.16046032
,
-
1.64581663
,
1.09373609
,
-
0.14127692
,
-
0.01938473
,
-
0.67310303
,
-
1.56154787
,
-
1.0665462
,
0.68538535
,
-
1.53920085
,
-
0.35710272
,
0.06887234
,
0.17474616
,
1.08194804
,
-
0.19990148
,
-
0.91149488
,
0.95303646
,
0.95448717
,
-
0.49332393
,
-
1.762213
,
-
0.56571194
,
-
1.69704968
,
-
0.82798066
,
0.65531872
,
1.5007798
,
0.99877355
,
0.53386114
,
-
0.88150609
,
-
1.0756985
,
0.50962511
,
-
0.68019002
,
0.1583068
,
2.83988407
,
-
1.10292457
,
0.02126969
,
0.21129951
,
0.25690146
,
-
1.6490316
,
0.55261771
,
-
1.70504303
,
-
0.02870394
,
-
0.18205627
,
0.29446203
,
-
1.91360924
,
0.46102174
,
0.44977568
,
-
0.48113321
};
migraphx
::
shape
a_shape
{
migraphx
::
shape
::
float_type
,
{
6
}};
auto
al
=
p
.
add_literal
(
migraphx
::
literal
{
a_shape
,
a
});
auto
ual
=
p
.
add_instruction
(
migraphx
::
op
::
unsqueeze
{{
0
}},
al
);
auto
bual
=
p
.
add_instruction
(
migraphx
::
op
::
multibroadcast
{{
3
,
1
,
6
}},
ual
);
migraphx
::
shape
b_shape
{
migraphx
::
shape
::
float_type
,
{
3
,
6
,
4
}};
auto
bl
=
p
.
add_literal
(
migraphx
::
literal
{
b_shape
,
b
});
p
.
add_instruction
(
migraphx
::
op
::
dot
{
0.21
f
},
bual
,
bl
);
std
::
vector
<
float
>
gold
=
{
0.25812
,
-
0.247582
,
0.480051
,
-
0.0725837
,
-
0.202096
,
0.0353867
,
-
0.198969
,
-
0.635161
,
-
0.798275
,
0.290817
,
-
0.514539
,
-
0.283635
};
p
.
compile
(
migraphx
::
cpu
::
target
{});
auto
result
=
p
.
eval
({});
std
::
vector
<
float
>
m
;
result
.
visit
([
&
](
auto
output
)
{
m
.
assign
(
output
.
begin
(),
output
.
end
());
});
EXPECT
(
migraphx
::
verify_range
(
m
,
gold
));
}
}
TEST_CASE(matmul_mv)
{
    {
        migraphx::program p;
        std::vector<float> a = {0.1612524,  0.61266466,  -0.19212896, 1.34228825,  -1.09746949,
                                0.4680955,  -0.431748,   -0.89791241, -2.19078702, -0.13767058,
                                -1.66105228, -0.91834613, 0.59199744, 1.41967261,  0.76237423};
        std::vector<float> b = {0.14365572, 0.23401411, -0.8970094, -0.12526676, -1.04703286};
        migraphx::shape a_shape{migraphx::shape::float_type, {3, 5}};
        auto al = p.add_literal(migraphx::literal{a_shape, a});
        migraphx::shape b_shape{migraphx::shape::float_type, {5}};
        auto bl  = p.add_literal(migraphx::literal{b_shape, b});
        auto ubl = p.add_instruction(migraphx::op::unsqueeze{{1}}, bl);
        p.add_instruction(migraphx::op::dot{}, al, ubl);
        std::vector<float> gold = {1.31982, 1.19022, -1.96062};
        p.compile(migraphx::cpu::target{});
        auto result = p.eval({});
        std::vector<float> m;
        result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
        EXPECT(migraphx::verify_range(m, gold));
    }

    {
        migraphx::program p;
        std::vector<float> a = {0.1612524,  0.61266466,  -0.19212896, 1.34228825,  -1.09746949,
                                0.4680955,  -0.431748,   -0.89791241, -2.19078702, -0.13767058,
                                -1.66105228, -0.91834613, 0.59199744, 1.41967261,  0.76237423};
        std::vector<float> b = {0.14365572, 0.23401411, -0.8970094, -0.12526676, -1.04703286};
        migraphx::shape a_shape{migraphx::shape::float_type, {3, 5}};
        auto al = p.add_literal(migraphx::literal{a_shape, a});
        migraphx::shape b_shape{migraphx::shape::float_type, {5}};
        auto bl  = p.add_literal(migraphx::literal{b_shape, b});
        auto ubl = p.add_instruction(migraphx::op::unsqueeze{{1}}, bl);
        float alpha = 0.3f;
        p.add_instruction(migraphx::op::dot{alpha}, al, ubl);
        std::vector<float> gold = {0.395946, 0.357067, -0.588187};
        p.compile(migraphx::cpu::target{});
        auto result = p.eval({});
        std::vector<float> m;
        result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
        EXPECT(migraphx::verify_range(m, gold));
    }

    {
        migraphx::program p;
        std::vector<float> a = {
            1.24593227,  -0.84351316, 0.27882229,  -0.42518484, -1.11391528, 0.59141834,
            1.34198714,  2.25884063,  -1.32093452, 0.44766336,  -0.09306479, 0.47526699,
            0.25858488,  1.30820392,  1.17186787,  0.31530864,  -1.19159424, -0.24100903,
            -1.03857886, 1.54453427,  0.05041654,  1.67108177,  0.965805,    0.52958924,
            -1.61243992, 0.02941846,  0.77523836,  1.97963853,  -2.51093596, 0.21882645,
            -2.60193574, 1.1899952,   1.70883519,  0.94586745,  2.65002512,  -1.42427102,
            1.0143951,   -1.34115312, 1.63833732,  -1.46477355, 0.44014877,  0.58032696,
            -1.63874372, -0.82834423, 1.81131778,  -0.52393379, 1.16721943,  0.39488835,
            0.23947128,  -0.15733194, 0.19451158,  1.21315445,  0.44594897,  0.40809135,
            -0.64252994, 0.7541716,   -0.97203195, 0.69208485,  0.34350988,  0.9836842};
        std::vector<float> b = {0.05013914, 1.39932885, 2.56616476, 1.02225623, -0.03977829};
        migraphx::shape a_shape{migraphx::shape::float_type, {2, 2, 3, 5}};
        auto al = p.add_literal(migraphx::literal{a_shape, a});
        migraphx::shape b_shape{migraphx::shape::float_type, {5}};
        auto bl   = p.add_literal(migraphx::literal{b_shape, b});
        auto ubl  = p.add_instruction(migraphx::op::unsqueeze{{1}}, bl);
        auto bubl = p.add_instruction(migraphx::op::multibroadcast{{2, 2, 5, 1}}, ubl);
        p.add_instruction(migraphx::op::dot{}, al, bubl);
        std::vector<float> gold = {-0.792717, 6.33595,   2.61466, -3.39322, 5.42485, 3.59084,
                                   6.78139,   -0.360492, -4.28998, 2.87146, 3.29447, 0.765651};
        p.compile(migraphx::cpu::target{});
        auto result = p.eval({});
        std::vector<float> m;
        result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
        EXPECT(migraphx::verify_range(m, gold));
    }
}
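// matmul_mm1: batched matrix-matrix products where one operand is multibroadcast
// up to the batch dimensions of the other before the dot.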
TEST_CASE(matmul_mm1)
{
    {
        migraphx::program p;
        std::vector<float> a = {
            -0.49450006, -1.07431991, -0.02796692, -0.99631927, 0.20040449,  -1.39709437,
            -0.15695328, 0.08208373,  -0.09746386, 0.77923021,  -0.1849151,  0.14419043,
            -0.25798175, -0.2504807,  -1.11134383, -0.71030613, -0.20234025, 0.90229168,
            0.62643053,  -0.83512638, 1.66051254,  0.05941673,  0.73081559,  0.27111867,
            0.55060745,  0.34999583,  1.02236619,  0.60178395,  1.49646162,  1.93255155,
            -3.65357913, -1.38059906, -0.46302398, 0.19847152,  0.39785875,  1.47004861,
            -1.24482133, -0.01954702, 0.36073898,  1.56055978,  -0.10344603, -0.34283135,
            -0.56482649, 1.80861249,  -0.92268202, 0.94371182,  -0.02373232, -0.75441145,
            0.43325034,  0.4057425,   -0.48844822, -0.36390512, 0.74110406,  1.25158366,
            0.52196654,  1.43461691,  -0.57530864, -0.66716206, -1.76516289, 0.96582849};
        std::vector<float> b = {0.49899375,  -2.20168661, 1.08895066,  -0.01135643, 0.90570669,
                                -1.43550963, -1.73033377, 0.21338776,  0.96962508,  0.38913968,
                                -0.32822861, 0.88222863,  0.93330718,  -1.24265228, -1.62587164};
        migraphx::shape a_shape{migraphx::shape::float_type, {2, 2, 3, 5}};
        auto al = p.add_literal(migraphx::literal{a_shape, a});
        migraphx::shape b_shape{migraphx::shape::float_type, {5, 3}};
        auto bl  = p.add_literal(migraphx::literal{b_shape, b});
        auto bbl = p.add_instruction(migraphx::op::multibroadcast{{2, 2, 5, 3}}, bl);
        p.add_instruction(migraphx::op::dot{}, al, bbl);
        std::vector<float> gold = {
            -0.386828, 0.187735,  -0.22822,  -0.148057, 2.015,     -2.56938,  -0.782212, 1.9459,
            0.927426,  -2.44907,  2.40531,   2.30232,   0.182745,  -4.21937,  1.77551,   1.50775,
            -2.60888,  -2.32484,  -0.557691, 6.13527,   -2.91743,  2.37836,   -6.42584,  1.14979,
            0.77227,   0.349659,  2.92759,   2.32384,   -2.90664,  0.0527679, -0.547761, -0.155467,
            0.964619,  2.09133,   -4.44281,  -1.3864};
        p.compile(migraphx::cpu::target{});
        auto result = p.eval({});
        std::vector<float> m;
        result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
        EXPECT(migraphx::verify_range(m, gold));
    }

    {
        migraphx::program p;
        std::vector<float> a = {-0.0309568,  -1.57294749, -0.00768606, 1.5786921,
                                0.50519718,  0.10530702,  -0.05302112, -0.06503757,
                                0.4079716,   0.0799132,   -0.82624962, 0.49341502};
        std::vector<float> b = {
            0.3664867,   0.24649534,  1.14728076,  1.09911548,  -1.23711247, -0.49436419,
            -0.67557879, -0.84180575, -1.09754376, 0.07807351,  0.74349043,  -0.92084701,
            0.50267885,  0.78709401,  0.80598159,  -0.51269589, -0.40337193, 0.29457878,
            1.25447301,  -1.66251457, -1.54652239, -0.35067765, -0.5214464,  -0.7866878,
            1.11128573,  0.26927291,  -0.0929818,  0.07523954,  0.3256776,   -1.08617826,
            0.89294253,  -0.91007619, -2.42825765, -1.76805581, 1.08136334,  -0.14521253,
            -1.32061148, 0.60663124,  -1.19835255, -0.98803563, -1.06927896, -0.51967419,
            -0.98974639, 1.01287011,  1.34910394,  0.1203349,   0.67387452,  -0.32447465,
            1.15187449,  -0.82253807, 0.22302433,  0.46434695,  0.319647,    1.56459445,
            0.15664012,  0.03998102,  0.62981041,  0.11831296,  0.47824434,  -0.93941882,
            -0.34674036, 1.17071104,  0.59203806,  2.75817738,  -0.69300013, 1.30971899,
            -0.14231862, -1.90915568, -0.06895489, 0.20160375,  0.01945916,  0.03586956};
        migraphx::shape a_shape{migraphx::shape::float_type, {3, 4}};
        auto al  = p.add_literal(migraphx::literal{a_shape, a});
        auto bal = p.add_instruction(migraphx::op::multibroadcast{{2, 3, 3, 4}}, al);
        migraphx::shape b_shape{migraphx::shape::float_type, {2, 3, 4, 3}};
        auto bl = p.add_literal(migraphx::literal{b_shape, b});
        p.add_instruction(migraphx::op::dot{}, bal, bl);
        std::vector<float> gold = {
            -1.61175,  3.11849,   -0.703205, 0.331635,    -0.00946922, 0.645626,  0.834069,
            1.06409,   0.881037,  0.227628,  -0.200308,   -1.71836,    0.156255,  0.477222,
            0.571363,  -1.04543,  1.40524,   1.24201,     -2.95083,    1.19352,   1.5008,
            0.636987,  0.148256,  -0.0231631, -1.15079,   1.42139,     1.80996,   1.79259,
            2.7192,    0.331902,  -0.726565, 0.0963351,   -0.710558,   0.259424,  -0.342345,
            -1.80522,  -0.580476, 0.277368,  -3.95582,    0.614823,    -0.415107, 0.305138,
            0.435993,  -0.107089, -0.767885, -4.00837,    1.09921,     -2.02129,  0.109717,
            0.618422,  0.438342,  0.29602,   2.00928,     0.420871};
        p.compile(migraphx::cpu::target{});
        auto result = p.eval({});
        std::vector<float> m;
        result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
        EXPECT(migraphx::verify_range(m, gold));
    }
}
TEST_CASE(matmul_mm2)
{
    {
        migraphx::program p;
        std::vector<float> a = {
            -0.49450006, -1.07431991, -0.02796692, -0.99631927, 0.20040449,  -1.39709437,
            -0.15695328, 0.08208373,  -0.09746386, 0.77923021,  -0.1849151,  0.14419043,
            -0.25798175, -0.2504807,  -1.11134383, -0.71030613, -0.20234025, 0.90229168,
            0.62643053,  -0.83512638, 1.66051254,  0.05941673,  0.73081559,  0.27111867,
            0.55060745,  0.34999583,  1.02236619,  0.60178395,  1.49646162,  1.93255155,
            -3.65357913, -1.38059906, -0.46302398, 0.19847152,  0.39785875,  1.47004861,
            -1.24482133, -0.01954702, 0.36073898,  1.56055978,  -0.10344603, -0.34283135,
            -0.56482649, 1.80861249,  -0.92268202, 0.94371182,  -0.02373232, -0.75441145,
            0.43325034,  0.4057425,   -0.48844822, -0.36390512, 0.74110406,  1.25158366,
            0.52196654,  1.43461691,  -0.57530864, -0.66716206, -1.76516289, 0.96582849};
        std::vector<float> b = {
            -1.12211357, 1.74720423,  0.60382572,  -0.61090125, -0.3315936,  0.30924675,
            -0.28906435, 0.64039247,  -1.2822253,  0.55899286,  2.14013013,  1.00944809,
            0.21660017,  -0.75465098, 0.12097934,  -1.64006315, 0.43582108,  -0.64348541,
            0.43101069,  1.30191386,  1.7746011,   0.24935804,  0.42830791,  -0.13593643,
            0.38749427,  1.39776254,  -0.42911717, -1.3537624,  -0.81999648, -0.1754485};
        migraphx::shape a_shape{migraphx::shape::float_type, {2, 2, 3, 5}};
        auto al = p.add_literal(migraphx::literal{a_shape, a});
        migraphx::shape b_shape{migraphx::shape::float_type, {2, 1, 5, 3}};
        auto bl  = p.add_literal(migraphx::literal{b_shape, b});
        auto bbl = p.add_instruction(migraphx::op::multibroadcast{{2, 2, 5, 3}}, bl);
        std::vector<float> gold = {
            0.70574512,  -2.80915314, -1.57644969, 1.75415381,  -3.13303087, -1.00150259,
            -0.18675123, -0.23349122, -0.12357225, 0.82911538,  1.37473744,  -1.11709934,
            -1.84001907, 3.51427391,  0.42425673,  0.0638482,   2.40210271,  1.50027643,
            4.81988916,  -3.63687142, -0.19101717, -4.92522092, -1.76377022, -3.58095615,
            1.83096922,  2.5512663,   -1.07926588, -2.12749134, 0.33014536,  -0.80393025,
            0.60740202,  0.95217761,  -1.06087445, -4.75868152, -3.6687713,  -1.26539821};
        p.add_instruction(migraphx::op::dot{}, al, bbl);
        p.compile(migraphx::cpu::target{});
        auto result = p.eval({});
        std::vector<float> m;
        result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
        EXPECT(migraphx::verify_range(m, gold));
    }

    {
        migraphx::program p;
        std::vector<float> a = {
            -0.19276159, -1.2568421,  -0.321242,   1.21471077,  -0.4927751,  0.69446894,
            -0.1786371,  -1.00763473, -0.10279314, 3.02931355,  1.08359235,  -0.35190132,
            -0.00639111, 0.78989113,  1.23538029,  0.4590747,   0.17304142,  0.42512412,
            0.21076913,  -0.01724556, -0.17763898, 0.12852236,  -0.00459301, 1.34498824,
            0.02907823,  0.1784464,   -0.20790355, -0.52336699, 0.45804085,  1.06025801};
        std::vector<float> b = {
            -1.12211357, 1.74720423,  0.60382572,  -0.61090125, -0.3315936,  0.30924675,
            -0.28906435, 0.64039247,  -1.2822253,  0.55899286,  2.14013013,  1.00944809,
            0.21660017,  -0.75465098, 0.12097934,  -1.64006315, 0.43582108,  -0.64348541,
            0.43101069,  1.30191386,  1.7746011,   0.24935804,  0.42830791,  -0.13593643,
            0.38749427,  1.39776254,  -0.42911717, -1.3537624,  -0.81999648, -0.1754485};
        migraphx::shape a_shape{migraphx::shape::float_type, {1, 2, 3, 5}};
        auto al  = p.add_literal(migraphx::literal{a_shape, a});
        auto bal = p.add_instruction(migraphx::op::multibroadcast{{2, 2, 3, 5}}, al);
        migraphx::shape b_shape{migraphx::shape::float_type, {2, 1, 5, 3}};
        auto bl  = p.add_literal(migraphx::literal{b_shape, b});
        auto bbl = p.add_instruction(migraphx::op::multibroadcast{{2, 2, 5, 3}}, bl);
        p.add_instruction(migraphx::op::dot{}, bal, bbl);
        std::vector<float> gold = {
            1.64924590e+00,  2.84575831e+00,  1.07340773e+00,  2.19817080e-01,  -1.87873283e+00,
            1.91883003e+00,  -2.89962196e-01, 2.76404142e+00,  1.50048102e+00,  -6.29650347e-01,
            1.48105185e+00,  -3.71716505e-03, 8.80281500e-01,  2.50057585e+00,  1.29958508e+00,
            5.63751779e-01,  2.25703781e-01,  1.30516919e+00,  8.32118386e-01,  2.44050864e-01,
            -2.49748221e+00, -5.60803176e+00, -2.98919069e+00, -1.11429417e+00, -3.29675989e+00,
            1.02442564e-01,  -1.87659303e+00, -4.67302454e-01, 9.16189968e-01,  -1.33537175e-01,
            8.27398578e-01,  1.94406914e+00,  -2.39250915e-01, -1.77062701e+00, -6.46239534e-01,
            -7.95202750e-01};
        p.compile(migraphx::cpu::target{});
        auto result = p.eval({});
        std::vector<float> m;
        result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
        EXPECT(migraphx::verify_range(m, gold));
    }

    {
        migraphx::program p;
        std::vector<float> a = {
            -0.55248691, 0.70275958,  0.56967633,  0.88206033,  -0.85088547, 0.05689149,
            -0.20084703, 0.18024434,  1.0730491,   0.15913531,  0.93621628,  0.35072771,
            1.28616952,  1.55384379,  0.30376261,  -1.12356544, -0.64271552, -2.50703079,
            -0.23994372, 0.8166084,   0.06542249,  -0.17472336, -0.37665211, 0.16342699,
            0.07645941,  0.65024333,  -1.19883423, -0.40536776, -0.31132765, 0.78113691,
            -0.16887638, 2.30797418,  -0.36241233, 0.33552153,  -1.05343996, -0.16909699,
            -1.22608815, 1.64165613,  0.96260828,  -0.16733976, 0.84211199,  1.31243813,
            0.89258549,  -0.48250384, -1.06005206, 1.37021342,  -0.35658565, 0.26879188};
        std::vector<float> b = {
            0.17111129,  -0.82134741, -1.58001178, -1.46759447, 0.31522514,  -0.11567352,
            -0.038978,   -0.3601414,  -0.84379876, 0.24848939,  -0.37080544, 0.00838631,
            1.51316241,  0.42385344,  2.06043846,  1.82348849,  1.07180434,  0.6567393,
            1.41164561,  0.73091185,  -0.33541302, -0.98082287, -0.06605479, 0.82219717,
            -1.41619634, 0.51326658,  0.26916313,  0.79819769,  0.85583702,  0.07876046,
            -0.42375545, -0.7758751,  1.14334296,  -0.14211708, -1.54520411, -0.55244869,
            -0.48478899, 0.10782164,  -0.20879552, -0.99019754, 1.78783102,  -1.31610052,
            1.73510175,  -0.48360172, 0.62367417,  -1.34180545, -0.37512931, -1.50521357,
            0.08383314,  0.76165608,  -0.4961646,  0.95821311,  -0.68407191, 0.48299435,
            -0.24323988, 0.34793412,  0.37908669,  1.19083454,  1.30218795,  -0.26731035,
            -0.34544132, -0.09595373, 0.50951334,  0.48896956,  0.38753818,  -0.4939919,
            0.02352126,  0.42013764,  0.07027765,  0.21169851,  -0.24411376, -1.77793736,
            -0.88370924, 0.95294025,  -0.08208804, -0.95943892, 0.30280474,  1.1967013,
            -1.17700948, 0.29533973};
        migraphx::shape a_shape{migraphx::shape::float_type, {2, 2, 3, 4}};
        auto al = p.add_literal(migraphx::literal{a_shape, a});
        migraphx::shape b_shape{migraphx::shape::float_type, {2, 2, 4, 5}};
        auto bl = p.add_literal(migraphx::literal{b_shape, b});
        p.add_instruction(migraphx::op::dot{}, al, bl);
        std::vector<float> gold = {
            1.22136035,  1.3765651,   2.0611395,   1.70445494,  1.8189619,   0.2509717,
            0.88815736,  1.13837946,  1.37006127,  -0.53617378, 0.45759693,  -0.503786,
            -0.10575749, -0.81715738, 2.56316255,  0.85812927,  -0.53425671, 1.38147704,
            2.57874755,  -1.05591061, -1.42065674, -0.25412658, -2.14494165, -2.81045272,
            0.27491485,  -0.04229986, 0.10181043,  -0.55680682, -0.07633866, 0.313767,
            -0.28202571, -1.64696179, -0.50872733, -1.08935912, 0.94291084,  -0.71792156,
            0.82981387,  1.14797592,  3.13989358,  -0.17507726, -0.63429162, -0.72241531,
            -0.61459168, -0.52561056, 0.3309648,   -0.46185697, -1.60586695, -0.98590829,
            0.63012062,  -0.25606052, -0.69419352, -1.78299913, -0.38572706, 1.92249442,
            0.3884186,   -0.48153048, 0.84932351,  0.67234919,  -1.07821322, -0.01208216};
        p.compile(migraphx::cpu::target{});
        auto result = p.eval({});
        std::vector<float> m;
        result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
        EXPECT(migraphx::verify_range(m, gold));
    }

    {
        migraphx::program p;
        std::vector<float> a = {
            -0.55248691, 0.70275958,  0.56967633,  0.88206033,  -0.85088547, 0.05689149,
            -0.20084703, 0.18024434,  1.0730491,   0.15913531,  0.93621628,  0.35072771,
            1.28616952,  1.55384379,  0.30376261,  -1.12356544, -0.64271552, -2.50703079,
            -0.23994372, 0.8166084,   0.06542249,  -0.17472336, -0.37665211, 0.16342699,
            0.07645941,  0.65024333,  -1.19883423, -0.40536776, -0.31132765, 0.78113691,
            -0.16887638, 2.30797418,  -0.36241233, 0.33552153,  -1.05343996, -0.16909699,
            -1.22608815, 1.64165613,  0.96260828,  -0.16733976, 0.84211199,  1.31243813,
            0.89258549,  -0.48250384, -1.06005206, 1.37021342,  -0.35658565, 0.26879188};
        std::vector<float> b = {
            -0.33734601, 0.66386073,  0.41425048,  0.40190389,  -0.99645073, -0.10017067,
            -0.58542118, 0.48636962,  0.06301405,  1.14669128,  -0.06526677, 0.23172741,
            -1.49693143, -0.44464233, -0.12775566, -1.32038007, 1.1812471,   1.22362746,
            -0.49013843, 0.25339836,  1.31698705,  1.54256669,  0.11211132,  -0.18005487,
            0.36730145,  0.97705953,  -0.18909084, 0.544932,    0.32891878,  0.64250015,
            -0.41381398, 0.47402562,  1.22286761,  1.07573211,  -0.92988077, -0.36340925,
            -1.76152377, -0.96642674, -0.79231929, 0.11517073};
        migraphx::shape a_shape{migraphx::shape::float_type, {2, 2, 3, 4}};
        auto al = p.add_literal(migraphx::literal{a_shape, a});
        migraphx::shape b_shape{migraphx::shape::float_type, {2, 4, 5}};
        auto bl  = p.add_literal(migraphx::literal{b_shape, b});
        auto bbl = p.add_instruction(migraphx::op::multibroadcast{{2, 2, 4, 5}}, bl);
        p.add_instruction(migraphx::op::dot{}, al, bbl);
        std::vector<float> gold = {
            -1.08585245, 0.39575611,  0.33947977,  -0.86339678, 1.50710753,  0.05646156,
            -0.43180359, 0.19639674,  -0.33742881, 0.98443538,  -0.9021272,  1.25043704,
            -0.45038184, -0.14689614, -0.91749459, 3.49467934,  3.81336312,  2.4482385,
            1.49649707,  1.05889193,  -3.49343731, -2.06958956, -2.52082858, -1.61401519,
            -1.52966956, 0.01191848,  -0.33246613, -0.70641362, -0.60391255, 0.28083355,
            0.52255496,  -1.08655006, 1.64648546,  0.80344255,  0.71987865,  -3.00960296,
            2.02318221,  3.32785057,  -1.13203844, 1.81235734,  0.38067585,  -0.88086897,
            1.38307367,  0.42677257,  0.83759966,  -0.34827442, -1.45067092, 2.09599671,
            1.92882983,  -0.30996324, 2.19736278,  2.32389426,  2.36741832,  1.62253915,
            0.26698225,  -0.00741609, -2.53680983, -0.0679954,  0.04499683,  0.85354276};
        p.compile(migraphx::cpu::target{});
        auto result = p.eval({});
        std::vector<float> m;
        result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
        EXPECT(migraphx::verify_range(m, gold));
    }
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/cpu_ops_test.cpp
View file @
33a41ba0
...
...
@@ -3,6 +3,7 @@
#include <migraphx/literal.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/quantization.hpp>
#include <migraphx/cpu/target.hpp>
#include <migraphx/verify.hpp>
#include <migraphx/onnx.hpp>
...
...
@@ -651,7 +652,7 @@ TEST_CASE(broadcast_test)
    uint64_t axis = 0;
    auto l1 = p.add_literal(migraphx::literal{a_shape, a_data});
    auto l2 = p.add_literal(migraphx::literal{b_shape, b_data});
    p.add_instruction(migraphx::op::broadcast{axis, l1->get_shape()}, l2);
    p.add_instruction(migraphx::op::broadcast{axis, l1->get_shape().lens()}, l2);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    auto output = result.get<int32_t>();
...
...
@@ -671,7 +672,7 @@ TEST_CASE(add_broadcast_test)
    uint64_t axis = 0;
    auto l1 = p.add_literal(migraphx::literal{a_shape, a_data});
    auto l2 = p.add_literal(migraphx::literal{b_shape, b_data});
    auto l3 = p.add_instruction(migraphx::op::broadcast{axis, l1->get_shape()}, l2);
    auto l3 = p.add_instruction(migraphx::op::broadcast{axis, l1->get_shape().lens()}, l2);
    p.add_instruction(migraphx::op::add{}, l1, l3);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
...
...
@@ -809,11 +810,11 @@ TEST_CASE(imagescaler_test)
                                       0.35, 0.45}});
    auto scale_val     = p.add_literal(2.f);
    auto scaled_tensor = p.add_instruction(migraphx::op::scalar{s}, scale_val);
    auto scaled_tensor = p.add_instruction(migraphx::op::scalar{s.lens()}, scale_val);
    auto img_scaled    = p.add_instruction(migraphx::op::mul{}, img, scaled_tensor);
    auto bias_vals     = p.add_literal(
        migraphx::literal{migraphx::shape{migraphx::shape::float_type, {3}}, {0.01, 0.02, 0.03}});
    auto bias_bcast = p.add_instruction(migraphx::op::broadcast{1, s}, bias_vals);
    auto bias_bcast = p.add_instruction(migraphx::op::broadcast{1, s.lens()}, bias_vals);
    p.add_instruction(migraphx::op::add{}, img_scaled, bias_bcast);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
...
...
@@ -876,242 +877,6 @@ TEST_CASE(reshape_test)
    }
}

template <class T>
void gemm_test()
{
    migraphx::program p;
    std::vector<T> a = {-0.00925222, 0.56250403, 0.70107397,  0.75402161,  -0.505885,
                        1.33628943,  -0.11413,   -0.31270559, 1.59336732,  -0.19361027,
                        -0.91620867, 0.40108416, -0.06969921, 0.68483471,  -0.39906632,
                        -1.66423624, 0.69040076, -1.31490171, -0.11282616, -0.79391814};
    std::vector<float> b = {6.09568541e-01,  -6.10527007e-01, 3.66646462e-01,  1.18951101e-01,
                            5.58777432e-01,  -3.21296298e-01, -5.95997198e-01, -5.01425721e-01,
                            -2.84606807e-01, -5.73673557e-01, -8.99430260e-01, -4.25103093e-01,
                            1.53027987e+00,  -3.81407415e-04, -3.29650255e-01};
    std::vector<float> c = {-1.56327541e+00, -7.09570140e-01, -5.37424982e-01, -2.22994831e-01,
                            -2.15586437e+00, 2.09177941e-03,  -1.47279677e+00, 2.02627040e-01,
                            -6.04527691e-01, -1.29885596e+00, 2.16294914e+00,  -1.48101497e-01};
    migraphx::shape a_shape{migraphx::shape::get_type<T>{}, {4, 5}};
    auto al = p.add_literal(migraphx::literal{a_shape, a});
    migraphx::shape b_shape{migraphx::shape::get_type<T>{}, {5, 3}};
    auto bl = p.add_literal(migraphx::literal{b_shape, b});
    p.add_instruction(migraphx::op::dot{}, al, bl);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<T> results_vector(12);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    EXPECT(migraphx::verify_range(c, results_vector));
}
TEST_CASE_REGISTER(gemm_test<float>)
TEST_CASE_REGISTER(gemm_test<double>)

template <class T>
void gemm_test_ex()
{
    migraphx::program p;
    std::vector<T> a = {-0.00925222, 0.56250403, 0.70107397,  0.75402161,  -0.505885,
                        1.33628943,  -0.11413,   -0.31270559, 1.59336732,  -0.19361027,
                        -0.91620867, 0.40108416, -0.06969921, 0.68483471,  -0.39906632,
                        -1.66423624, 0.69040076, -1.31490171, -0.11282616, -0.79391814};
    std::vector<float> b = {6.09568541e-01,  -6.10527007e-01, 3.66646462e-01,  1.18951101e-01,
                            5.58777432e-01,  -3.21296298e-01, -5.95997198e-01, -5.01425721e-01,
                            -2.84606807e-01, -5.73673557e-01, -8.99430260e-01, -4.25103093e-01,
                            1.53027987e+00,  -3.81407415e-04, -3.29650255e-01};
    std::vector<float> c = {-1.56327541e+00, -7.09570140e-01, -5.37424982e-01, -2.22994831e-01,
                            -2.15586437e+00, 2.09177941e-03,  -1.47279677e+00, 2.02627040e-01,
                            -6.04527691e-01, -1.29885596e+00, 2.16294914e+00,  -1.48101497e-01};
    migraphx::shape a_shape{migraphx::shape::get_type<T>{}, {1, 1, 4, 5}};
    auto al = p.add_literal(migraphx::literal{a_shape, a});
    migraphx::shape b_shape{migraphx::shape::get_type<T>{}, {1, 1, 5, 3}};
    auto bl = p.add_literal(migraphx::literal{b_shape, b});
    p.add_instruction(migraphx::op::dot{}, al, bl);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<T> results_vector(12);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    EXPECT(migraphx::verify_range(c, results_vector));
}
TEST_CASE_REGISTER(gemm_test_ex<float>)
TEST_CASE_REGISTER(gemm_test_ex<double>)

TEST_CASE(gemm_mutli_dim_2)
{
    migraphx::program p;
    std::vector<float> m1 = {-0.76234141, 0.01368910,  -0.86343423, -0.99465282,
                             0.76133268,  0.96507140,  -0.55893585, 0.02625652,
                             0.75171776,  0.23112578,  0.25624787,  -1.50442161};
    migraphx::shape m1_shape{migraphx::shape::float_type, {2, 2, 3}};
    std::vector<float> m2 = {-0.15933632, -0.69594712, -0.06198966, -1.23905184, -0.83672704,
                             -1.06971832, -0.12272917, 1.07094116,  -0.08346820, 1.16820693,
                             -0.95700874, 0.24059691,  0.43326023,  0.78305235,  -0.53506601,
                             -0.69359678, -0.26334436, 1.56292796,  -0.33629175, -1.72693469,
                             0.41435494,  1.52136843,  -0.40699791, -1.59839430};
    migraphx::shape m2_shape{migraphx::shape::float_type, {2, 3, 4}};
    auto l1 = p.add_literal(migraphx::literal{m1_shape, m1});
    auto l2 = p.add_literal(migraphx::literal{m2_shape, m2});
    p.add_instruction(migraphx::op::dot{}, l1, l2);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> m;
    result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
    std::vector<float> m_res = {0.18208394,  -0.49276402, 0.87189133,  0.75150114,
                                -0.55909610, 1.00521735,  -0.95536130, 2.27996211,
                                0.06239879,  0.74700068,  -0.01570983, -0.85920856,
                                -0.59070835, -1.70729902, 0.40245487,  1.80182751};
    EXPECT(migraphx::verify_range(m, m_res));
}

TEST_CASE(gemm_mutli_dim_2_3)
{
    migraphx::program p;
    std::vector<float> m1 = {
        -1.93300070, 0.33902698,  -0.45173527, -0.72283069, -0.17177134, 1.62199882,
        0.87052847,  0.14989811,  -0.88969184, -0.18131398, 0.72654339,  -0.57123693,
        0.03852506,  -0.72332085, -1.81844083, -0.33465167, -0.71400352, 0.36883161,
        0.08698452,  0.94974586,  0.40087323,  -0.05448534, 0.03220677,  -1.22494296,
        0.97938472,  -1.43714454, -0.80430904, -0.08098728, 0.31520301,  0.49642169,
        -1.63471091, 0.34390096,  2.81292176,  -0.22666528, 1.54559556,  -1.51075762};
    migraphx::shape m1_shape{migraphx::shape::float_type, {2, 3, 2, 3}};
    std::vector<float> m2 = {
        -0.33170529, 2.26325120,  -0.50639461, 0.64802947,  0.44748888,  0.33768068,
        -0.53621075, 0.34341460,  0.58742520,  -1.13995790, -0.99322535, 0.35447353,
        0.01977110,  -0.10155016, -1.02288245, -0.16575791, -1.47870374, 0.29300008,
        -0.39112198, 1.42303608,  -0.02853060, 1.52610164,  0.53540909,  0.75618998,
        -0.26877787, -1.90886366, 0.30622790,  0.59794535,  1.29795331,  -0.37805803,
        -1.58167176, -1.26966832, 0.27435891,  0.89430347,  0.22854926,  -0.50317658};
    migraphx::shape m2_shape{migraphx::shape::float_type, {2, 3, 3, 2}};
    auto l1 = p.add_literal(migraphx::literal{m1_shape, m1});
    auto l2 = p.add_literal(migraphx::literal{m2_shape, m2});
    p.add_instruction(migraphx::op::dot{}, l1, l2);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> m;
    result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
    std::vector<float> m_res = {0.26735861,  -4.30770895, 1.05257728,  -1.19954265, 0.50493170,
                                -0.18729756, 1.09137941,  -1.09298312, 3.42956915,  -0.41681939,
                                0.17833257,  0.26040336,  0.15351280,  1.87632715,  -0.63545406,
                                -0.95467340, -1.74728628, -2.42477030, 0.76262372,  0.15539164,
                                3.32281958,  0.96769613,  0.43727545,  2.43019906};
    EXPECT(migraphx::verify_range(m, m_res));
}

TEST_CASE(gemm_mutli_dim1_2_3)
{
    migraphx::program p;
    std::vector<float> m1 = {
        1.23636469,  -0.47041261, -0.14375651, -0.48371852, 1.16479301,  -0.89361055,
        -0.18569086, 1.10700457,  -1.02632638, 0.82277012,  0.33525769,  0.52825145,
        -1.00141689, 0.45510090,  -0.02675039, -0.60454439, 0.38551153,  -0.01658514,
        0.93059292,  -0.54595188, -0.04911005, -0.91397221, -0.83127477, -1.57685603,
        -1.36200452, 2.25822236,  -1.23416970, 0.12312496,  0.76232760,  -0.83594234,
        1.67418145,  -0.19412936, 1.05261378,  0.66246074,  -1.15233398, 0.16429736};
    migraphx::shape m1_shape{migraphx::shape::float_type, {2, 3, 2, 3}};
    std::vector<float> m2 = {
        -0.87300530, -0.07112838, 0.19196860,  -1.04986840, 1.20348200,  0.31966893,
        1.04805440,  -2.04777729, -0.67906052, -1.17250760, 0.34305044,  -1.01957785,
        -1.12694862, 0.18431338,  -1.63712290, 0.27566931,  -1.11282021, 1.41738919,
        0.47871283,  -1.01980420, 1.00212436,  -0.78740444, -1.65636133, 1.51466547,
        -0.12470397, 0.70404393,  -0.15244797, 0.74288871,  0.07339926,  -1.45811623,
        0.27185845,  0.08804596,  0.99061977,  -1.61752428, 0.29191159,  0.87271953};
    migraphx::shape m2_shape{migraphx::shape::float_type, {2, 3, 3, 2}};
    std::vector<float> m3 = {-1.07692443, 0.85223457,  -0.37266530, 2.31511577,  0.04227017,
                             1.13229428,  -0.52769242, 0.27307182,  -0.47779843, -0.08023168,
                             -0.22862823, 0.81489871,  1.13139581,  1.13860467,  0.24309065,
                             0.26533729,  0.49106772,  -1.18860493, 0.27842449,  1.03568141,
                             0.49759611,  0.10021662,  0.00592602,  0.90862000};
    migraphx::shape m3_shape{migraphx::shape::float_type, {2, 3, 2, 2}};
    auto l1 = p.add_literal(migraphx::literal{m1_shape, m1});
    auto l2 = p.add_literal(migraphx::literal{m2_shape, m2});
    auto l3 = p.add_literal(migraphx::literal{m3_shape, m3});

    float alpha    = 0.35;
    float beta     = 0.41;
    auto m12_alpha = p.add_instruction(migraphx::op::dot{alpha, beta}, l1, l2);
    auto l_beta    = p.add_literal(beta);
    auto b_beta    = p.add_instruction(migraphx::op::scalar{m12_alpha->get_shape()}, l_beta);
    auto m3_beta   = p.add_instruction(migraphx::op::mul{}, b_beta, l3);
    p.add_instruction(migraphx::op::add{}, m3_beta, m12_alpha);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> m;
    result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
    std::vector<float> m_res = {-0.91147203, 0.47540785,  -0.30313587, 0.43325099,  -0.43711586,
                                0.50928632,  0.06919868,  -0.80382802, -0.05125718, -0.06685650,
                                -0.06972163, 0.32407764,  0.45677396,  0.25909489,  0.56911252,
                                -0.17183724, 0.10858734,  0.39406289,  0.04662959,  1.07979824,
                                0.40355016,  0.52410648,  -0.31728447, 1.09550845};
    EXPECT(migraphx::verify_range(m, m_res));
}

TEST_CASE(maxpool_test)
{
    migraphx::program p;
...
...
@@ -1164,6 +929,24 @@ TEST_CASE(maxpool_test)
    EXPECT(migraphx::verify_range(results_vector, c));
}

TEST_CASE(softmax_simple_test)
{
    migraphx::program p;
    std::vector<float> a = {0.25, 0.75};
    std::vector<float> s = {0.377541, 0.622459};
    migraphx::shape a_shape{migraphx::shape::float_type, {1, 2}};
    auto al = p.add_literal(migraphx::literal{a_shape, a});
    p.add_instruction(migraphx::op::softmax{1}, al);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> results_vector(2);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    for(auto v : results_vector)
        std::cout << v << "\t";
    std::cout << std::endl;
    EXPECT(migraphx::verify_range(results_vector, s));
}

TEST_CASE(softmax_test)
{
    migraphx::program p;
...
...
@@ -1237,14 +1020,13 @@ TEST_CASE(logsoftmax_test_axis_0)
                            -0.99628491, 1.04314606, -1.22943315, 0.76930403, 0.31106618};

    std::vector<float> s = {
        -2.71138556, -5.85030702, -3.74063578, -4.22915517, -6.15821977, -5.96072346, -3.57208097,
        -5.78313166, -5.51435497, -3.67224195, -3.88393048, -2.57061599, -5.54431083, -6.27880025,
        -5.1878749,  -6.1318955,  -5.29178545, -4.22537886, -3.75693516, -7.07047099, -4.45763333,
        -4.66281846, -6.18290503, -4.11886536, -6.17408292, -4.18030052, -4.64570814, -4.64354473,
        -3.06629525, -3.80807681, -4.69162374, -5.53605222, -3.20969275, -4.82645674, -6.63942356,
        -4.73634471, -3.86003866, -5.32738981, -4.22249802, -4.51258693, -2.41455206, -3.48343199,
        -5.86215889, -4.93435935, -4.83713408, -2.97471885, -2.16666459, -3.69133151, -4.71640968,
        -5.64652924, -3.60709827, -5.87967748, -3.8809403,  -4.33917815};
    std::vector<float> s = {
        -0.135261, -2.843968, -0.659995, -0.488413, -1.051857, -2.812936, -0.250956, -0.353985,
        -1.155980, -0.603651, -0.211969, -0.175371, -1.336552, -3.885010, -1.871544, -0.837083,
        -0.887745, -0.433338, -1.158864, -4.911197, -1.147972, -0.666711, -0.996874, -0.981418,
        -0.851145, -0.853988, -0.858112, -2.067420, -0.059956, -0.727436, -0.950881, -0.429689,
        -0.061906, -1.505332, -1.210277, -0.377970, -0.791448, -1.655428, -1.827253, -0.304828,
        -0.020762, -0.167101, -0.567346, -0.530319, -1.045094, -0.376648, -0.007391, -0.381670,
        -0.720302, -0.460499, -0.469651, -0.556740, -0.554628, -0.551582};

    migraphx::shape a_shape{migraphx::shape::float_type, {2, 3, 3, 3}};
    auto al = p.add_literal(migraphx::literal{a_shape, a});
...
...
@@ -1271,14 +1053,13 @@ TEST_CASE(logsoftmax_test_axis_1)
                            -0.99628491, 1.04314606, -1.22943315, 0.76930403, 0.31106618};

    std::vector<float> s = {
        -1.77931988, -4.91824134, -2.80857010, -3.29708949, -5.22615409, -5.02865778, -2.64001529,
        -4.85106598, -4.58228929, -2.74017627, -2.95186480, -1.63855031, -4.61224515, -5.34673457,
        -4.25580922, -5.19982982, -4.35971977, -3.29331318, -2.82486948, -6.13840531, -3.52556765,
        -3.73075278, -5.25083935, -3.18679968, -5.24201724, -3.24823484, -3.71364246, -4.14309917,
        -2.56584969, -3.30763125, -4.19117818, -5.03560666, -2.70924719, -4.32601118, -6.13897800,
        -4.23589915, -3.35959310, -4.82694425, -3.72205246, -4.01214137, -1.91410650, -2.98298643,
        -5.36171333, -4.43391379, -4.33668852, -2.47427329, -1.66621903, -3.19088595, -4.21596412,
        -5.14608368, -3.10665271, -5.37923192, -3.38049474, -3.83873259};
    std::vector<float> s = {
        -0.550468, -2.132973, -1.549746, -0.650533, -1.051529, -2.248570, -0.141017, -2.028357,
        -1.947730, -1.511324, -0.166597, -0.379726, -1.965689, -1.172109, -1.475721, -2.700831,
        -1.537011, -0.658754, -1.596017, -3.353137, -2.266743, -1.084197, -1.076214, -0.406712,
        -2.743019, -0.425526, -1.079083, -2.139486, -1.270584, -1.024088, -1.154231, -3.201762,
        -0.888957, -0.532855, -3.103583, -1.221339, -1.355980, -3.531678, -1.438510, -0.975194,
        -0.080261, -1.162697, -1.568557, -1.398519, -1.322129, -0.470660, -0.370953, -0.907343,
        -1.179017, -3.312239, -1.286363, -1.586076, -0.345100, -0.824173};

    migraphx::shape a_shape{migraphx::shape::float_type, {2, 3, 3, 3}};
    auto al = p.add_literal(migraphx::literal{a_shape, a});
...
...
@@ -1305,14 +1086,13 @@ TEST_CASE(logsoftmax_test_axis_2)
                            -0.99628491, 1.04314606, -1.22943315, 0.76930403, 0.31106618};

    std::vector<float> s = {
        -0.79763715, -3.93655861, -1.82688737, -2.31540676, -4.24447136, -4.04697505, -1.65833256,
        -3.86938325, -3.60060656, -1.81223672, -2.02392525, -0.71061076, -3.68430560, -4.41879502,
        -3.32786967, -4.27189027, -3.43178022, -2.36537363, -1.35498658, -4.66852241, -2.05568475,
        -2.26086988, -3.78095645, -1.71691678, -3.77213434, -1.77835194, -2.24375956, -2.74631770,
        -1.16906822, -1.91084978, -2.79439671, -3.63882519, -1.31246572, -2.92922971, -4.74219653,
        -2.83911768, -2.19738500, -3.66473615, -2.55984436, -2.84993327, -0.75189840, -1.82077833,
        -4.19950523, -3.27170569, -3.17448042, -1.65286841, -0.84481415, -2.36948107, -3.39455924,
        -4.32467880, -2.28524783, -4.55782704, -2.55908986, -3.01732771};
    std::vector<float> s = {
        -0.495957, -1.031212, -0.245531, -2.013726, -1.339125, -2.465619, -1.356652, -0.964037,
        -2.019250, -0.214522, -0.289569, -0.234392, -2.086591, -2.684439, -2.851651, -2.674176,
        -1.697424, -1.889155, -0.401029, -3.064586, -1.173030, -1.306912, -2.177020, -0.834262,
        -2.818177, -0.174415, -1.361105, -1.024571, -0.106766, -1.167645, -1.072650, -2.576522,
        -0.569261, -1.207483, -3.679894, -2.095913, -0.504264, -3.039291, -1.290559, -1.156812,
        -0.126453, -0.551493, -2.506384, -2.646261, -1.905195, -0.206994, -0.191369, -0.959754,
        -1.948685, -3.671233, -0.875521, -3.111952, -1.905644, -1.6076011};

    migraphx::shape a_shape{migraphx::shape::float_type, {2, 3, 3, 3}};
    auto al = p.add_literal(migraphx::literal{a_shape, a});
...
...
@@ -1339,14 +1119,13 @@ TEST_CASE(logsoftmax_test_axis_3)
                            -0.99628491, 1.04314606, -1.22943315, 0.76930403, 0.31106618};

    std::vector<float> s = {
        -0.33690375, -3.47582521, -1.36615397, -0.27936556, -2.20843016, -2.01093385, -0.22551114,
        -2.43656183, -2.16778514, -1.57241522, -1.78410375, -0.47078926, -1.06745881, -1.80194823,
        -0.71102288, -2.30719726, -1.46708721, -0.40068062, -0.42698261, -3.74051844, -1.12768078,
        -1.07891856, -2.59900513, -0.53496546, -2.56139951, -0.56761711, -1.03302473, -2.09771276,
        -0.52046328, -1.26224484, -1.76322959, -2.60765807, -0.28129860, -0.81424303, -2.62720985,
        -0.72413100, -0.65570381, -2.12305496, -1.01816317, -2.48063402, -0.38259915, -1.45147908,
        -1.84310238, -0.91530284, -0.81807757, -1.31692881, -0.50887455, -2.03354147, -1.48767160,
        -2.41779116, -0.37836019, -2.56853147, -0.56979429, -1.02803214};
    std::vector<float> s = {
        -0.336904, -3.475825, -1.366154, -0.279366, -2.208430, -2.010934, -0.225511, -2.436562,
        -2.167785, -1.572415, -1.784104, -0.470789, -1.067459, -1.801948, -0.711023, -2.307197,
        -1.467087, -0.400681, -0.426983, -3.740518, -1.127681, -1.078919, -2.599005, -0.534965,
        -2.561400, -0.567617, -1.033025, -2.097713, -0.520463, -1.262245, -1.763230, -2.607658,
        -0.281299, -0.814243, -2.627210, -0.724131, -0.655704, -2.123055, -1.018163, -2.480634,
        -0.382599, -1.451479, -1.843102, -0.915303, -0.818078, -1.316929, -0.508875, -2.033541,
        -1.487672, -2.417791, -0.378360, -2.568531, -0.569794, -1.028032};

    migraphx::shape a_shape{migraphx::shape::float_type, {2, 3, 3, 3}};
    auto al = p.add_literal(migraphx::literal{a_shape, a});
...
...
@@ -1359,40 +1138,6 @@ TEST_CASE(logsoftmax_test_axis_3)
    EXPECT(migraphx::verify_range(results_vector, s));
}

TEST_CASE(logsoftmax_test_axis_4)
{
    migraphx::program p;
    std::vector<float> a = {
        1.93885877,  -1.20006269, 0.90960855,  0.42108916,  -1.50797544, -1.31047913, 1.07816336,
        -1.13288733, -0.86411064, 0.97800238,  0.76631385,  2.07962834,  -0.8940665,  -1.62855592,
        -0.53763057, -1.48165117, -0.64154112, 0.42486547,  0.89330917,  -2.42022666, 0.192611,
        -0.01257413, -1.5326607,  0.53137897,  -1.52383859, 0.46994381,  0.00453619,  0.0066996,
        1.58394908,  0.84216752,  -0.04137941, -0.88580789, 1.44055158,  -0.17621241, -1.98917923,
        -0.08610038, 0.79020567,  -0.67714548, 0.42774631,  0.1376574,   2.23569227,  1.16681234,
        -1.21191456, -0.28411502, -0.18688975, 1.67552548,  2.48357974,  0.95891282,  -0.06616535,
        -0.99628491, 1.04314606,  -1.22943315, 0.76930403,  0.31106618};
    std::vector<float> s = {
        0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,
        0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,
        0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,
        0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,
        0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,
        0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,
        0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,
        0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000};
    migraphx::shape a_shape{migraphx::shape::float_type, {2, 3, 3, 3}};
    auto al  = p.add_literal(migraphx::literal{a_shape, a});
    int axis = 4;
    p.add_instruction(migraphx::op::logsoftmax{axis}, al);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> results_vector;
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    EXPECT(migraphx::verify_range(results_vector, s));
}

TEST_CASE(conv2d_test)
{
    migraphx::program p;
...
...
@@ -1793,4 +1538,49 @@ TEST_CASE(fp16_test)
    EXPECT(migraphx::verify_range(results_vector, gold));
}

TEST_CASE(fp32_fp16_test)
{
    auto create_program = [] {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {2, 3}};
        std::vector<float> data(2 * 3);
        std::iota(data.begin(), data.end(), 1.0f);
        auto l1 = p.add_literal(migraphx::literal(s, data));
        auto l2 = p.add_literal(migraphx::literal(s, data));
        p.add_instruction(migraphx::op::add{}, l1, l2);
        return p;
    };

    auto test_case = [&](std::vector<std::string>&& op_names) {
        std::vector<float> gold_res = {2.0, 4.0, 6.0, 8.0, 10.0, 12.0};
        auto p                      = create_program();
        migraphx::quantize(p, op_names);
        p.compile(migraphx::cpu::target{});
        auto result = p.eval({});
        std::vector<float> res;
        result.visit([&](auto output) { res.assign(output.begin(), output.end()); });
        EXPECT(migraphx::verify_range(res, gold_res));
    };

    test_case({"all"});
    test_case({"add"});
}

TEST_CASE(clip_test)
{
    migraphx::program p;
    migraphx::shape s{migraphx::shape::float_type, {3}};
    auto l = p.add_literal(migraphx::literal{s, {-1.0, 0.0, 10.0}});
    migraphx::op::clip op;
    op.max_val = 6.0;
    op.min_val = 0.0;
    p.add_instruction(op, l);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> results_vector(3);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold = {0.0, 0.0, 6.0};
    EXPECT(migraphx::verify_range(results_vector, gold));
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/cpu_rnn_ops_test.cpp
View file @
33a41ba0
#include <iostream>
#include <vector>
#include <migraphx/literal.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/op/rnn.hpp>
#include <migraphx/op/gru.hpp>
#include <migraphx/op/lstm.hpp>
#include <migraphx/op/rnn_last_output.hpp>
#include <migraphx/op/rnn_last_cell_output.hpp>
#include <migraphx/op/abnormal_ops.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/cpu/target.hpp>
#include <migraphx/verify.hpp>
...
...
test/dead_code_elimination_test.cpp
View file @
33a41ba0
#include <migraphx/dead_code_elimination.hpp>
#include <basic_ops.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/op/abnormal_ops.hpp>
#include <migraphx/op/add.hpp>
#include <migraphx/op/identity.hpp>
#include <test.hpp>
struct dce_target
...
...
@@ -129,4 +131,55 @@ TEST_CASE(undefined_test)
    EXPECT(result != migraphx::literal{4});
}
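// The duplicate_args tests below check that dead-code elimination removes unused add
// instructions even when an argument is passed more than once or reused through a chain.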
TEST_CASE(duplicate_args1)
{
    migraphx::program p;
    auto l0 = p.add_literal(0);
    auto l3 = p.add_literal(3);
    p.add_instruction(migraphx::op::add{}, l3, l3);
    p.add_instruction(migraphx::op::identity{}, l0);
    auto count = std::distance(p.begin(), p.end());
    p.compile(dce_target{});
    EXPECT(std::distance(p.begin(), p.end()) != count);
    EXPECT(std::distance(p.begin(), p.end()) == 2);
    auto result = p.eval({});
    EXPECT(result == migraphx::literal{0});
}

TEST_CASE(duplicate_args2)
{
    migraphx::program p;
    auto l0   = p.add_literal(0);
    auto l3   = p.add_literal(3);
    auto sum1 = p.add_instruction(migraphx::op::add{}, l0, l3);
    p.add_instruction(migraphx::op::add{}, sum1, l3);
    p.add_instruction(migraphx::op::identity{}, l0);
    auto count = std::distance(p.begin(), p.end());
    p.compile(dce_target{});
    EXPECT(std::distance(p.begin(), p.end()) != count);
    EXPECT(std::distance(p.begin(), p.end()) == 2);
    auto result = p.eval({});
    EXPECT(result == migraphx::literal{0});
}

TEST_CASE(duplicate_args3)
{
    migraphx::program p;
    auto l0   = p.add_literal(0);
    auto l3   = p.add_literal(3);
    auto sum1 = p.add_instruction(migraphx::op::add{}, l0, l3);
    auto sum2 = p.add_instruction(migraphx::op::add{}, l0, sum1);
    p.add_instruction(migraphx::op::add{}, sum2, l3);
    p.add_instruction(migraphx::op::identity{}, l0);
    auto count = std::distance(p.begin(), p.end());
    p.compile(dce_target{});
    EXPECT(std::distance(p.begin(), p.end()) != count);
    EXPECT(std::distance(p.begin(), p.end()) == 2);
    auto result = p.eval({});
    EXPECT(result == migraphx::literal{0});
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/eliminate_allocation_test.cpp
View file @
33a41ba0
#include <migraphx/eliminate_allocation.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/check_shapes.hpp>
#include <migraphx/argument.hpp>
#include <basic_ops.hpp>
#include <test.hpp>
...
...
@@ -19,6 +20,13 @@ struct eliminate_allocation_target
struct allocate
{
    migraphx::shape s{};

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return migraphx::pack(f(self.s, "shape"));
    }

    std::string name() const { return "allocate"; }
    migraphx::shape compute_shape(const std::vector<migraphx::shape>& inputs) const
    {
...
...
test/eliminate_concat_test.cpp
View file @
33a41ba0
#include <migraphx/eliminate_concat.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/op/concat.hpp>
#include <migraphx/op/load.hpp>
#include <migraphx/op/identity.hpp>
#include <basic_ops.hpp>
#include <test.hpp>
...
...
@@ -8,6 +10,13 @@ struct concat
{
    concat(std::size_t axis) { op.axis = axis; }

    migraphx::op::concat op;

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return migraphx::reflect(self.op, f);
    }

    std::string name() const { return "eliminate_concat::concat"; }
    migraphx::shape compute_shape(std::vector<migraphx::shape> inputs) const
    {
...
...
@@ -49,6 +58,13 @@ struct eliminate_concat_target
struct allocate
{
    migraphx::shape s{};

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return migraphx::pack(f(self.s, "shape"));
    }

    std::string name() const { return "allocate"; }
    migraphx::shape compute_shape(const std::vector<migraphx::shape>& inputs) const
    {
...
...
test/eliminate_contiguous_test.cpp
View file @
33a41ba0
#include <migraphx/eliminate_contiguous.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/op/identity.hpp>
#include <migraphx/op/dot.hpp>
#include <migraphx/op/sin.hpp>
#include <migraphx/op/slice.hpp>
#include <migraphx/op/transpose.hpp>
#include <migraphx/op/contiguous.hpp>
#include <basic_ops.hpp>
#include <test.hpp>
...
...
@@ -35,7 +40,46 @@ TEST_CASE(non_standard_op)
    p.add_instruction(pass_op{}, c);
    auto count = std::distance(p.begin(), p.end());
    p.compile(eliminate_contiguous_target{});
    EXPECT(std::distance(p.begin(), p.end()) == count);
}

TEST_CASE(transpose_gemm)
{
    migraphx::program p;
    auto l  = p.add_literal(get_2x2());
    auto t  = p.add_instruction(migraphx::op::transpose{{1, 0}}, l);
    auto c  = p.add_instruction(migraphx::op::contiguous{}, t);
    auto ic = p.add_instruction(migraphx::op::identity{}, c);
    p.add_instruction(migraphx::op::dot{}, ic, l);
    auto count = std::distance(p.begin(), p.end());
    p.compile(eliminate_contiguous_target{});
    EXPECT(std::distance(p.begin(), p.end()) == (count - 1));
}

TEST_CASE(transpose_standard_op)
{
    migraphx::program p;
    auto l  = p.add_literal(get_2x2());
    auto t  = p.add_instruction(migraphx::op::transpose{{1, 0}}, l);
    auto c  = p.add_instruction(migraphx::op::contiguous{}, t);
    auto sn = p.add_instruction(migraphx::op::sin{}, c);
    p.add_instruction(pass_standard_op{}, sn);
    auto count = std::distance(p.begin(), p.end());
    p.compile(eliminate_contiguous_target{});
    EXPECT(std::distance(p.begin(), p.end()) == count);
}

TEST_CASE(no_packed_unary_op)
{
    migraphx::program p;
    auto l  = p.add_literal(get_2x2());
    auto t  = p.add_instruction(migraphx::op::slice{{1}, {1}, {2}}, l);
    auto c  = p.add_instruction(migraphx::op::contiguous{}, t);
    auto sn = p.add_instruction(migraphx::op::sin{}, c);
    p.add_instruction(pass_standard_op{}, sn);
    auto count = std::distance(p.begin(), p.end());
    p.compile(eliminate_contiguous_target{});
    EXPECT(std::distance(p.begin(), p.end()) == count - 1);
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/eliminate_identity_test.cpp
0 → 100644
View file @
33a41ba0
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/eliminate_identity.hpp>
#include <migraphx/instruction.hpp>
#include <basic_ops.hpp>
#include <migraphx/op/identity.hpp>
#include <test.hpp>
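// Minimal compile target that runs only the eliminate_identity pass.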
struct eliminate_identity_target
{
    std::string name() const { return "eliminate_identity"; }
    std::vector<migraphx::pass> get_passes(migraphx::context&) const
    {
        return {migraphx::eliminate_identity{}};
    }
    migraphx::context get_context() const { return {}; }
};

TEST_CASE(simple_test)
{
    migraphx::program p;
    auto one          = p.add_literal(1);
    auto one_identity = p.add_instruction(migraphx::op::identity{}, one);
    auto two          = p.add_literal(2);
    auto two_identity = p.add_instruction(migraphx::op::identity{}, two);
    p.add_instruction(sum_op{}, one_identity, two_identity);
    p.compile(eliminate_identity_target{});
    EXPECT(std::none_of(p.begin(), p.end(), [](const migraphx::instruction& ins) {
        return ins.name() == "identity";
    }));
    auto result = p.eval({});
    EXPECT(result == migraphx::literal{3});
}

TEST_CASE(simple_test_end)
{
    migraphx::program p;
    auto one = p.add_literal(1);
    auto two = p.add_literal(2);
    auto ans = p.add_instruction(sum_op{}, one, two);
    p.add_instruction(migraphx::op::identity{}, ans);
    p.compile(eliminate_identity_target{});
    EXPECT(std::none_of(p.begin(), p.end(), [](const migraphx::instruction& ins) {
        return ins.name() == "identity";
    }));
    auto result = p.eval({});
    EXPECT(result == migraphx::literal{3});
}

TEST_CASE(simple_test_end_dependency)
{
    migraphx::program p;
    auto one   = p.add_literal(1.0);
    auto two   = p.add_literal(2.0);
    auto three = p.add_literal(3.0);
    auto ans   = p.add_instruction(sum_op{}, one, two);
    p.add_instruction(sum_op{}, ans, three);
    p.add_instruction(migraphx::op::identity{}, ans);
    p.compile(eliminate_identity_target{});
    EXPECT(std::any_of(p.begin(), p.end(), [](const migraphx::instruction& ins) {
        return ins.name() == "identity";
    }));
    auto result = p.eval({});
    EXPECT(result == migraphx::literal{3.0});
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/eliminate_pad_test.cpp
0 → 100644
View file @
33a41ba0
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/eliminate_pad.hpp>
#include <migraphx/instruction.hpp>
#include <basic_ops.hpp>
#include <migraphx/operators.hpp>
#include <test.hpp>
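// Minimal compile target that runs eliminate_pad followed by dead_code_elimination.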
struct eliminate_pad_target
{
    std::string name() const { return "eliminate_pad"; }
    std::vector<migraphx::pass> get_passes(migraphx::context&) const
    {
        return {migraphx::eliminate_pad{}, migraphx::dead_code_elimination{}};
    }
    migraphx::context get_context() const { return {}; }
};

migraphx::instruction_ref
create_im2col(migraphx::instruction_ref& l_img, size_t channels, migraphx::program& p)
{
    size_t f[2] = {1, 1};
    std::vector<int32_t> weights(channels * f[0] * f[1]);
    migraphx::shape s_weights{migraphx::shape::int32_type, {1, channels, f[0], f[1]}};
    auto l_weights = p.add_literal(migraphx::literal{s_weights, weights});
    return p.add_instruction(migraphx::op::im2col{}, l_img, l_weights);
}

migraphx::instruction_ref
create_conv(migraphx::instruction_ref& l_img,
            size_t channels,
            migraphx::program& p,
            migraphx::op::padding_mode_t padding_mode = migraphx::op::padding_mode_t::default_)
{
    migraphx::shape s_weights{migraphx::shape::int32_type, {4, channels, 3, 3}};
    std::vector<int32_t> weights(4 * channels * 3 * 3);
    auto l_weights = p.add_literal(migraphx::literal{s_weights, weights});
    migraphx::op::convolution op;
    op.padding_mode = padding_mode;
    return p.add_instruction(op, l_img, l_weights);
}

TEST_CASE(rewrite_test)
{
    migraphx::program p;
    size_t img_dim[2] = {2, 2};
    size_t channels   = 1;
    std::vector<int32_t> input(channels * img_dim[0] * img_dim[1]);
    std::iota(input.begin(), input.end(), 0);
    migraphx::shape s_img{migraphx::shape::int32_type, {1, channels, img_dim[0], img_dim[1]}};
    auto l_img      = p.add_literal(migraphx::literal{s_img, input});
    auto padded_img = p.add_instruction(migraphx::op::pad{{0, 0, 1, 1, 0, 0, 1, 1}}, l_img);

    auto l0 = create_im2col(padded_img, channels, p);
    auto l1 = create_conv(padded_img, channels, p);
    auto l2 = p.add_instruction(migraphx::op::pooling{}, padded_img);
    p.add_instruction(migraphx::op::identity{}, l0, l1, l2);

    p.compile(eliminate_pad_target{});
    EXPECT(std::none_of(
        p.begin(), p.end(), [](const migraphx::instruction& ins) { return ins.name() == "pad"; }));
}

TEST_CASE(rewrite_test_asymmetric)
{
    migraphx::program p;
    size_t img_dim[2] = {2, 2};
    size_t channels   = 1;
    std::vector<int32_t> input(channels * img_dim[0] * img_dim[1]);
    std::iota(input.begin(), input.end(), 0);
    migraphx::shape s_img{migraphx::shape::int32_type, {1, channels, img_dim[0], img_dim[1]}};
    auto l_img      = p.add_literal(migraphx::literal{s_img, input});
    auto padded_img = p.add_instruction(migraphx::op::pad{{0, 0, 0, 0, 0, 0, 2, 2}}, l_img);
    create_im2col(padded_img, channels, p);

    p.compile(eliminate_pad_target{});
    EXPECT(std::any_of(
        p.begin(), p.end(), [](const migraphx::instruction& ins) { return ins.name() == "pad"; }));
}

TEST_CASE(rewrite_test_same_padding)
{
    migraphx::program p;
    size_t img_dim[2] = {2, 2};
    size_t channels   = 1;
    std::vector<int32_t> input(channels * img_dim[0] * img_dim[1]);
    std::iota(input.begin(), input.end(), 0);
    migraphx::shape s_img{migraphx::shape::int32_type, {1, channels, img_dim[0], img_dim[1]}};
    auto l_img      = p.add_literal(migraphx::literal{s_img, input});
    auto padded_img = p.add_instruction(migraphx::op::pad{{0, 0, 1, 1, 0, 0, 1, 1}}, l_img);
    create_conv(padded_img, channels, p, migraphx::op::padding_mode_t::same);

    p.compile(eliminate_pad_target{});
    EXPECT(std::any_of(
        p.begin(), p.end(), [](const migraphx::instruction& ins) { return ins.name() == "pad"; }));
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/fwd_conv_batchnorm_rewrite_test.cpp
View file @
33a41ba0
#include <migraphx/fwd_conv_batchnorm_rewrite.hpp>
#include <migraphx/program.hpp>
#include <migraphx/cpu/target.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/op/reshape.hpp>
#include <migraphx/op/batch_norm.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/ranges.hpp>
#include <test.hpp>
#include <migraphx/verify.hpp>
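// Helper used by the tests below to check whether a batch_norm_inference
// instruction is still present in a program after the rewrite pass has run.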
bool is_batch_norm(migraphx::instruction& ins) { return ins.name() == "batch_norm_inference"; }

TEST_CASE(fwd_conv_batchnorm_rewrite_test)
{
    std::vector<float> xdata = {
...
...
@@ -65,4 +71,105 @@ TEST_CASE(fwd_conv_batchnorm_rewrite_test)
EXPECT
(
migraphx
::
verify_range
(
results_vector1
,
results_vector2
));
}
TEST_CASE(non_literal)
{
    migraphx::shape xs{migraphx::shape::float_type, {1, 3, 8, 8}};
    migraphx::shape ws{migraphx::shape::float_type, {4, 3, 1, 1}};
    migraphx::shape vars{migraphx::shape::float_type, {4}};
    auto create_program = [&]() {
        migraphx::program p;
        auto x        = p.add_parameter("x", xs);
        auto w        = p.add_parameter("w", ws);
        auto conv     = p.add_instruction(migraphx::op::convolution{}, x, w);
        auto scale    = p.add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
        auto bias     = p.add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
        auto mean     = p.add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
        auto variance = p.add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
        p.add_instruction(migraphx::op::batch_norm_inference{}, conv, scale, bias, mean, variance);
        return p;
    };

    migraphx::program p1 = create_program();
    migraphx::program p2 = create_program();
    migraphx::fwd_conv_batchnorm_rewrite opt;
    opt.apply(p2);
    EXPECT(any_of(p1, &is_batch_norm));
    EXPECT(any_of(p2, &is_batch_norm));
}
TEST_CASE(as_literal)
{
    migraphx::shape xs{migraphx::shape::float_type, {1, 3, 8, 8}};
    migraphx::shape ws{migraphx::shape::float_type, {4, 3, 1, 1}};
    migraphx::shape vars{migraphx::shape::float_type, {4}};
    auto create_program = [&]() {
        migraphx::program p;
        auto x        = p.add_literal(migraphx::generate_literal(xs, 1));
        auto w        = p.add_literal(migraphx::generate_literal(ws, 1));
        auto conv     = p.add_instruction(migraphx::op::convolution{}, x, w);
        auto scale    = p.add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
        auto bias     = p.add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
        auto mean     = p.add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
        auto variance = p.add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
        p.add_instruction(migraphx::op::batch_norm_inference{}, conv, scale, bias, mean, variance);
        return p;
    };

    migraphx::program p1 = create_program();
    migraphx::program p2 = create_program();
    migraphx::fwd_conv_batchnorm_rewrite opt;
    opt.apply(p2);
    EXPECT(any_of(p1, &is_batch_norm));
    EXPECT(none_of(p2, &is_batch_norm));

    p1.compile(migraphx::cpu::target{});
    p2.compile(migraphx::cpu::target{});
    auto result1 = p1.eval({});
    auto result2 = p2.eval({});
    visit_all(result1, result2)([&](auto r1, auto r2) { EXPECT(migraphx::verify_range(r1, r2)); });
}
TEST_CASE(literal_reshape)
{
    migraphx::shape xs{migraphx::shape::float_type, {1, 3, 8, 8}};
    migraphx::shape ws{migraphx::shape::float_type, {4, 3, 1, 1}};
    migraphx::shape vars{migraphx::shape::float_type, {4}};
    auto create_program = [&]() {
        migraphx::program p;
        auto reshape = [&](auto ins) {
            return p.add_instruction(migraphx::op::reshape{{1, 4, 1, 1}}, ins);
        };
        auto x        = p.add_literal(migraphx::generate_literal(xs, 1));
        auto w        = p.add_literal(migraphx::generate_literal(ws, 1));
        auto conv     = p.add_instruction(migraphx::op::convolution{}, x, w);
        auto scale    = reshape(p.add_literal(migraphx::abs(migraphx::generate_literal(vars, 1))));
        auto bias     = reshape(p.add_literal(migraphx::abs(migraphx::generate_literal(vars, 2))));
        auto mean     = reshape(p.add_literal(migraphx::abs(migraphx::generate_literal(vars, 3))));
        auto variance = reshape(p.add_literal(migraphx::abs(migraphx::generate_literal(vars, 4))));
        p.add_instruction(migraphx::op::batch_norm_inference{}, conv, scale, bias, mean, variance);
        return p;
    };

    migraphx::program p1 = create_program();
    migraphx::program p2 = create_program();
    migraphx::fwd_conv_batchnorm_rewrite opt;
    opt.apply(p2);
    EXPECT(any_of(p1, &is_batch_norm));
    EXPECT(none_of(p2, &is_batch_norm));

    p1.compile(migraphx::cpu::target{});
    p2.compile(migraphx::cpu::target{});
    auto result1 = p1.eval({});
    auto result2 = p2.eval({});
    visit_all(result1, result2)([&](auto r1, auto r2) { EXPECT(migraphx::verify_range(r1, r2)); });
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/gpu/adjust_allocation.cpp
0 → 100644
View file @ 33a41ba0
#include <migraphx/gpu/adjust_allocation.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/auto_contiguous.hpp>
#include <migraphx/eliminate_contiguous.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/op/add.hpp>
#include <migraphx/op/transpose.hpp>
#include <migraphx/op/contiguous.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/op/tanh.hpp>
#include <basic_ops.hpp>
#include <test.hpp>
struct lowering_target
{
    std::string name() const { return "gpu::lowering"; }
    std::vector<migraphx::pass> get_passes(migraphx::context& gctx) const
    {
        auto& ctx = migraphx::any_cast<migraphx::gpu::context>(gctx);
        return {migraphx::auto_contiguous{},
                migraphx::gpu::lowering{ctx},
                migraphx::dead_code_elimination{},
                migraphx::eliminate_contiguous{},
                migraphx::dead_code_elimination{}};
    }
    migraphx::gpu::context get_context() const { return migraphx::gpu::context{}; }
};
TEST_CASE(tanh_shape)
{
    auto create_program = [] {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {2, 3}};
        auto x   = p.add_parameter("x", s);
        auto tx  = p.add_instruction(migraphx::op::transpose{{1, 0}}, x);
        auto txh = p.add_instruction(migraphx::op::tanh{}, tx);
        auto sum = p.add_instruction(migraphx::op::add{}, txh, txh);
        p.add_instruction(migraphx::op::contiguous{}, sum);

        return p;
    };

    auto p1 = create_program();
    auto p2 = create_program();
    EXPECT(p1 == p2);

    p1.compile(lowering_target{});
    p2.compile(lowering_target());
    EXPECT(p1 == p2);

    for(auto ins : iterator_for(p1))
    {
        if(ins->name() == "hip::allocate")
        {
            migraphx::shape new_s{migraphx::shape::float_type, {3, 2}, {1, 3}};
            ins->replace(migraphx::gpu::hip_allocate{new_s});
        }
    }
    EXPECT(p1 != p2);

    migraphx::run_passes(p2, {migraphx::gpu::adjust_allocation{}, migraphx::dead_code_elimination{}});
    EXPECT(p1 == p2);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/gpu/miopen.cpp
View file @ 33a41ba0
...
...
@@ -10,6 +10,7 @@
#include <migraphx/type_name.hpp>
#include <migraphx/verify_args.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/quantization.hpp>
#include <miopen/miopen.h>
...
...
@@ -236,8 +237,7 @@ struct test_exp : verify_program<test_exp>
    {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {6}};
        std::vector<float> data{0.1f, 0.2f, 1.f, 2.f, 0.6f, 10.f};
        auto x = p.add_literal(s, data);
        auto x = p.add_instruction(migraphx::op::abs{}, p.add_parameter("x", s));
        p.add_instruction(migraphx::op::exp{}, x);
        return p;
    }
...
...
@@ -249,8 +249,7 @@ struct test_log : verify_program<test_log>
    {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {6}};
        std::vector<float> data{0.1f, 0.2f, 1.f, 2.f, 0.6f, 100.f};
        auto x = p.add_literal(s, data);
        auto x = p.add_instruction(migraphx::op::abs{}, p.add_parameter("x", s));
        p.add_instruction(migraphx::op::log{}, x);
        return p;
    }
...
...
@@ -327,6 +326,34 @@ struct test_tanh : verify_program<test_tanh>
    }
};

struct test_trans_tanh : verify_program<test_trans_tanh>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto x     = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
        auto tx    = p.add_instruction(migraphx::op::transpose{{0, 1, 3, 2}}, x);
        auto tanhx = p.add_instruction(migraphx::op::tanh{}, tx);
        auto r     = p.add_instruction(migraphx::op::add{}, tanhx, tanhx);
        p.add_instruction(migraphx::op::contiguous{}, r);

        return p;
    }
};

struct test_slice_sin : verify_program<test_slice_sin>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto l = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {2, 2}});
        auto t = p.add_instruction(migraphx::op::slice{{1}, {1}, {2}}, l);
        p.add_instruction(migraphx::op::sin{}, t);

        return p;
    }
};

struct test_asin : verify_program<test_asin>
{
    migraphx::program create_program() const
...
...
@@ -371,7 +398,7 @@ struct test_scale : verify_program<test_scale>
        migraphx::shape s{migraphx::shape::float_type, {3}};
        auto x     = p.add_parameter("x", s);
        auto y     = p.add_parameter("y", migraphx::shape::float_type);
        auto scale = p.add_instruction(migraphx::op::scalar{s}, y);
        auto scale = p.add_instruction(migraphx::op::scalar{s.lens()}, y);
        p.add_instruction(migraphx::op::mul{}, x, scale);
        return p;
    }
...
...
@@ -417,7 +444,7 @@ struct test_triadd2 : verify_program<test_triadd2>
        auto x   = p.add_parameter("x", s);
        auto y   = p.add_parameter("y", s);
        auto z   = p.add_parameter("z", b);
        auto zb  = p.add_instruction(migraphx::op::broadcast{1, s}, z);
        auto zb  = p.add_instruction(migraphx::op::broadcast{1, s.lens()}, z);
        auto sum = p.add_instruction(migraphx::op::add{}, x, y);
        p.add_instruction(migraphx::op::add{}, sum, zb);
        return p;
...
...
@@ -432,7 +459,7 @@ struct test_add_broadcast : verify_program<test_add_broadcast>
        migraphx::shape s{migraphx::shape::float_type, {3}};
        auto x  = p.add_parameter("x", {migraphx::shape::float_type, {2, 2, 3}});
        auto y  = p.add_parameter("y", {migraphx::shape::float_type, {2, 2}});
        auto by = p.add_instruction(migraphx::op::broadcast{0, x->get_shape()}, y);
        auto by = p.add_instruction(migraphx::op::broadcast{0, x->get_shape().lens()}, y);
        p.add_instruction(migraphx::op::add{}, x, by);
        return p;
    }
...
...
@@ -446,7 +473,7 @@ struct test_add_broadcast2 : verify_program<test_add_broadcast2>
        migraphx::shape s{migraphx::shape::float_type, {3}};
        auto x  = p.add_parameter("x", {migraphx::shape::float_type, {2, 3, 4}});
        auto y  = p.add_parameter("y", {migraphx::shape::float_type, {3}});
        auto by = p.add_instruction(migraphx::op::broadcast{1, x->get_shape()}, y);
        auto by = p.add_instruction(migraphx::op::broadcast{1, x->get_shape().lens()}, y);
        p.add_instruction(migraphx::op::add{}, x, by);
        return p;
    }
...
...
@@ -460,7 +487,7 @@ struct test_add_broadcast3 : verify_program<test_add_broadcast3>
        migraphx::shape s{migraphx::shape::float_type, {3}};
        auto x  = p.add_parameter("x", {migraphx::shape::float_type, {2, 4, 5}});
        auto y  = p.add_parameter("y", {migraphx::shape::float_type, {4}});
        auto by = p.add_instruction(migraphx::op::broadcast{1, x->get_shape()}, y);
        auto by = p.add_instruction(migraphx::op::broadcast{1, x->get_shape().lens()}, y);
        p.add_instruction(migraphx::op::add{}, x, by);
        return p;
    }
...
...
@@ -474,7 +501,7 @@ struct test_add_broadcast4 : verify_program<test_add_broadcast4>
        migraphx::shape s{migraphx::shape::float_type, {3}};
        auto x  = p.add_parameter("x", {migraphx::shape::float_type, {2, 3, 5}});
        auto y  = p.add_parameter("y", {migraphx::shape::float_type, {3}});
        auto by = p.add_instruction(migraphx::op::broadcast{1, x->get_shape()}, y);
        auto by = p.add_instruction(migraphx::op::broadcast{1, x->get_shape().lens()}, y);
        p.add_instruction(migraphx::op::add{}, x, by);
        return p;
    }
...
...
@@ -488,7 +515,7 @@ struct test_add_broadcast5 : verify_program<test_add_broadcast5>
        migraphx::shape s{migraphx::shape::float_type, {3}};
        auto x  = p.add_parameter("x", {migraphx::shape::float_type, {2, 4, 8}});
        auto y  = p.add_parameter("y", {migraphx::shape::float_type, {4}});
        auto by = p.add_instruction(migraphx::op::broadcast{1, x->get_shape()}, y);
        auto by = p.add_instruction(migraphx::op::broadcast{1, x->get_shape().lens()}, y);
        p.add_instruction(migraphx::op::add{}, x, by);
        return p;
    }
...
...
@@ -503,7 +530,7 @@ struct test_triadd_broadcast : verify_program<test_triadd_broadcast>
        auto x   = p.add_parameter("x", {migraphx::shape::float_type, {2, 2, 3}});
        auto y   = p.add_parameter("y", {migraphx::shape::float_type, {2, 2}});
        auto z   = p.add_parameter("z", {migraphx::shape::float_type, {2, 2, 3}});
        auto by  = p.add_instruction(migraphx::op::broadcast{0, x->get_shape()}, y);
        auto by  = p.add_instruction(migraphx::op::broadcast{0, x->get_shape().lens()}, y);
        auto sum = p.add_instruction(migraphx::op::add{}, x, by);
        p.add_instruction(migraphx::op::add{}, sum, z);
        return p;
...
...
@@ -535,20 +562,20 @@ struct test_sub2 : verify_program<test_sub2>
        auto x    = p.add_parameter("x", s);
        auto y    = p.add_parameter("y", s);
        auto z    = p.add_parameter("z", b);
        auto zb   = p.add_instruction(migraphx::op::broadcast{1, s}, z);
        auto zb   = p.add_instruction(migraphx::op::broadcast{1, s.lens()}, z);
        auto diff = p.add_instruction(migraphx::op::sub{}, x, y);
        p.add_instruction(migraphx::op::sub{}, diff, zb);
        return p;
    }
};

struct test_softmax : verify_program<test_softmax>
struct test_softmax1 : verify_program<test_softmax1>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {5, 3, 4, 2}});
        p.add_instruction(migraphx::op::softmax{}, x);
        auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {5, 3, 3, 4}});
        p.add_instruction(migraphx::op::softmax{0}, x);
        return p;
    }
};
...
...
@@ -565,6 +592,25 @@ struct test_softmax2 : verify_program<test_softmax2>
}
};
template <int Axis>
struct test_softmax : verify_program<test_softmax<Axis>>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {3, 4, 5, 6}};
        auto param = p.add_parameter("0", s);
        p.add_instruction(migraphx::op::softmax{Axis}, param);
        return p;
    }
};
template struct test_softmax<0>;
template struct test_softmax<1>;
template struct test_softmax<2>;
template struct test_softmax<3>;

struct test_conv : verify_program<test_conv>
{
    migraphx::program create_program() const
...
...
@@ -674,6 +720,21 @@ struct test_abs : verify_program<test_abs>
}
};
struct test_trans_abs : verify_program<test_trans_abs>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto x    = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
        auto tx   = p.add_instruction(migraphx::op::transpose{{0, 1, 3, 2}}, x);
        auto absx = p.add_instruction(migraphx::op::abs{}, tx);
        auto r    = p.add_instruction(migraphx::op::add{}, absx, absx);
        p.add_instruction(migraphx::op::contiguous{}, r);

        return p;
    }
};

struct test_leaky_relu : verify_program<test_leaky_relu>
{
    migraphx::program create_program() const
...
...
@@ -870,7 +931,7 @@ struct test_gemm_transposeab : verify_program<test_gemm_transposeab>
    }
};

struct gemm_mutli_dim_2
struct gemm_multi_dim_2 : verify_program<gemm_multi_dim_2>
{
    migraphx::program create_program() const
    {
...
...
@@ -886,7 +947,127 @@ struct gemm_mutli_dim_2
}
};
struct gemm_2args_mm_1 : verify_program<gemm_2args_mm_1>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape m1_shape{migraphx::shape::float_type, {2, 2, 3}};
        migraphx::shape m2_shape{migraphx::shape::float_type, {1, 3, 4}};
        auto l1  = p.add_parameter("1", m1_shape);
        auto l2  = p.add_parameter("2", m2_shape);
        auto bl2 = p.add_instruction(migraphx::op::multibroadcast{{2, 3, 4}}, l2);
        p.add_instruction(migraphx::op::dot{}, l1, bl2);

        return p;
    }
};
struct gemm_2args_mm_2 : verify_program<gemm_2args_mm_2>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape m1_shape{migraphx::shape::float_type, {2, 2, 3}};
        migraphx::shape m2_shape{migraphx::shape::float_type, {3, 4}};
        auto l1  = p.add_parameter("1", m1_shape);
        auto l2  = p.add_parameter("2", m2_shape);
        auto bl2 = p.add_instruction(migraphx::op::multibroadcast{{2, 3, 4}}, l2);
        p.add_instruction(migraphx::op::dot{}, l1, bl2);

        return p;
    }
};
struct gemm_2args_mm_3 : verify_program<gemm_2args_mm_3>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape m1_shape{migraphx::shape::float_type, {1, 2, 3}};
        migraphx::shape m2_shape{migraphx::shape::float_type, {3, 3, 4}};
        auto l1  = p.add_parameter("1", m1_shape);
        auto bl1 = p.add_instruction(migraphx::op::multibroadcast{{3, 2, 3}}, l1);
        auto l2  = p.add_parameter("2", m2_shape);
        p.add_instruction(migraphx::op::dot{}, bl1, l2);

        return p;
    }
};
struct gemm_2args_mm_4 : verify_program<gemm_2args_mm_4>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape m1_shape{migraphx::shape::float_type, {2, 3}};
        migraphx::shape m2_shape{migraphx::shape::float_type, {3, 3, 4}};
        auto l1  = p.add_parameter("1", m1_shape);
        auto bl1 = p.add_instruction(migraphx::op::multibroadcast{{3, 2, 3}}, l1);
        auto l2  = p.add_parameter("2", m2_shape);
        p.add_instruction(migraphx::op::dot{}, bl1, l2);

        return p;
    }
};
struct gemm_2args_mm_5 : verify_program<gemm_2args_mm_5>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape m1_shape{migraphx::shape::float_type, {2, 1, 2, 3}};
        migraphx::shape m2_shape{migraphx::shape::float_type, {2, 3, 3, 4}};
        auto l1  = p.add_parameter("1", m1_shape);
        auto bl1 = p.add_instruction(migraphx::op::multibroadcast{{2, 3, 2, 3}}, l1);
        auto l2  = p.add_parameter("2", m2_shape);
        p.add_instruction(migraphx::op::dot{}, bl1, l2);

        return p;
    }
};
struct gemm_2args_mm_6 : verify_program<gemm_2args_mm_6>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape m1_shape{migraphx::shape::float_type, {2, 1, 2, 3}};
        migraphx::shape m2_shape{migraphx::shape::float_type, {1, 3, 3, 4}};
        auto l1  = p.add_parameter("1", m1_shape);
        auto bl1 = p.add_instruction(migraphx::op::multibroadcast{{2, 3, 2, 3}}, l1);
        auto l2  = p.add_parameter("2", m2_shape);
        auto bl2 = p.add_instruction(migraphx::op::multibroadcast{{2, 3, 3, 4}}, l2);
        p.add_instruction(migraphx::op::dot{}, bl1, bl2);

        return p;
    }
};
struct gemm_2args_mm_7 : verify_program<gemm_2args_mm_7>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape m1_shape{migraphx::shape::float_type, {2, 3}};
        migraphx::shape m2_shape{migraphx::shape::float_type, {2, 3, 3, 4}};
        auto l1  = p.add_parameter("1", m1_shape);
        auto bl1 = p.add_instruction(migraphx::op::multibroadcast{{2, 3, 2, 3}}, l1);
        auto l2  = p.add_parameter("2", m2_shape);
        p.add_instruction(migraphx::op::dot{}, bl1, l2);

        return p;
    }
};
struct gemm_multi_dim_2_3 : verify_program<gemm_multi_dim_2_3>
{
    migraphx::program create_program() const
    {
...
...
@@ -902,31 +1083,189 @@ struct gemm_mutli_dim_2_3
}
};
struct test_contiguous : verify_program<test_contiguous>
struct gemm_2args_vv : verify_program<gemm_2args_vv>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {4, 4, 4, 3}, {48, 4, 1, 16}};
        auto x = p.add_parameter("x", s);
        p.add_instruction(migraphx::op::contiguous{}, x);
        EXPECT(p.get_shape().standard());
        migraphx::shape m1_shape{migraphx::shape::float_type, {8}};
        migraphx::shape m2_shape{migraphx::shape::float_type, {8}};
        auto l1     = p.add_parameter("1", m1_shape);
        auto ul1    = p.add_instruction(migraphx::op::unsqueeze{{0}}, l1);
        auto l2     = p.add_parameter("2", m2_shape);
        auto ul2    = p.add_instruction(migraphx::op::unsqueeze{{1}}, l2);
        float alpha = 0.23f;
        auto res    = p.add_instruction(migraphx::op::dot{alpha}, ul1, ul2);
        auto sres   = p.add_instruction(migraphx::op::squeeze{{0}}, res);
        p.add_instruction(migraphx::op::squeeze{{0}}, sres);

        return p;
    }
};
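Numerically, the gemm_2args_vv case above reduces to a scaled inner product: the two length-8 vectors are unsqueezed to 1x8 and 8x1, multiplied with dot{alpha}, and squeezed back to a scalar. As a rough reference for the value being verified, written in plain C++ independent of MIGraphX (the helper name is made up for illustration):

// Reference for gemm_2args_vv: alpha * (v1 . v2); the test uses alpha = 0.23f.
#include <numeric>
#include <vector>

float scaled_dot_ref(const std::vector<float>& v1, const std::vector<float>& v2, float alpha)
{
    // std::inner_product accumulates v1[i] * v2[i]; dot{alpha} scales that sum.
    return alpha * std::inner_product(v1.begin(), v1.end(), v2.begin(), 0.0f);
}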
struct gemm_2args_mv : verify_program<gemm_2args_mv>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape m1_shape{migraphx::shape::float_type, {3, 5}};
        migraphx::shape m2_shape{migraphx::shape::float_type, {5}};
        auto l1  = p.add_parameter("1", m1_shape);
        auto l2  = p.add_parameter("2", m2_shape);
        auto ul2 = p.add_instruction(migraphx::op::unsqueeze{{1}}, l2);
        p.add_instruction(migraphx::op::dot{}, l1, ul2);

        return p;
    }
};
struct gemm_2args_bmv : verify_program<gemm_2args_bmv>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape m1_shape{migraphx::shape::float_type, {2, 3, 3, 5}};
        migraphx::shape m2_shape{migraphx::shape::float_type, {5}};
        auto l1   = p.add_parameter("1", m1_shape);
        auto l2   = p.add_parameter("2", m2_shape);
        auto ul2  = p.add_instruction(migraphx::op::unsqueeze{{1}}, l2);
        auto bul2 = p.add_instruction(migraphx::op::multibroadcast{{2, 3, 5, 1}}, ul2);
        p.add_instruction(migraphx::op::dot{}, l1, bul2);

        return p;
    }
};
struct gemm_2args_vm : verify_program<gemm_2args_vm>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape m1_shape{migraphx::shape::float_type, {5}};
        migraphx::shape m2_shape{migraphx::shape::float_type, {5, 4}};
        auto l1  = p.add_parameter("1", m1_shape);
        auto ul1 = p.add_instruction(migraphx::op::unsqueeze{{0}}, l1);
        auto l2  = p.add_parameter("2", m2_shape);
        auto res = p.add_instruction(migraphx::op::dot{}, ul1, l2);
        p.add_instruction(migraphx::op::squeeze{{0}}, res);

        return p;
    }
};
struct gemm_2args_vbm : verify_program<gemm_2args_vbm>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape m1_shape{migraphx::shape::float_type, {5}};
        migraphx::shape m2_shape{migraphx::shape::float_type, {2, 2, 5, 4}};
        auto l1   = p.add_parameter("1", m1_shape);
        auto ul1  = p.add_instruction(migraphx::op::unsqueeze{{0}}, l1);
        auto bul1 = p.add_instruction(migraphx::op::multibroadcast{{2, 2, 1, 5}}, ul1);
        auto l2   = p.add_parameter("2", m2_shape);
        auto res  = p.add_instruction(migraphx::op::dot{}, bul1, l2);
        p.add_instruction(migraphx::op::squeeze{{2}}, res);

        return p;
    }
};
struct gemm_multi_3args : verify_program<gemm_multi_3args>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape m1_shape{migraphx::shape::float_type, {2, 3, 2, 3}};
        migraphx::shape m2_shape{migraphx::shape::float_type, {2, 3, 3, 2}};
        migraphx::shape m3_shape{migraphx::shape::float_type, {2, 3, 2, 2}};
        auto l1 = p.add_parameter("1", m1_shape);
        auto l2 = p.add_parameter("2", m2_shape);
        auto l3 = p.add_parameter("3", m3_shape);

        float alpha = 0.35;
        float beta  = 0.41;
        p.add_instruction(migraphx::op::dot{alpha, beta}, l1, l2, l3);

        return p;
    }
};
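gemm_multi_3args and the variants that follow feed a C operand along with A and B, so dot{alpha, beta} behaves like a GEMM-style update, D = alpha * A * B + beta * C, applied to each batch slice. A minimal row-major reference of that update in plain C++ (not MIGraphX code; the function name and layout are assumptions made for illustration):

// Reference update for the three-argument dot{alpha, beta} tests:
// D = alpha * A * B + beta * C, with A (m x k), B (k x n), C (m x n), row-major.
#include <vector>

std::vector<float> gemm_ref(const std::vector<float>& a,
                            const std::vector<float>& b,
                            const std::vector<float>& c,
                            int m, int k, int n, float alpha, float beta)
{
    std::vector<float> d(m * n);
    for(int i = 0; i < m; i++)
        for(int j = 0; j < n; j++)
        {
            float acc = 0.0f;
            for(int t = 0; t < k; t++)
                acc += a[i * k + t] * b[t * n + j];
            d[i * n + j] = alpha * acc + beta * c[i * n + j];
        }
    return d;
}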
struct gemm_multi_3args_c25 : verify_program<gemm_multi_3args_c25>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape m1_shape{migraphx::shape::float_type, {2, 3}};
        migraphx::shape m2_shape{migraphx::shape::float_type, {3, 5}};
        migraphx::shape m3_shape{migraphx::shape::float_type, {2, 5}};
        auto l1 = p.add_parameter("1", m1_shape);
        auto l2 = p.add_parameter("2", m2_shape);
        auto l3 = p.add_parameter("3", m3_shape);

        float alpha = 0.35;
        float beta  = 0.41;
        p.add_instruction(migraphx::op::dot{alpha, beta}, l1, l2, l3);

        return p;
    }
};
struct gemm_multi_3args_beta0 : verify_program<gemm_multi_3args_beta0>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape m1_shape{migraphx::shape::float_type, {1, 2, 3}};
        migraphx::shape m2_shape{migraphx::shape::float_type, {1, 3, 4}};
        migraphx::shape m3_shape{migraphx::shape::float_type, {1, 2, 4}};
        auto l1 = p.add_parameter("1", m1_shape);
        auto l2 = p.add_parameter("2", m2_shape);
        auto l3 = p.add_parameter("3", m3_shape);

        float alpha = 1.0f;
        float beta  = 0.0f;
        p.add_instruction(migraphx::op::dot{alpha, beta}, l1, l2, l3);

        return p;
    }
};
struct test_eliminate_contiguous : verify_program<test_eliminate_contiguous>
struct gemm_multi_3args_alpha0 : verify_program<gemm_multi_3args_alpha0>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {2, 3, 4, 5}};
        auto seq = p.add_parameter("seq", s);
        std::vector<int64_t> perm{0, 2, 1, 3};
        auto tran_seq = p.add_instruction(migraphx::op::transpose{perm}, seq);
        std::vector<int64_t> out_shape{0, 0, -1};
        p.add_instruction(migraphx::op::reshape{out_shape}, tran_seq);
        migraphx::shape m1_shape{migraphx::shape::float_type, {1, 2, 3}};
        migraphx::shape m2_shape{migraphx::shape::float_type, {1, 3, 4}};
        migraphx::shape m3_shape{migraphx::shape::float_type, {1, 2, 4}};
        auto l1 = p.add_parameter("1", m1_shape);
        auto l2 = p.add_parameter("2", m2_shape);
        auto l3 = p.add_parameter("3", m3_shape);

        float alpha = 0.0f;
        float beta  = 1.0f;
        p.add_instruction(migraphx::op::dot{alpha, beta}, l1, l2, l3);

        return p;
    }
};
struct test_contiguous : verify_program<test_contiguous>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {4, 4, 4, 3}, {48, 4, 1, 16}};
        auto x = p.add_parameter("x", s);
        p.add_instruction(migraphx::op::contiguous{}, x);
        EXPECT(p.get_shape().standard());
        return p;
    }
};
...
...
@@ -991,6 +1330,17 @@ struct test_batchnorm_inference : verify_program<test_batchnorm_inference>
}
};
struct test_clip : verify_program<test_clip>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3}});
        p.add_instruction(migraphx::op::clip{6.0, 0.0}, x);
        return p;
    }
};

struct test_conv_bn : verify_program<test_conv_bn>
{
    migraphx::program create_program() const
...
...
@@ -2995,7 +3345,6 @@ template struct test_logsoftmax<0>;
template struct test_logsoftmax<1>;
template struct test_logsoftmax<2>;
template struct test_logsoftmax<3>;
template struct test_logsoftmax<4>;

template <int Axis>
struct test_logsoftmax_1 : verify_program<test_logsoftmax_1<Axis>>
...
...
@@ -3012,6 +3361,71 @@ struct test_logsoftmax_1 : verify_program<test_logsoftmax_1<Axis>>
};
template struct test_logsoftmax_1<0>;
template struct test_logsoftmax_1<1>;
struct test_fp32_fp16_lall : verify_program<test_fp32_fp16_lall>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {2, 3}};
        std::vector<float> data(2 * 3);
        std::iota(data.begin(), data.end(), 1.0f);
        auto l1 = p.add_literal(migraphx::literal(s, data));
        auto l2 = p.add_parameter("p2", s);
        p.add_instruction(migraphx::op::add{}, l1, l2);
        migraphx::quantize(p, {"all"});

        return p;
    };
};

struct test_fp32_fp16_ladd : verify_program<test_fp32_fp16_ladd>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {2, 3}};
        std::vector<float> data(2 * 3);
        std::iota(data.begin(), data.end(), 1.0f);
        auto l1 = p.add_literal(migraphx::literal(s, data));
        auto l2 = p.add_parameter("p2", s);
        p.add_instruction(migraphx::op::add{}, l1, l2);
        migraphx::quantize(p, {"add"});

        return p;
    };
};
struct test_fp32_fp16_add : verify_program<test_fp32_fp16_add>
{
    migraphx::program create_program()
    {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {2, 3}};
        auto p1   = p.add_parameter("x", s);
        auto p2   = p.add_parameter("y", s);
        auto sum  = p.add_instruction(migraphx::op::add{}, p1, p2);
        auto diff = p.add_instruction(migraphx::op::sub{}, sum, p2);
        p.add_instruction(migraphx::op::add{}, diff, p1);
        migraphx::quantize(p, {"add"});

        return p;
    };
};

struct test_fp32_fp16_sub : verify_program<test_fp32_fp16_sub>
{
    migraphx::program create_program()
    {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {2, 3}};
        auto p1   = p.add_parameter("x", s);
        auto p2   = p.add_parameter("y", s);
        auto sum  = p.add_instruction(migraphx::op::add{}, p1, p2);
        auto diff = p.add_instruction(migraphx::op::sub{}, sum, p2);
        p.add_instruction(migraphx::op::add{}, diff, p1);
        migraphx::quantize(p, {"sub"});

        return p;
    };
};
int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/memory_coloring_test.cpp
View file @ 33a41ba0
#include <migraphx/memory_coloring.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/check_shapes.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/instruction.hpp>
#include <basic_ops.hpp>
...
...
@@ -18,6 +18,13 @@ struct memory_coloring_target
struct allocate
{
    migraphx::shape s{};

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return migraphx::pack(f(self.s, "shape"));
    }

    std::string name() const { return "allocate"; }
    migraphx::shape compute_shape(const std::vector<migraphx::shape>& inputs) const
    {
...
...