Commit 3a848f0d authored by Paul's avatar Paul
Browse files

Merge branch 'develop' into doc2

parents 64e8e30a d1e945da
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_REDUCE_PROD_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_REDUCE_PROD_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
// Launches the product-reduction device kernel on `stream`, reducing the
// contents of `arg` into `result`. The reduced axes are presumably implied
// by the difference between the two argument shapes — see the kernel's
// implementation file for the exact contract.
void reduce_prod(hipStream_t stream, const argument& result, const argument& arg);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
......@@ -10,7 +10,7 @@ inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void softmax(hipStream_t stream, const argument& result, const argument& arg, int axis);
void softmax(hipStream_t stream, const argument& result, const argument& arg, int64_t axis);
} // namespace device
} // namespace gpu
......
......@@ -94,6 +94,24 @@ inline convolution_descriptor make_conv(const T& op)
return c;
}
template <class T>
inline convolution_descriptor make_deconv(const T& op)
{
    // Builds a MIOpen convolution descriptor in transposed (deconvolution)
    // mode from the operator's padding/stride/dilation attributes.
    // NOTE(review): only indices [0] and [1] are read, so this assumes 2D
    // spatial dimensions — confirm callers never pass higher-rank ops.
    auto c = make_obj<convolution_descriptor>(&miopenCreateConvolutionDescriptor);
    const miopenConvolutionMode_t c_mode = miopenTranspose;
    miopenInitConvolutionDescriptor(c.get(),
                                    c_mode,
                                    op.padding[0],
                                    op.padding[1],
                                    op.stride[0],
                                    op.stride[1],
                                    op.dilation[0],
                                    op.dilation[1]);
    // Grouped deconvolution: the group count only needs to be set when it
    // is non-trivial (MIOpen defaults to 1).
    if(op.group > 1)
    {
        miopenSetConvolutionGroupCount(c.get(), op.group);
    }
    return c;
}
inline pooling_descriptor make_pooling(const migraphx::op::pooling& op)
{
miopenPoolingMode_t mode;
......
#ifndef MIGRAPHX_GUARD_RTGLIB_PRELU_HPP
#define MIGRAPHX_GUARD_RTGLIB_PRELU_HPP
#include <migraphx/gpu/oper.hpp>
#include <migraphx/gpu/device/prelu.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
// GPU lowering of the PReLU activation as a binary elementwise operator;
// the actual computation is supplied by device::prelu through the
// binary_device CRTP base (hip_prelu is passed as the derived type).
struct hip_prelu : binary_device<hip_prelu, device::prelu>
{
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
......@@ -32,6 +32,7 @@ struct reduce_op : oper<Derived>
{
std::vector<shape> in_shapes{inputs};
in_shapes.pop_back();
check_shapes{in_shapes}.standard();
return op.compute_shape(in_shapes);
}
......
#ifndef MIGRAPHX_GUARD_RTGLIB_REDUCE_PROD_HPP
#define MIGRAPHX_GUARD_RTGLIB_REDUCE_PROD_HPP
#include <migraphx/op/reduce_prod.hpp>
#include <migraphx/gpu/reduce_op.hpp>
#include <migraphx/gpu/device/reduce_prod.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
struct context;
// GPU lowering of op::reduce_prod: wires the reference operator to the
// device::reduce_prod kernel through the shared reduce_op adaptor.
struct hip_reduce_prod : reduce_op<hip_reduce_prod, op::reduce_prod, device::reduce_prod>
{
    // Prefer `= default` over an empty user-provided body so the type
    // stays trivially default-constructible where possible.
    hip_reduce_prod() = default;
    hip_reduce_prod(const op::reduce_prod& op_ref) : reduce_op(op_ref) {}
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
......@@ -18,7 +18,9 @@ shape hip_logsoftmax::compute_shape(const std::vector<shape>& inputs) const
argument
hip_logsoftmax::compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
    // Normalize a negative axis (counted from the last dimension) into the
    // [0, n_dim) range before launching the device kernel. The cast keeps
    // the arithmetic signed and avoids unsigned wrap-around when adding a
    // negative axis to the (size_t) rank.
    // Fix: the pre-merge call with the raw, un-normalized op.axis was left
    // in by the merge and ran the kernel a second time — removed.
    auto n_dim      = static_cast<int64_t>(args.front().get_shape().lens().size());
    auto tuned_axis = (op.axis < 0) ? op.axis + n_dim : op.axis;
    device::logsoftmax(ctx.get_stream().get(), args.back(), args.front(), tuned_axis);
    return args.back();
}
......
......@@ -16,6 +16,7 @@
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/convolution.hpp>
#include <migraphx/gpu/deconvolution.hpp>
#include <migraphx/gpu/quant_convolution.hpp>
#include <migraphx/gpu/contiguous.hpp>
#include <migraphx/gpu/relu.hpp>
......@@ -41,6 +42,9 @@
#include <migraphx/gpu/asin.hpp>
#include <migraphx/gpu/acos.hpp>
#include <migraphx/gpu/atan.hpp>
#include <migraphx/gpu/asinh.hpp>
#include <migraphx/gpu/acosh.hpp>
#include <migraphx/gpu/atanh.hpp>
#include <migraphx/gpu/mul.hpp>
#include <migraphx/gpu/max.hpp>
#include <migraphx/gpu/min.hpp>
......@@ -53,18 +57,20 @@
#include <migraphx/gpu/lrn.hpp>
#include <migraphx/gpu/convert.hpp>
#include <migraphx/gpu/clip.hpp>
#include <migraphx/gpu/reduce_sum.hpp>
#include <migraphx/gpu/round.hpp>
#include <migraphx/gpu/ceil.hpp>
#include <migraphx/gpu/floor.hpp>
#include <migraphx/gpu/rsqrt.hpp>
#include <migraphx/gpu/sqrt.hpp>
#include <migraphx/gpu/reduce_max.hpp>
#include <migraphx/gpu/reduce_mean.hpp>
#include <migraphx/gpu/reduce_min.hpp>
#include <migraphx/gpu/reduce_max.hpp>
#include <migraphx/gpu/reduce_prod.hpp>
#include <migraphx/gpu/reduce_sum.hpp>
#include <migraphx/gpu/pow.hpp>
#include <migraphx/gpu/sqdiff.hpp>
#include <migraphx/gpu/int8_conv_pack.hpp>
#include <migraphx/gpu/prelu.hpp>
#include <utility>
#include <functional>
#include <algorithm>
......@@ -79,6 +85,7 @@ struct miopen_apply
const lowering* pass = nullptr;
std::unordered_map<std::string, std::function<instruction_ref(instruction_ref)>> apply_map{};
instruction_ref last{};
std::unordered_map<instruction_ref, std::string> prog_output_names{};
context& get_context()
{
......@@ -94,11 +101,33 @@ struct miopen_apply
(void)i;
}
void create_output_names()
{
    // Remember the aliased final instruction; only programs that end in an
    // explicit @return get named outputs.
    this->last = instruction::get_output_alias(std::prev(prog->end()));
    if(this->last->name() != "@return")
        return;

    // Assign a stable parameter name ("#output_<i>") to the output alias
    // of each value returned by the program, in return order.
    std::size_t index = 0;
    for(const auto& out : last->inputs())
    {
        auto alias               = instruction::get_output_alias(out);
        prog_output_names[alias] = "#output_" + std::to_string(index++);
    }
}
void init()
{
assert(prog != nullptr);
assert(pass != nullptr);
this->last = instruction::get_output_alias(std::prev(prog->end()));
create_output_names();
add_miopen_simple_op<miopen_abs>("abs", make_abs);
......@@ -119,6 +148,9 @@ struct miopen_apply
add_generic_op<hip_asin>("asin");
add_generic_op<hip_acos>("acos");
add_generic_op<hip_atan>("atan");
add_generic_op<hip_asinh>("asinh");
add_generic_op<hip_acosh>("acosh");
add_generic_op<hip_atanh>("atanh");
add_generic_op<hip_sqrt>("sqrt");
add_generic_op<hip_mul>("mul");
add_generic_op<hip_div>("div");
......@@ -129,6 +161,7 @@ struct miopen_apply
add_generic_op<hip_pow>("pow");
add_generic_op<hip_sqdiff>("sqdiff");
add_generic_op<hip_relu>("relu");
add_generic_op<hip_prelu>("prelu");
add_generic_op<hip_sign>("sign");
add_generic_op<hip_sigmoid>("sigmoid");
add_generic_op<hip_ceil>("ceil");
......@@ -144,15 +177,17 @@ struct miopen_apply
add_extend_op<hip_pad, op::pad>("pad");
add_extend_op<hip_convert, op::convert>("convert");
add_extend_op<hip_clip, op::clip>("clip");
add_extend_op<hip_reduce_sum, op::reduce_sum>("reduce_sum");
add_extend_op<hip_reduce_max, op::reduce_max>("reduce_max");
add_extend_op<hip_reduce_mean, op::reduce_mean>("reduce_mean");
add_extend_op<hip_reduce_min, op::reduce_min>("reduce_min");
add_extend_op<hip_reduce_max, op::reduce_max>("reduce_max");
add_extend_op<hip_reduce_prod, op::reduce_prod>("reduce_prod");
add_extend_op<hip_reduce_sum, op::reduce_sum>("reduce_sum");
add_gemm_op<op::dot>("dot");
add_gemm_op<op::quant_dot>("quant_dot");
add_lrn_op();
add_convolution_op();
add_deconvolution_op();
add_quant_convolution_op();
add_pooling_op();
add_batch_norm_inference_op();
......@@ -162,17 +197,37 @@ struct miopen_apply
{
if(not pass->offload_copy)
return;
for(auto ins : iterator_for(*prog))
{
if(ins->name() != "@param")
continue;
auto pos = std::next(ins);
auto a = insert_allocation(pos, ins->get_shape());
auto c = prog->insert_instruction(pos, hip_copy_to_gpu{}, ins, a);
prog->replace_instruction(ins, c);
}
auto end = std::prev(prog->end());
prog->add_instruction(hip_copy_from_gpu{}, end);
// return instruction
auto ret = std::prev(prog->end());
if(ret->name() == "@return")
{
auto& inputs = ret->inputs();
// each input of ret need to be copied from gpu to host, and replace
// output with copy output
for(auto& in : inputs)
{
auto p_output = prog->insert_instruction(ret, hip_copy_from_gpu{}, in);
instruction::replace_argument(ret, in, p_output);
}
}
// else branch to handle legacy program without the return instruction
else
{
prog->add_instruction(hip_copy_from_gpu{}, ret);
}
}
void apply()
......@@ -186,20 +241,30 @@ struct miopen_apply
check_shape(s, apply_map.at(it->name())(it));
}
}
copy_params();
}
instruction_ref insert_allocation(instruction_ref ins, const shape& s, std::string tag = "")
{
    // Fix: the merge left the old `not pass->offload_copy and ins == last`
    // branch in front of the new logic, which made the `ins == last` branch
    // below unreachable; the stale branch is removed.
    //
    // With offload copies every result lives in a fresh device buffer.
    if(pass->offload_copy)
    {
        auto result = prog->insert_instruction(ins, hip_allocate{s, std::move(tag)});
        return result;
    }

    // Without offload copies, program outputs are backed by caller-supplied
    // parameters: either a named "#output_N" parameter (programs ending in
    // @return) or the legacy single "output" parameter.
    auto ins_alias = instruction::get_output_alias(ins);
    if(last->name() == "@return" and tag.empty() and prog_output_names.count(ins_alias) > 0)
    {
        return prog->add_parameter(prog_output_names[ins_alias], s);
    }
    else if(ins == last and tag.empty())
    {
        return prog->add_parameter("output", s);
    }

    // Intermediate (or tagged, e.g. workspace) values get a device buffer.
    return prog->insert_instruction(ins, hip_allocate{s, std::move(tag)});
}
void add_convolution_op()
......@@ -218,6 +283,22 @@ struct miopen_apply
});
}
void add_deconvolution_op()
{
apply_map.emplace("deconvolution", [=](instruction_ref ins) {
auto&& op = any_cast<op::deconvolution>(ins->get_operator());
auto conv = miopen_deconvolution{op, make_deconv(op)};
auto ws = conv.compile(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
auto workspace = insert_allocation(ins, ws, "workspace");
auto output = insert_allocation(ins, ins->get_shape());
return prog->replace_instruction(
ins, conv, ins->inputs().at(0), ins->inputs().at(1), workspace, output);
});
}
template <class Op>
void add_gemm_op(std::string name)
{
......
......@@ -14,7 +14,9 @@ shape hip_softmax::compute_shape(const std::vector<shape>& inputs) const
argument hip_softmax::compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
    // Normalize a negative axis (counted from the last dimension) into the
    // [0, n_dim) range before launching the device kernel. The cast keeps
    // the arithmetic signed and avoids unsigned wrap-around when adding a
    // negative axis to the (size_t) rank.
    // Fix: the pre-merge call with the raw, un-normalized op.axis was left
    // in by the merge and ran the kernel a second time — removed.
    auto n_dim      = static_cast<int64_t>(args.front().get_shape().lens().size());
    auto tuned_axis = (op.axis < 0) ? op.axis + n_dim : op.axis;
    device::softmax(ctx.get_stream().get(), args.back(), args.front(), tuned_axis);
    return args.back();
}
......
......@@ -32,8 +32,9 @@ struct tf_parser
node_map nodes;
std::vector<tensorflow::NodeDef> input_nodes;
std::unordered_map<std::string, instruction_ref> instructions;
program prog = program();
bool is_nhwc = true;
program prog = program();
bool is_nhwc = true;
unsigned int batch_size = 1;
std::unordered_map<std::string, op_func> ops;
......@@ -189,6 +190,8 @@ struct tf_parser
add_binary_op("SquaredDifference", op::sqdiff{});
add_binary_op("Sub", op::sub{});
add_mem_op("ArgMax", &tf_parser::parse_arg_op<op::argmax>, false);
add_mem_op("ArgMin", &tf_parser::parse_arg_op<op::argmin>, false);
add_mem_op("AvgPool", &tf_parser::parse_pooling);
add_mem_op("BatchMatMul", &tf_parser::parse_matmul, false);
add_mem_op("BatchMatMulV2", &tf_parser::parse_matmul, false);
......@@ -208,6 +211,7 @@ struct tf_parser
add_mem_op("Pack", &tf_parser::parse_pack, false);
add_mem_op("Pad", &tf_parser::parse_pad);
add_mem_op("Reshape", &tf_parser::parse_reshape, false);
add_mem_op("Shape", &tf_parser::parse_shape, false);
add_mem_op("Slice", &tf_parser::parse_slice, false);
add_mem_op("Split", &tf_parser::parse_split, false);
add_mem_op("SplitV", &tf_parser::parse_split, false);
......@@ -323,6 +327,16 @@ struct tf_parser
transpose);
}
template <class Op>
instruction_ref
parse_arg_op(const std::string&, const attribute_map&, std::vector<instruction_ref> args)
{
    // TF's ArgMax/ArgMin carry the reduction axis as a constant second
    // input: evaluate it, reduce along it, then squeeze the reduced
    // dimension so the output rank matches TF's.
    // (Declared-then-assigned `axis` collapsed into a single const init.)
    const int64_t axis = args[1]->eval().at<int64_t>();
    auto ins           = prog.add_instruction(Op{axis}, args.front());
    return prog.add_instruction(op::squeeze{{axis}}, ins);
}
instruction_ref
parse_batchnorm(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
{
......@@ -768,17 +782,17 @@ struct tf_parser
return prog.add_instruction(op, make_contiguous(args[0]));
}
void parse_from(std::istream& is)
// Use a literal instruction to replace the shape since output of
// shape operator are literals in migraphx
instruction_ref
parse_shape(const std::string&, const attribute_map&, std::vector<instruction_ref> args)
{
tensorflow::GraphDef graph;
if(graph.ParseFromIstream(&is))
{
this->parse_graph(graph);
}
else
{
throw std::runtime_error("Failed reading tf file");
}
std::vector<std::size_t> arg_shape = args[0]->get_shape().lens();
std::vector<int32_t> vec_shape(arg_shape.size());
migraphx::shape s(migraphx::shape::int32_type, {arg_shape.size()});
std::transform(
arg_shape.begin(), arg_shape.end(), vec_shape.begin(), [](auto i) { return i; });
return prog.add_literal(migraphx::literal{s, vec_shape});
}
instruction_ref
......@@ -1006,6 +1020,9 @@ struct tf_parser
{
reorder_data(dims);
}
std::transform(dims.begin(), dims.end(), dims.begin(), [&](auto dim) {
return static_cast<int>(dim) <= 0 ? batch_size : dim;
});
shape s = shape{shape_type, dims};
instructions[name] = to_nhwc(prog.add_parameter(name, s));
}
......@@ -1013,6 +1030,9 @@ struct tf_parser
{
this->parse_node(p.first);
}
// Needs to add a ret instruction at the end of
// the program
}
void parse_node(const std::string& name)
......@@ -1072,6 +1092,19 @@ struct tf_parser
}
}
void parse_from(std::istream& is)
{
    // Deserialize the protobuf GraphDef from the stream, then build the
    // migraphx program from it; a parse failure is a hard error.
    tensorflow::GraphDef graph;
    if(not graph.ParseFromIstream(&is))
    {
        throw std::runtime_error("Failed reading tf file");
    }
    this->parse_graph(graph);
}
static attribute_map get_attributes(const tensorflow::NodeDef& node)
{
attribute_map result;
......@@ -1343,11 +1376,12 @@ struct tf_parser
}
};
program parse_tf(const std::string& name, bool is_nhwc)
program parse_tf(const std::string& name, tf_options options)
{
std::fstream input(name.c_str(), std::ios::in | std::ios::binary);
tf_parser parser;
parser.is_nhwc = is_nhwc;
parser.is_nhwc = options.is_nhwc;
parser.batch_size = options.batch_size;
#ifndef NDEBUG
// Log the program when it can't be parsed
......
......@@ -135,6 +135,7 @@ add_test(NAME test_tf COMMAND $<TARGET_FILE:test_tf> WORKING_DIRECTORY ${CMAKE_C
add_dependencies(tests test_tf)
add_dependencies(check test_tf)
add_subdirectory(api)
if(MIGRAPHX_ENABLE_PYTHON)
add_subdirectory(py)
endif()
......
# Registers an API test executable named test_api_<TEST_NAME> built from
# TEST_SRC, links it against the C API library, and wires it into the
# aggregate `tests` and `check` targets.
function(add_api_test TEST_NAME TEST_SRC)
set(NAME test_api_${TEST_NAME})
add_executable(${NAME} EXCLUDE_FROM_ALL ${TEST_SRC})
rocm_clang_tidy_check(${NAME})
target_link_libraries(${NAME} migraphx_c)
target_include_directories(${NAME} PUBLIC ../include)
# Run from the ONNX test-data directory so the tests can load their
# .onnx fixture files by relative name.
add_test(NAME ${NAME} COMMAND $<TARGET_FILE:${NAME}> WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_dependencies(tests ${NAME})
add_dependencies(check ${NAME})
endfunction()
add_api_test(cpu test_cpu.cpp)
if(MIGRAPHX_ENABLE_GPU)
add_api_test(gpu test_gpu.cpp)
# GPU-based tests
endif()
#include <migraphx/migraphx.h>
#include <migraphx/migraphx.hpp>
#include "test.hpp"
TEST_CASE(load_and_run)
{
    // Compiling for the cpu target must preserve the program's output
    // shapes, and evaluation must yield outputs with those same shapes.
    auto prog       = migraphx::parse_onnx("conv_relu_maxpool_test.onnx");
    auto pre_shapes = prog.get_output_shapes();
    prog.compile(migraphx::target("cpu"));
    auto post_shapes = prog.get_output_shapes();
    CHECK(pre_shapes.size() == 1);
    CHECK(pre_shapes.size() == post_shapes.size());
    CHECK(bool{pre_shapes.front() == post_shapes.front()});

    // Bind generated data to every parameter and run the program.
    migraphx::program_parameters prog_params;
    auto param_shapes = prog.get_parameter_shapes();
    for(auto&& pname : param_shapes.names())
    {
        prog_params.add(pname, migraphx::argument::generate(param_shapes[pname]));
    }
    auto results = prog.eval(prog_params);
    CHECK(pre_shapes.size() == results.size());
    CHECK(bool{pre_shapes.front() == results.front().get_shape()});
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
#include <migraphx/migraphx.h>
#include <migraphx/migraphx.hpp>
#include "test.hpp"
TEST_CASE(load_and_run)
{
    // Compiling for the gpu target (with offload copies, so host buffers
    // are usable directly) must preserve the program's output shapes, and
    // evaluation must yield outputs with those same shapes.
    auto prog       = migraphx::parse_onnx("conv_relu_maxpool_test.onnx");
    auto pre_shapes = prog.get_output_shapes();
    migraphx_compile_options options;
    options.offload_copy = true;
    prog.compile(migraphx::target("gpu"), options);
    auto post_shapes = prog.get_output_shapes();
    CHECK(pre_shapes.size() == 1);
    CHECK(pre_shapes.size() == post_shapes.size());
    CHECK(bool{pre_shapes.front() == post_shapes.front()});

    // Bind generated data to every parameter and run the program.
    migraphx::program_parameters prog_params;
    auto param_shapes = prog.get_parameter_shapes();
    for(auto&& pname : param_shapes.names())
    {
        prog_params.add(pname, migraphx::argument::generate(param_shapes[pname]));
    }
    auto results = prog.eval(prog_params);
    CHECK(pre_shapes.size() == results.size());
    CHECK(bool{pre_shapes.front() == results.front().get_shape()});
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -13,37 +13,37 @@ void literal_broadcast()
{
migraphx::program p;
p.add_literal(get_2_broadcasted());
EXPECT(not p.get_shape().standard());
EXPECT(p.get_shape().broadcasted());
EXPECT(not p.get_output_shapes().back().standard());
EXPECT(p.get_output_shapes().back().broadcasted());
run_pass(p);
EXPECT(p.get_shape().standard());
EXPECT(not p.get_shape().broadcasted());
EXPECT(p.get_output_shapes().back().standard());
EXPECT(not p.get_output_shapes().back().broadcasted());
}
TEST_CASE(literal_transpose)
{
    // Fix: the merge kept both the old `p.get_shape()` assertions and their
    // `p.get_output_shapes().back()` replacements; the stale old-API lines
    // are removed.
    migraphx::program p;
    p.add_literal(get_2x2_transposed());
    // Before the pass the literal keeps its transposed (non-standard) layout.
    EXPECT(not p.get_output_shapes().back().standard());
    EXPECT(p.get_output_shapes().back().transposed());
    run_pass(p);
    // The pass rewrites the literal into standard layout.
    EXPECT(p.get_output_shapes().back().standard());
    EXPECT(not p.get_output_shapes().back().transposed());
}
TEST_CASE(after_literal_transpose)
{
    // Fix: the merge kept both the old `p.get_shape()` assertions and their
    // `p.get_output_shapes().back()` replacements; the stale old-API lines
    // are removed.
    migraphx::program p;
    auto l = p.add_literal(get_2x2());
    EXPECT(p.get_output_shapes().back().standard());
    EXPECT(not p.get_output_shapes().back().transposed());
    // A transpose after the literal makes the program output non-standard.
    auto t = p.add_instruction(migraphx::op::transpose{{1, 0}}, l);
    p.add_instruction(pass_op{}, t);
    EXPECT(not p.get_output_shapes().back().standard());
    EXPECT(p.get_output_shapes().back().transposed());
    run_pass(p);
    // The pass restores a standard-layout output.
    EXPECT(p.get_output_shapes().back().standard());
    EXPECT(not p.get_output_shapes().back().transposed());
}
TEST_CASE(after_literal_broadcast)
......@@ -51,30 +51,30 @@ TEST_CASE(after_literal_broadcast)
migraphx::program p;
auto l1 = p.add_literal(get_2x2());
auto l2 = p.add_literal(get_2());
EXPECT(p.get_shape().standard());
EXPECT(not p.get_shape().broadcasted());
EXPECT(p.get_output_shapes().back().standard());
EXPECT(not p.get_output_shapes().back().broadcasted());
auto b = p.add_instruction(migraphx::op::broadcast{0, l1->get_shape().lens()}, l2);
p.add_instruction(pass_op{}, b);
EXPECT(not p.get_shape().standard());
EXPECT(p.get_shape().broadcasted());
EXPECT(not p.get_output_shapes().back().standard());
EXPECT(p.get_output_shapes().back().broadcasted());
run_pass(p);
EXPECT(p.get_shape().standard());
EXPECT(not p.get_shape().broadcasted());
EXPECT(p.get_output_shapes().back().standard());
EXPECT(not p.get_output_shapes().back().broadcasted());
}
TEST_CASE(after_param_transpose)
{
    // Fix: the merge kept both the old `p.get_shape()` assertions and their
    // `p.get_output_shapes().back()` replacements; the stale old-API lines
    // are removed.
    migraphx::program p;
    auto l = p.add_parameter("2x2", {migraphx::shape::float_type, {2, 2}});
    EXPECT(p.get_output_shapes().back().standard());
    EXPECT(not p.get_output_shapes().back().transposed());
    // A transpose after the parameter makes the program output non-standard.
    auto t = p.add_instruction(migraphx::op::transpose{{1, 0}}, l);
    p.add_instruction(pass_op{}, t);
    EXPECT(not p.get_output_shapes().back().standard());
    EXPECT(p.get_output_shapes().back().transposed());
    run_pass(p);
    // The pass restores a standard-layout output.
    EXPECT(p.get_output_shapes().back().standard());
    EXPECT(not p.get_output_shapes().back().transposed());
}
TEST_CASE(after_param_broadcast)
......@@ -82,15 +82,15 @@ TEST_CASE(after_param_broadcast)
migraphx::program p;
auto l1 = p.add_parameter("2x2", {migraphx::shape::float_type, {2, 2}});
auto l2 = p.add_parameter("2", {migraphx::shape::float_type, {2}});
EXPECT(p.get_shape().standard());
EXPECT(not p.get_shape().broadcasted());
EXPECT(p.get_output_shapes().back().standard());
EXPECT(not p.get_output_shapes().back().broadcasted());
auto b = p.add_instruction(migraphx::op::broadcast{0, l1->get_shape().lens()}, l2);
p.add_instruction(pass_op{}, b);
EXPECT(not p.get_shape().standard());
EXPECT(p.get_shape().broadcasted());
EXPECT(not p.get_output_shapes().back().standard());
EXPECT(p.get_output_shapes().back().broadcasted());
run_pass(p);
EXPECT(p.get_shape().standard());
EXPECT(not p.get_shape().broadcasted());
EXPECT(p.get_output_shapes().back().standard());
EXPECT(not p.get_output_shapes().back().broadcasted());
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -50,7 +50,7 @@ void matmul_test()
auto bl = p.add_literal(migraphx::literal{b_shape, b});
p.add_instruction(migraphx::op::dot{}, al, bl);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<T> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(c, results_vector));
......@@ -99,7 +99,7 @@ void matmul_test_ex()
auto bl = p.add_literal(migraphx::literal{b_shape, b});
p.add_instruction(migraphx::op::dot{}, al, bl);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<T> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(c, results_vector));
......@@ -134,7 +134,7 @@ TEST_CASE(matmul_mutli_dim_2)
p.add_instruction(migraphx::op::dot{}, l1, l2);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
......@@ -204,7 +204,7 @@ TEST_CASE(gemm_mutli_dim_2_beta0)
float beta = 0.0f;
p.add_instruction(migraphx::op::dot{alpha, beta}, l1, l2, l3);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
......@@ -265,7 +265,7 @@ TEST_CASE(gemm_beta_0)
float beta = 0.0f;
p.add_instruction(migraphx::op::dot{alpha, beta}, l1, l2, l3);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
......@@ -305,7 +305,7 @@ TEST_CASE(matmul_mutli_dim_2_3)
p.add_instruction(migraphx::op::dot{}, l1, l2);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
......@@ -355,7 +355,7 @@ TEST_CASE(gemm_mutli_dim1_2_3)
auto m3_beta = p.add_instruction(migraphx::op::mul{}, b_beta, l3);
p.add_instruction(migraphx::op::add{}, m3_beta, m12_alpha);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
......@@ -401,7 +401,7 @@ TEST_CASE(gemm_mutli_3args)
float beta = 0.41;
p.add_instruction(migraphx::op::dot{alpha, beta}, l1, l2, l3);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
......@@ -469,7 +469,7 @@ TEST_CASE(gemm_3args)
5.74736,
4.22063};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -505,7 +505,7 @@ TEST_CASE(matmul_vv_inner_product)
p.add_instruction(migraphx::op::dot{}, ual, ubl);
std::vector<float> gold = {-1.43461};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -539,7 +539,7 @@ TEST_CASE(matmul_vv_inner_product)
p.add_instruction(migraphx::op::dot{alpha}, ual, ubl);
std::vector<float> gold = {-0.4590752};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -575,7 +575,7 @@ TEST_CASE(matmul_vm)
std::vector<float> gold = {-3.78111, -3.40007, -2.1972, -3.31448, -3.80326};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -609,7 +609,7 @@ TEST_CASE(matmul_vm)
std::vector<float> gold = {-1.89056, -1.70003, -1.0986, -1.65724, -1.90163};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -653,7 +653,7 @@ TEST_CASE(matmul_vm)
-2.45019,
-1.35064};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -697,7 +697,7 @@ TEST_CASE(matmul_vm)
-0.514539,
-0.283635};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -734,7 +734,7 @@ TEST_CASE(matmul_mv)
p.add_instruction(migraphx::op::dot{}, al, ubl);
std::vector<float> gold = {1.31982, 1.19022, -1.96062};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -769,7 +769,7 @@ TEST_CASE(matmul_mv)
p.add_instruction(migraphx::op::dot{alpha}, al, ubl);
std::vector<float> gold = {0.395946, 0.357067, -0.588187};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -810,7 +810,7 @@ TEST_CASE(matmul_mv)
3.29447,
0.765651};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -861,7 +861,7 @@ TEST_CASE(matmul_mm1)
0.77227, 0.349659, 2.92759, 2.32384, -2.90664, 0.0527679,
-0.547761, -0.155467, 0.964619, 2.09133, -4.44281, -1.3864};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -911,7 +911,7 @@ TEST_CASE(matmul_mm1)
-0.415107, 0.305138, 0.435993, -0.107089, -0.767885, -4.00837, 1.09921, -2.02129,
0.109717, 0.618422, 0.438342, 0.29602, 2.00928, 0.420871};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -953,7 +953,7 @@ TEST_CASE(matmul_mm2)
0.60740202, 0.95217761, -1.06087445, -4.75868152, -3.6687713, -1.26539821};
p.add_instruction(migraphx::op::dot{}, al, bbl);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -991,7 +991,7 @@ TEST_CASE(matmul_mm2)
8.27398578e-01, 1.94406914e+00, -2.39250915e-01, -1.77062701e+00, -6.46239534e-01,
-7.95202750e-01};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -1041,7 +1041,7 @@ TEST_CASE(matmul_mm2)
0.63012062, -0.25606052, -0.69419352, -1.78299913, -0.38572706, 1.92249442,
0.3884186, -0.48153048, 0.84932351, 0.67234919, -1.07821322, -0.01208216};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -1086,7 +1086,7 @@ TEST_CASE(matmul_mm2)
1.92882983, -0.30996324, 2.19736278, 2.32389426, 2.36741832, 1.62253915,
0.26698225, -0.00741609, -2.53680983, -0.0679954, 0.04499683, 0.85354276};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -1113,7 +1113,7 @@ TEST_CASE(quant_dot_2args_multi4)
724, 762, 688, 742, 796, 850, 904, 958, 1012, 1066};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -1138,7 +1138,7 @@ TEST_CASE(quant_dot_2args_multi4)
736, 768, 592, 628, 664, 700, 736, 772, 808, 844};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -1163,7 +1163,7 @@ TEST_CASE(quant_dot_2args_multi4)
974, 1126, 86, 302, 518, 734, 950, 1166, 1382, 1598};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -1189,7 +1189,7 @@ TEST_CASE(quant_dot_2args_multi4)
836, 964, 74, 218, 362, 506, 650, 794, 938, 1082};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -1215,7 +1215,7 @@ TEST_CASE(quant_dot_2args_general)
70, 76, 82, 88, 94, 190, 212, 234, 256, 278, 310, 348, 386, 424, 462};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -1239,7 +1239,7 @@ TEST_CASE(quant_dot_2args_general)
210, 228, 246, 264, 282, 240, 262, 284, 306, 328, 270, 296, 322, 348, 374};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -1268,7 +1268,7 @@ TEST_CASE(quant_dot_2args_general)
28, 76, 124, 172, 220, 76, 252, 428, 604, 780, 124, 428, 732, 1036, 1340};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -1293,7 +1293,7 @@ TEST_CASE(quant_dot_2args_general)
126, 342, 558, 774, 990, 144, 408, 672, 936, 1200, 162, 474, 786, 1098, 1410};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -1323,7 +1323,7 @@ TEST_CASE(quant_dot_3args_general)
982, 1011, 1040, 1069, 1098, 1127, 1156, 2557, 2650, 2743, 2836, 2929, 3022, 3115};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -1350,7 +1350,7 @@ TEST_CASE(quant_dot_3args_general)
70, 76, 82, 88, 94, 190, 212, 234, 256, 278, 310, 348, 386, 424, 462};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -1378,7 +1378,7 @@ TEST_CASE(quant_dot_3args_general)
1966, 2025, 2084, 2143, 2202, 2261, 2320, 2183, 2250, 2317, 2384, 2451, 2518, 2585};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -1406,7 +1406,7 @@ TEST_CASE(quant_dot_3args_general)
286, 737, 1188, 1639, 2090, 2541, 2992, 755, 2230, 3705, 5180, 6655, 8130, 9605};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -1435,7 +1435,7 @@ TEST_CASE(quant_dot_3args_general)
844, 2190, 3536, 4882, 6228, 7574, 8920, 942, 2480, 4018, 5556, 7094, 8632, 10170};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -1469,7 +1469,7 @@ TEST_CASE(quant_dot_3args_batch)
10386, 11288, 11408, 11528, 11648, 11768, 11888, 12008};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......@@ -1503,7 +1503,7 @@ TEST_CASE(quant_dot_3args_batch)
24618, 25949, 27280, 28611, 29942, 31273, 25224, 26587, 27950, 29313, 30676, 32039};
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> m;
result.visit([&](auto output) { m.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(m, gold));
......
......@@ -25,10 +25,10 @@ TEST_CASE(slice_test)
auto l0 = p.add_literal(migraphx::literal{s, data});
p.add_instruction(migraphx::op::slice{{2}, {1}, {3}}, l0);
migraphx::shape s2{migraphx::shape::int32_type, {2, 2, 2}, {6, 3, 1}};
EXPECT(p.get_shape() == s2);
EXPECT(p.get_output_shapes().back() == s2);
p.compile(migraphx::cpu::target{});
migraphx::shape sresult{migraphx::shape::int32_type, {2, 2, 2}, {4, 2, 1}};
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<int> gold = {1, 2, 4, 5, 7, 8, 10, 11};
std::vector<int> results_vector(2 * 2 * 2);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
......@@ -43,10 +43,10 @@ TEST_CASE(slice_test)
auto l0 = p.add_literal(migraphx::literal{s, data});
p.add_instruction(migraphx::op::slice{{0, 1, 2}, {0, 0, 0}, {2, 2, 2}}, l0);
migraphx::shape s2{migraphx::shape::int32_type, {2, 2, 2}, {6, 3, 1}};
EXPECT(p.get_shape() == s2);
EXPECT(p.get_output_shapes().back() == s2);
p.compile(migraphx::cpu::target{});
migraphx::shape sresult{migraphx::shape::int32_type, {2, 2, 2}, {4, 2, 1}};
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<int> gold = {0, 1, 3, 4, 6, 7, 9, 10};
std::vector<int> results_vector(2 * 2 * 2);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
......@@ -71,7 +71,7 @@ TEST_CASE(concat_test)
auto l2 = p.add_literal(migraphx::literal{s2, data2});
p.add_instruction(migraphx::op::concat{axis}, l0, l1, l2);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<int> gold = {0, 1, 2, 3, 4, 10, 5, 6, 7, 8, 9, 20};
std::vector<int> results_vector(2 * 6);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
......@@ -95,7 +95,7 @@ TEST_CASE(concat_test)
auto l2 = p.add_literal(migraphx::literal{s2, data2});
p.add_instruction(migraphx::op::concat{axis}, l0, l1, l2);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<int> gold = {0, 1, 2, 3, 4, 10, 5, 6, 7, 8, 9, 20};
std::vector<int> results_vector(2 * 6);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
......@@ -119,7 +119,7 @@ TEST_CASE(concat_test)
auto l2 = p.add_literal(migraphx::literal{s2, data2});
p.add_instruction(migraphx::op::concat{axis}, l0, l1, l2);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<int> gold = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
std::vector<int> results_vector(6 * 2);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
......@@ -143,7 +143,7 @@ TEST_CASE(concat_test)
auto l2 = p.add_literal(migraphx::literal{s2, data2});
p.add_instruction(migraphx::op::concat{axis}, l0, l1, l2);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<int> gold = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
std::vector<int> results_vector(6 * 2);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
......@@ -169,7 +169,7 @@ TEST_CASE(gather_test)
int axis = 0;
p.add_instruction(migraphx::op::gather{axis}, a0, a1);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> res_data(4 * 5);
std::vector<float> golden = {0.5f, 1.5f, 2.5f, 6.5f, 7.5f, 8.5f};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
......@@ -189,7 +189,7 @@ TEST_CASE(gather_test)
int axis = 0;
p.add_instruction(migraphx::op::gather{axis}, a0, a1);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> res_data(4 * 5);
std::vector<float> golden = {0.5f, 1.5f, 2.5f, 6.5f, 7.5f, 8.5f};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
......@@ -209,7 +209,7 @@ TEST_CASE(gather_test)
int axis = 1;
p.add_instruction(migraphx::op::gather{axis}, a0, a1);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> res_data(4 * 5);
std::vector<float> golden = {0.5f, 2.5f, 3.5f, 5.5f, 6.5f, 8.5f};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
......@@ -229,7 +229,7 @@ TEST_CASE(gather_test)
int axis = -1;
p.add_instruction(migraphx::op::gather{axis}, a0, a1);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> res_data(4 * 5);
std::vector<float> golden = {0.5f, 2.5f, 3.5f, 5.5f, 6.5f, 8.5f};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
......@@ -250,7 +250,7 @@ TEST_CASE(gather_test)
int axis = -1;
p.add_instruction(migraphx::op::gather{axis}, a0, a1);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> res_data{};
std::vector<float> golden = {0.5f, 3.5f, 6.5f};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
......@@ -271,7 +271,7 @@ TEST_CASE(gather_test)
int axis = -1;
p.add_instruction(migraphx::op::gather{axis}, a0, a1);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> res_data{};
std::vector<float> golden = {0.5f, 3.5f, 6.5f};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
......@@ -292,7 +292,7 @@ TEST_CASE(gather_test)
int axis = -1;
p.add_instruction(migraphx::op::gather{axis}, a0, a1);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> res_data{};
std::vector<float> golden = {0.5f};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
......@@ -310,7 +310,7 @@ TEST_CASE(squeeze_test)
auto l0 = p.add_literal(migraphx::literal{s1, data});
p.add_instruction(migraphx::op::squeeze{{1}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
EXPECT(result.get_shape() == s2);
}
{
......@@ -321,9 +321,10 @@ TEST_CASE(squeeze_test)
auto l0 = p.add_literal(migraphx::literal{s1, data});
p.add_instruction(migraphx::op::squeeze{{3}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
EXPECT(result.get_shape() == s2);
}
{
migraphx::program p;
std::vector<float> data(4 * 3 * 3);
......@@ -332,7 +333,7 @@ TEST_CASE(squeeze_test)
auto l0 = p.add_literal(migraphx::literal{s1, data});
p.add_instruction(migraphx::op::squeeze{}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
EXPECT(result.get_shape() == s2);
}
}
......@@ -347,7 +348,7 @@ TEST_CASE(unsqueeze_test)
auto l0 = p.add_literal(migraphx::literal{s1, data});
p.add_instruction(migraphx::op::unsqueeze{{1}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
EXPECT(result.get_shape() == s2);
}
{
......@@ -358,7 +359,7 @@ TEST_CASE(unsqueeze_test)
auto l0 = p.add_literal(migraphx::literal{s1, data});
p.add_instruction(migraphx::op::unsqueeze{{2}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
EXPECT(result.get_shape() == s2);
}
}
......@@ -375,7 +376,7 @@ TEST_CASE(globalavgpool_test)
auto l0 = p.add_literal(migraphx::literal{s, data});
p.add_instruction(op, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
......@@ -395,7 +396,7 @@ TEST_CASE(globalmaxpool_test)
auto l0 = p.add_literal(migraphx::literal{s, data});
p.add_instruction(op, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
......@@ -423,7 +424,7 @@ TEST_CASE(im2col_3x3_no_pad_identity_test)
auto l_weights = p.add_literal(migraphx::literal{s_weights, weights});
p.add_instruction(migraphx::op::im2col{padding, stride, dilation}, l_image, l_weights);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::size_t col_height = (size[0] - f[0] + 2 * padding[0]) / stride[0] + 1;
std::size_t col_width = (size[1] - f[1] + 2 * padding[1]) / stride[1] + 1;
......@@ -452,7 +453,7 @@ TEST_CASE(im2col_3x3_no_pad_test)
auto l_weights = p.add_literal(migraphx::literal{s_weights, weights});
p.add_instruction(migraphx::op::im2col{padding, stride, dilation}, l_image, l_weights);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<int> correct = {0, 1, 2, 4, 5, 6, 8, 9, 10, 1, 2, 3, 5, 6, 7, 9, 10, 11,
4, 5, 6, 8, 9, 10, 12, 13, 14, 5, 6, 7, 9, 10, 11, 13, 14, 15};
......@@ -484,7 +485,7 @@ TEST_CASE(im2col_3x3_stride_2_no_pad_test)
auto l_weights = p.add_literal(migraphx::literal{s_weights, weights});
p.add_instruction(migraphx::op::im2col{padding, stride, dilation}, l_image, l_weights);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<int> correct = {0, 1, 2, 6, 7, 8, 12, 13, 14, 2, 3, 4,
8, 9, 10, 14, 15, 16, 12, 13, 14, 18, 19, 20,
......@@ -517,7 +518,7 @@ TEST_CASE(im2col_3x3_with_padding_test)
auto l_weights = p.add_literal(migraphx::literal{s_weights, weights});
p.add_instruction(migraphx::op::im2col{padding, stride, dilation}, l_image, l_weights);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<int> correct = {0, 0, 0, 0, 0, 1, 0, 2, 3, 0, 0, 0, 0, 1, 0, 2, 3, 0,
0, 0, 1, 0, 2, 3, 0, 0, 0, 0, 1, 0, 2, 3, 0, 0, 0, 0};
......@@ -565,7 +566,7 @@ TEST_CASE(batch_norm_inference_test)
p.add_instruction(migraphx::op::batch_norm_inference{}, x, scale, bias, mean, variance);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> result_vector(width * height * channels * batches);
std::vector<float> gold(width * height * channels * batches);
......@@ -595,7 +596,7 @@ TEST_CASE(im2col_3x3_with_channels_identity_test)
auto l_weights = p.add_literal(migraphx::literal{s_weights, weights});
p.add_instruction(migraphx::op::im2col{padding, stride, dilation}, l_image, l_weights);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::size_t col_height = (size[0] - f[0] + 2 * padding[0]) / stride[0] + 1;
std::size_t col_width = (size[1] - f[1] + 2 * padding[1]) / stride[1] + 1;
......@@ -611,7 +612,7 @@ TEST_CASE(exp_test)
auto l = p.add_literal(migraphx::literal{s, {-1, 0, 1}});
p.add_instruction(migraphx::op::exp{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0.36787944f, 1.f, 2.71828183f};
......@@ -626,7 +627,7 @@ TEST_CASE(erf_test)
p.add_literal(migraphx::literal{s, {0.73785057, 1.58165966, -0.43597795, -0.01677432}});
p.add_instruction(migraphx::op::erf{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0.70327317, 0.97470088, -0.46247893, -0.01892602};
......@@ -641,7 +642,7 @@ TEST_CASE(sqrt_test)
migraphx::literal{s, {1.02481645, 0.85643062, 0.03404123, 0.92791926, 0.10569184}});
p.add_instruction(migraphx::op::sqrt{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1.01233218, 0.92543537, 0.18450265, 0.96328566, 0.32510282};
......@@ -656,7 +657,7 @@ TEST_CASE(sign_test)
migraphx::literal{s, {1.02481645, 0.85643062, -0.03404123, -0.92791926, 0.0}});
p.add_instruction(migraphx::op::sign{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1.0, 1.0, -1.0, -1.0, 0.0};
......@@ -670,13 +671,28 @@ TEST_CASE(log_test)
auto l = p.add_literal(migraphx::literal{s, {1, 2, 3}});
p.add_instruction(migraphx::op::log{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0.0f, 0.6931471806f, 1.0986122887f};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(prelu_test)
{
    // PReLU with a per-element slope tensor: output is x when x >= 0,
    // and slope * x when x < 0. Checks -1 -> -2 (slope 2), 0 -> 0, 2 -> 2.
    migraphx::program p;
    migraphx::shape shp{migraphx::shape::float_type, {3}};
    auto input  = p.add_literal(migraphx::literal{shp, {-1, 0, 2}});
    auto slopes = p.add_literal(migraphx::literal{shp, {2, 1, 2}});
    p.add_instruction(migraphx::op::prelu{}, input, slopes);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({}).back();
    std::vector<float> out;
    result.visit([&](auto output) { out.assign(output.begin(), output.end()); });
    std::vector<float> expected = {-2.0f, 0.0f, 2.0f};
    EXPECT(migraphx::verify_range(out, expected));
}
TEST_CASE(pow_test)
{
migraphx::program p;
......@@ -685,7 +701,7 @@ TEST_CASE(pow_test)
auto e = p.add_literal(migraphx::literal{s, {1, 2, 3}});
p.add_instruction(migraphx::op::pow{}, b, e);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1.0f, 4.0f, 27.0f};
......@@ -699,7 +715,7 @@ TEST_CASE(sin_test)
auto l = p.add_literal(migraphx::literal{s, {-1, 0, 1}});
p.add_instruction(migraphx::op::sin{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {-0.84147098f, 0.f, 0.84147098f};
......@@ -713,7 +729,7 @@ TEST_CASE(cos_test)
auto l = p.add_literal(migraphx::literal{s, {-1, 0, 1}});
p.add_instruction(migraphx::op::cos{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0.54030231f, 1.f, 0.54030231f};
......@@ -727,7 +743,7 @@ TEST_CASE(tan_test)
auto l = p.add_literal(migraphx::literal{s, {-1, 0, 1}});
p.add_instruction(migraphx::op::tan{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {-1.55740772f, 0.0f, 1.55740772f};
......@@ -742,7 +758,7 @@ TEST_CASE(asin_test)
auto l = p.add_literal(migraphx::literal{s, data});
p.add_instruction(migraphx::op::asin{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {-0.5235987756f, 0.f, 1.119769515};
......@@ -757,7 +773,7 @@ TEST_CASE(acos_test)
auto l = p.add_literal(migraphx::literal{s, data});
p.add_instruction(migraphx::op::acos{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {2.4980915448f, 1.5707963268f, 0.0f};
......@@ -771,13 +787,57 @@ TEST_CASE(atan_test)
auto l = p.add_literal(migraphx::literal{s, {-1, 0, 1}});
p.add_instruction(migraphx::op::atan{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {-0.7853981634f, 0.0f, 0.7853981634f};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(asinh_test)
{
    // Element-wise inverse hyperbolic sine on the CPU target.
    // Reference values: asinh(-0.5) ~= -0.481212, asinh(0) = 0,
    // asinh(0.9) ~= 0.808867.
    migraphx::program p;
    migraphx::shape shp{migraphx::shape::float_type, {3}};
    std::vector<float> input{-0.5f, 0.0f, 0.9f};
    auto lit = p.add_literal(migraphx::literal{shp, input});
    p.add_instruction(migraphx::op::asinh{}, lit);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({}).back();
    std::vector<float> out(3);
    result.visit([&](auto output) { out.assign(output.begin(), output.end()); });
    std::vector<float> expected = {-0.481211841, 0, 0.808866858};
    EXPECT(migraphx::verify_range(out, expected));
}
TEST_CASE(acosh_test)
{
    // Element-wise inverse hyperbolic cosine (domain x >= 1) on the CPU target.
    // Reference values: acosh(1.1) ~= 0.443568, acosh(1.2) ~= 0.622363,
    // acosh(2.0) ~= 1.316958.
    // NOTE(review): the shape is double_type while the source data is
    // vector<float>; the literal converts on construction — presumably
    // intentional to exercise the double kernel, but worth confirming.
    migraphx::program p;
    migraphx::shape shp{migraphx::shape::double_type, {3}};
    std::vector<float> input{1.1f, 1.2f, 2.0f};
    auto lit = p.add_literal(migraphx::literal{shp, input});
    p.add_instruction(migraphx::op::acosh{}, lit);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({}).back();
    std::vector<float> out(3);
    result.visit([&](auto output) { out.assign(output.begin(), output.end()); });
    std::vector<float> expected = {0.4435683, 0.6223626, 1.316958};
    EXPECT(migraphx::verify_range(out, expected));
}
TEST_CASE(atanh_test)
{
    // Element-wise inverse hyperbolic tangent (domain |x| < 1) on the CPU
    // target. The inputs are the acosh_test gold values, so the expected
    // outputs are atanh of those: atanh(0.4435683) ~= 0.476664, etc.
    migraphx::program p;
    migraphx::shape shp{migraphx::shape::double_type, {3}};
    auto lit = p.add_literal(migraphx::literal{shp, {0.4435683, 0.6223626, 0.316958}});
    p.add_instruction(migraphx::op::atanh{}, lit);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({}).back();
    std::vector<float> out(3);
    result.visit([&](auto output) { out.assign(output.begin(), output.end()); });
    std::vector<float> expected = {0.476664424, 0.728852153, 0.328261733};
    EXPECT(migraphx::verify_range(out, expected));
}
TEST_CASE(add_test)
{
migraphx::program p;
......@@ -786,7 +846,7 @@ TEST_CASE(add_test)
auto l2 = p.add_literal(migraphx::literal{s, {1, 2, 3}});
p.add_instruction(migraphx::op::add{}, l1, l2);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0, 2, 4};
......@@ -805,7 +865,7 @@ TEST_CASE(broadcast_test)
auto l2 = p.add_literal(migraphx::literal{b_shape, b_data});
p.add_instruction(migraphx::op::broadcast{axis, l1->get_shape().lens()}, l2);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
auto output = result.get<int32_t>();
EXPECT(output(0, 0) == -2);
EXPECT(output(0, 1) == -2);
......@@ -826,7 +886,7 @@ TEST_CASE(add_broadcast_test)
auto l3 = p.add_instruction(migraphx::op::broadcast{axis, l1->get_shape().lens()}, l2);
p.add_instruction(migraphx::op::add{}, l1, l3);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
EXPECT(result.get_shape().packed());
std::vector<float> results_vector(12);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
......@@ -845,7 +905,7 @@ TEST_CASE(add_broadcast_test)
auto l4 = p.add_instruction(migraphx::op::multibroadcast{{2, 2, 3}}, l2);
p.add_instruction(migraphx::op::add{}, l3, l4);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
EXPECT(result.get_shape().packed());
std::vector<float> results_vector(12);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
......@@ -862,7 +922,7 @@ TEST_CASE(sub_test)
auto l2 = p.add_literal(migraphx::literal{s, {1, 2, 3}});
p.add_instruction(migraphx::op::sub{}, l1, l2);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {-2, -2, -2};
......@@ -877,7 +937,7 @@ TEST_CASE(mul_test)
auto l2 = p.add_literal(migraphx::literal{s, {1, 2, 3}});
p.add_instruction(migraphx::op::mul{}, l1, l2);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {-1, 0, 3};
......@@ -892,7 +952,7 @@ TEST_CASE(div_test)
auto l2 = p.add_literal(migraphx::literal{s, {1.0f, 2.0f, 4.0f}});
p.add_instruction(migraphx::op::div{}, l1, l2);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {-1.f, 0.25f, 0.25f};
......@@ -906,7 +966,7 @@ TEST_CASE(relu_test)
auto l = p.add_literal(migraphx::literal{s, {-1.f, 0.f, 1.f}});
p.add_instruction(migraphx::op::relu{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0.f, 0.f, 1.f};
......@@ -920,7 +980,7 @@ TEST_CASE(leaky_relu_test)
auto l = p.add_literal(migraphx::literal{s, {-1.f, 0.f, 1.f}});
p.add_instruction(migraphx::op::leaky_relu{0.01}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {-0.01f, 0.f, 1.f};
......@@ -934,7 +994,7 @@ TEST_CASE(lrn_test)
auto l = p.add_literal(migraphx::literal{s, {-2.0f, 1.0f, 0.f, 1.0f, 2.0f}});
p.add_instruction(migraphx::op::lrn{0.0001, 0.75, 1, 5}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(5);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {-2 / 1.000075, 1 / 1.00009, 0 / 1.000145, 1 / 1.00009, 2 / 1.000075};
......@@ -968,7 +1028,7 @@ TEST_CASE(imagescaler_test)
auto bias_bcast = p.add_instruction(migraphx::op::broadcast{1, s.lens()}, bias_vals);
p.add_instruction(migraphx::op::add{}, img_scaled, bias_bcast);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(12);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0.41,
......@@ -999,7 +1059,7 @@ TEST_CASE(reshape_test)
std::vector<int64_t> new_shape = {8, 3, 1, 1};
p.add_instruction(migraphx::op::reshape{new_shape}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, data));
......@@ -1010,7 +1070,7 @@ TEST_CASE(reshape_test)
std::vector<int64_t> new_shape = {1, 3, 4, 2};
p.add_instruction(migraphx::op::reshape{new_shape}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, data));
......@@ -1021,7 +1081,7 @@ TEST_CASE(reshape_test)
std::vector<int64_t> new_shape = {1, 3, 4, 2};
p.add_instruction(migraphx::op::reshape{new_shape}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, data));
......@@ -1073,7 +1133,7 @@ TEST_CASE(maxpool_test)
auto al = p.add_literal(migraphx::literal{a_shape, a});
p.add_instruction(migraphx::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{3, 2}}}, al);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(36);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, c));
......@@ -1088,7 +1148,7 @@ TEST_CASE(softmax_simple_test)
auto al = p.add_literal(migraphx::literal{a_shape, a});
p.add_instruction(migraphx::op::softmax{1}, al);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(2);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, s));
......@@ -1147,7 +1207,7 @@ TEST_CASE(softmax_test)
auto al = p.add_literal(migraphx::literal{a_shape, a});
p.add_instruction(migraphx::op::softmax{}, al);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(120);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, s));
......@@ -1180,7 +1240,7 @@ TEST_CASE(logsoftmax_test_axis_0)
int axis = 0;
p.add_instruction(migraphx::op::logsoftmax{axis}, al);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, s));
......@@ -1213,7 +1273,7 @@ TEST_CASE(logsoftmax_test_axis_1)
int axis = 1;
p.add_instruction(migraphx::op::logsoftmax{axis}, al);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, s));
......@@ -1246,7 +1306,7 @@ TEST_CASE(logsoftmax_test_axis_2)
int axis = 2;
p.add_instruction(migraphx::op::logsoftmax{axis}, al);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, s));
......@@ -1279,7 +1339,7 @@ TEST_CASE(logsoftmax_test_axis_3)
int axis = 3;
p.add_instruction(migraphx::op::logsoftmax{axis}, al);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, s));
......@@ -1296,7 +1356,7 @@ TEST_CASE(argmax_test_0)
auto dl = p.add_literal(migraphx::literal{data_shape, data});
p.add_instruction(migraphx::op::argmax{0}, dl);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<int64_t> result_vec;
result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });
......@@ -1314,7 +1374,25 @@ TEST_CASE(argmax_test_1)
auto dl = p.add_literal(migraphx::literal{data_shape, data});
p.add_instruction(migraphx::op::argmax{1}, dl);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<int64_t> result_vec;
result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(result_vec, res_gold));
}
TEST_CASE(argmax_test_neg_2)
{
migraphx::program p;
std::vector<float> data = {1.2255, 1.6834, -2.0305, -0.3221, 0.4701, 0.2583, 0.7545, 2.5758,
-1.6849, 0.0928, 0.9022, -0.8765, -0.4090, 0.9301, 2.0724, -1.5706,
0.4867, -0.1493, 0.6957, -0.2179, 0.7142, 0.7177, 0.0183, 1.3497};
std::vector<int64_t> res_gold = {0, 0, 2, 1, 2, 0, 0, 2};
migraphx::shape data_shape{migraphx::shape::float_type, {2, 3, 4}};
auto dl = p.add_literal(migraphx::literal{data_shape, data});
p.add_instruction(migraphx::op::argmax{-2}, dl);
p.compile(migraphx::cpu::target{});
auto result = p.eval({}).back();
std::vector<int64_t> result_vec;
result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });
......@@ -1332,7 +1410,7 @@ TEST_CASE(argmax_test_2)
auto dl = p.add_literal(migraphx::literal{data_shape, data});
p.add_instruction(migraphx::op::argmax{2}, dl);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<int64_t> result_vec;
result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });
......@@ -1350,7 +1428,7 @@ TEST_CASE(argmin_test_0)
auto dl = p.add_literal(migraphx::literal{data_shape, data});
p.add_instruction(migraphx::op::argmin{0}, dl);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<int64_t> result_vec;
result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });
......@@ -1368,7 +1446,7 @@ TEST_CASE(argmin_test_1)
auto dl = p.add_literal(migraphx::literal{data_shape, data});
p.add_instruction(migraphx::op::argmin{1}, dl);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<int64_t> result_vec;
result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });
......@@ -1386,7 +1464,25 @@ TEST_CASE(argmin_test_2)
auto dl = p.add_literal(migraphx::literal{data_shape, data});
p.add_instruction(migraphx::op::argmin{2}, dl);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<int64_t> result_vec;
result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(result_vec, res_gold));
}
TEST_CASE(argmin_test_neg_1)
{
migraphx::program p;
std::vector<float> data = {1.2255, 1.6834, -2.0305, -0.3221, 0.4701, 0.2583, 0.7545, 2.5758,
-1.6849, 0.0928, 0.9022, -0.8765, -0.4090, 0.9301, 2.0724, -1.5706,
0.4867, -0.1493, 0.6957, -0.2179, 0.7142, 0.7177, 0.0183, 1.3497};
std::vector<int64_t> res_gold = {2, 1, 0, 3, 3, 2};
migraphx::shape data_shape{migraphx::shape::float_type, {2, 3, 4}};
auto dl = p.add_literal(migraphx::literal{data_shape, data});
p.add_instruction(migraphx::op::argmin{-1}, dl);
p.compile(migraphx::cpu::target{});
auto result = p.eval({}).back();
std::vector<int64_t> result_vec;
result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });
......@@ -1449,7 +1545,7 @@ TEST_CASE(conv2d_test)
p.add_instruction(migraphx::op::convolution{}, al, cl);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(16);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
......@@ -1505,7 +1601,7 @@ TEST_CASE(conv2d_padding_test)
p.add_instruction(migraphx::op::convolution{{{1, 1}}, {{1, 1}}}, al, cl);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(64);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
......@@ -1566,7 +1662,7 @@ TEST_CASE(conv2d_padding_stride_test)
p.add_instruction(migraphx::op::convolution{{{1, 1}}, {{2, 2}}}, al, cl);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(16);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
......@@ -1588,7 +1684,7 @@ TEST_CASE(quant_conv2d_test)
p.add_instruction(migraphx::op::quant_convolution{}, al, cl);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<int32_t> s = {10197,
10548,
......@@ -1625,7 +1721,7 @@ TEST_CASE(quant_conv2d_padding_test)
auto cl = p.add_literal(migraphx::literal{c_shape, c});
p.add_instruction(migraphx::op::quant_convolution{{{1, 1}}, {{1, 1}}}, al, cl);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<int32_t> s = {
4521, 6753, 7014, 4635, 6858, 10197, 10548, 6939, 7830, 11601, 11952, 7839, 5007,
7383, 7590, 4953, 10515, 15987, 16734, 11277, 16821, 25506, 26586, 17874, 19737, 29826,
......@@ -1651,7 +1747,7 @@ TEST_CASE(quant_conv2d_padding_stride_test)
auto cl = p.add_literal(migraphx::literal{c_shape, c});
p.add_instruction(migraphx::op::quant_convolution{{{1, 1}}, {{2, 2}}}, al, cl);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<int32_t> s = {4521,
7014,
......@@ -1674,6 +1770,28 @@ TEST_CASE(quant_conv2d_padding_stride_test)
EXPECT(migraphx::verify_range(results_vector, s));
}
TEST_CASE(deconv_test)
{
    // Transposed convolution of a 1x1x3x3 input with a 1x1x3x3 all-ones
    // kernel on the CPU target; the expected output is the 5x5 "full"
    // correlation of the input values.
    migraphx::program p;

    migraphx::shape s{migraphx::shape::float_type, {1, 1, 3, 3}};
    std::vector<float> x_data{0, 1, 2, 3, 4, 5, 6, 7, 8};
    std::vector<float> w_data{1, 1, 1, 1, 1, 1, 1, 1, 1};

    auto x = p.add_literal(migraphx::literal{s, x_data});
    auto w = p.add_literal(migraphx::literal{s, w_data});
    p.add_instruction(migraphx::op::deconvolution{}, x, w);

    p.compile(migraphx::cpu::target{});
    auto result = p.eval({}).back();

    // Copy the result buffer out so it can be compared element-wise.
    std::vector<float> output;
    result.visit([&](auto out) { output.assign(out.begin(), out.end()); });

    std::vector<float> gold{0, 1, 3, 3, 2, 3, 8, 15, 12, 7, 9, 21, 36,
                            27, 15, 9, 20, 33, 24, 13, 6, 13, 21, 15, 8};
    EXPECT(migraphx::verify_range(output, gold));
}
TEST_CASE(transpose_test)
{
migraphx::shape a_shape{migraphx::shape::float_type, {1, 2, 2, 3}};
......@@ -1686,7 +1804,7 @@ TEST_CASE(transpose_test)
std::vector<int64_t> perm = {0, 3, 1, 2};
p.add_instruction(migraphx::op::transpose{perm}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
result.visit([&](auto output) {
std::vector<size_t> new_lens = {1, 3, 2, 2};
......@@ -1700,7 +1818,7 @@ TEST_CASE(transpose_test)
auto result = p.add_instruction(migraphx::op::transpose{perm}, l);
p.add_instruction(migraphx::op::contiguous{}, result);
p.compile(migraphx::cpu::target{});
auto result2 = p.eval({});
auto result2 = p.eval({}).back();
std::vector<float> results_vector(12);
result2.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
......@@ -1719,7 +1837,7 @@ TEST_CASE(contiguous_test)
auto l = p.add_literal(migraphx::literal{a_shape, data});
p.add_instruction(migraphx::op::contiguous{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(12);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
......@@ -1736,7 +1854,7 @@ TEST_CASE(identity_test)
auto l = p.add_literal(migraphx::literal{s, data});
p.add_instruction(migraphx::op::identity{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<int> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(std::equal(data.begin(), data.end(), results_vector.begin()));
......@@ -1749,7 +1867,7 @@ TEST_CASE(abs_test)
auto l = p.add_literal(migraphx::literal{s, {-1, 2, -3, 4}});
p.add_instruction(migraphx::op::abs{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1, 2, 3, 4};
......@@ -1763,7 +1881,7 @@ TEST_CASE(sigmoid_test)
auto l = p.add_literal(migraphx::literal{s, {-1, 2, -3, 4}});
p.add_instruction(migraphx::op::sigmoid{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{sigmoid(-1), sigmoid(2), sigmoid(-3), sigmoid(4)};
......@@ -1777,7 +1895,7 @@ TEST_CASE(sinh_test)
auto l = p.add_literal(migraphx::literal{s, {-1.0, 2.0, -3.0, 4.0}});
p.add_instruction(migraphx::op::sinh{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{sinhf(-1), sinhf(2), sinhf(-3), sinhf(4)};
......@@ -1791,7 +1909,7 @@ TEST_CASE(cosh_test)
auto l = p.add_literal(migraphx::literal{s, {-1.0, 2.0, -3.0, 4.0}});
p.add_instruction(migraphx::op::cosh{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{coshf(-1), coshf(2), coshf(-3), coshf(4)};
......@@ -1805,7 +1923,7 @@ TEST_CASE(tanh_test)
auto l = p.add_literal(migraphx::literal{s, {-1.0, 2.0, -3.0, 4.0}});
p.add_instruction(migraphx::op::tanh{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{tanhf(-1), tanhf(2), tanhf(-3), tanhf(4)};
......@@ -1820,7 +1938,7 @@ TEST_CASE(elu_test)
float alpha = 0.5;
p.add_instruction(migraphx::op::elu{alpha}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{elu(alpha, -1), elu(alpha, 2), elu(alpha, -3), elu(alpha, 4)};
......@@ -1837,7 +1955,7 @@ TEST_CASE(max_test)
auto curr_max = p.add_instruction(migraphx::op::max{}, l0, l1);
p.add_instruction(migraphx::op::max{}, curr_max, l2);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{7, 8, 9};
......@@ -1854,7 +1972,7 @@ TEST_CASE(min_test)
auto curr_min = p.add_instruction(migraphx::op::min{}, l0, l1);
p.add_instruction(migraphx::op::min{}, curr_min, l2);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1, 4, 3};
......@@ -1868,7 +1986,7 @@ TEST_CASE(pad_test)
auto l0 = p.add_literal(migraphx::literal{s, {1, 2, 3, 4}});
p.add_instruction(migraphx::op::pad{{1, 1, 1, 1}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(16);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4, 0, 0, 0, 0, 0};
......@@ -1886,7 +2004,7 @@ TEST_CASE(fp16_test)
auto l1 = p.add_literal(migraphx::literal{s, {b}});
p.add_instruction(migraphx::op::add{}, l0, l1);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<migraphx::half> results_vector(1);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<migraphx::half> gold{c};
......@@ -1911,7 +2029,7 @@ TEST_CASE(fp32_fp16_test)
auto p = create_program();
migraphx::quantize_fp16(p, op_names);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> res;
result.visit([&](auto output) { res.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(res, gold_res));
......@@ -1931,13 +2049,28 @@ TEST_CASE(clip_test)
op.min_val = 0.0;
p.add_instruction(op, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0.0, 0.0, 6.0};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(reduce_prod_axis0)
{
    // Product reduction over axis 0 of a {4, 2, 2} tensor: each of the
    // four output elements is the product of the four values sharing the
    // same trailing {2, 2} position.
    migraphx::program p;

    migraphx::shape s{migraphx::shape::float_type, {4, 2, 2}};
    auto input = migraphx::literal{s, {1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 3, 2, 3}};
    auto l0    = p.add_literal(input);
    p.add_instruction(migraphx::op::reduce_prod{{0}}, l0);

    p.compile(migraphx::cpu::target{});
    auto result = p.eval({}).back();

    // Copy the result buffer out for comparison.
    std::vector<float> output;
    result.visit([&](auto out) { output.assign(out.begin(), out.end()); });

    // Products are small integers, so an exact comparison is safe here.
    std::vector<float> gold{6, 18, 12, 18};
    EXPECT(output == gold);
}
TEST_CASE(reduce_sum_axis0)
{
migraphx::program p;
......@@ -1946,7 +2079,7 @@ TEST_CASE(reduce_sum_axis0)
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_sum{{0}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{15, 18, 21, 24};
......@@ -1961,7 +2094,7 @@ TEST_CASE(reduce_sum_axis1)
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_sum{{1}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{4, 6, 12, 14, 20, 22};
......@@ -1976,7 +2109,7 @@ TEST_CASE(reduce_sum_axis2)
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_sum{{2}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{3, 7, 11, 15, 19, 23};
......@@ -1991,7 +2124,7 @@ TEST_CASE(reduce_sum_axis02)
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_sum{{0, 2}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{33, 45};
......@@ -2006,7 +2139,7 @@ TEST_CASE(reduce_sum_axis12)
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_sum{{1, 2}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{10, 26, 42};
......@@ -2020,7 +2153,7 @@ TEST_CASE(rsqrt_test)
auto l = p.add_literal(migraphx::literal{s, {4.0, 16.0, 64.0}});
p.add_instruction(migraphx::op::rsqrt{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0.5, 0.25, 0.125};
......@@ -2035,7 +2168,7 @@ TEST_CASE(reduce_mean_axis1)
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_mean{{1}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{2, 3, 6, 7, 10, 11};
......@@ -2050,7 +2183,7 @@ TEST_CASE(reduce_mean_axis2)
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_mean{{2}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1.5f, 3.5f, 5.5f, 7.5f, 9.5f, 11.5f};
......@@ -2065,7 +2198,7 @@ TEST_CASE(reduce_mean_axis02)
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_mean{{0, 2}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{5.5, 7.5};
......@@ -2080,7 +2213,7 @@ TEST_CASE(reduce_mean_axis12)
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_mean{{1, 2}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{2.5f, 6.5f, 10.5f};
......@@ -2095,7 +2228,7 @@ TEST_CASE(reduce_mean_int)
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_mean{{1, 2}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<int> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<int> gold{2, 6, 10};
......@@ -2110,7 +2243,7 @@ TEST_CASE(reduce_min_axis1)
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_min{{1}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1, 2, 5, 6, 9, 10};
......@@ -2125,7 +2258,7 @@ TEST_CASE(reduce_min_axis02)
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_min{{0, 2}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1, 3};
......@@ -2140,7 +2273,7 @@ TEST_CASE(reduce_min_axis12)
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_min{{1, 2}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1, 5, 9};
......@@ -2155,7 +2288,7 @@ TEST_CASE(reduce_max_axis0)
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_max{{0}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{9, 10, 11, 12};
......@@ -2170,7 +2303,7 @@ TEST_CASE(reduce_max_axis01)
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_max{{0, 1}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{11, 12};
......@@ -2185,7 +2318,7 @@ TEST_CASE(reduce_max_axis02)
auto l0 = p.add_literal(input);
p.add_instruction(migraphx::op::reduce_max{{0, 2}}, l0);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{10, 12};
......@@ -2200,7 +2333,7 @@ TEST_CASE(sqdiff_test)
auto l2 = p.add_literal(migraphx::literal{s, {1, 2, 3}});
p.add_instruction(migraphx::op::sqdiff{}, l1, l2);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {4, 4, 4};
......@@ -2214,7 +2347,7 @@ TEST_CASE(round_test)
auto l = p.add_literal(migraphx::literal{s, {1.1, 1.5, 1.6, -1.1, -1.5, -1.6, 0.0, 2.0, -2.0}});
p.add_instruction(migraphx::op::round{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1.0, 2.0, 2.0, -1.0, -2.0, -2.0, 0.0, 2.0, -2.0};
......@@ -2228,7 +2361,7 @@ TEST_CASE(ceil_test)
auto l = p.add_literal(migraphx::literal{s, {1.1, 1.5, 1.6, -1.1, -1.5, -1.6, 0.0, 2.0, -2.0}});
p.add_instruction(migraphx::op::ceil{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {2.0, 2.0, 2.0, -1.0, -1.0, -1.0, 0.0, 2.0, -2.0};
......@@ -2242,7 +2375,7 @@ TEST_CASE(floor_test)
auto l = p.add_literal(migraphx::literal{s, {1.1, 1.5, 0.6, -1.1, -1.5, -0.6, 0.0, 2.0, -2.0}});
p.add_instruction(migraphx::op::floor{}, l);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1.0, 1.0, 0.0, -2.0, -2.0, -1.0, -0.0, 2.0, -2.0};
......@@ -2274,8 +2407,8 @@ TEST_CASE(op_capture)
p.compile(migraphx::cpu::target{});
capture_p.compile(migraphx::cpu::target{});
auto cap_res = capture_p.eval({});
auto res = p.eval({});
auto cap_res = capture_p.eval({}).back();
auto res = p.eval({}).back();
std::vector<float> vec;
std::vector<float> cap_vec;
......
......@@ -83,7 +83,7 @@ TEST_CASE(rnn_forward)
und,
ih);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -128,7 +128,7 @@ TEST_CASE(rnn_forward)
p.add_instruction(migraphx::op::rnn_last_output{}, out_hs);
p.compile(migraphx::cpu::target{});
auto last_output = p.eval({});
auto last_output = p.eval({}).back();
std::vector<float> last_output_data;
last_output.visit([&](auto out) { last_output_data.assign(out.begin(), out.end()); });
......@@ -165,7 +165,7 @@ TEST_CASE(rnn_forward)
p.add_instruction(migraphx::op::rnn_last_output{}, out_hs);
p.compile(migraphx::cpu::target{});
auto last_output = p.eval({});
auto last_output = p.eval({}).back();
std::vector<float> last_output_data;
last_output.visit([&](auto out) { last_output_data.assign(out.begin(), out.end()); });
......@@ -195,7 +195,7 @@ TEST_CASE(rnn_forward)
p.add_instruction(migraphx::op::rnn_last_output{}, out_hs);
p.compile(migraphx::cpu::target{});
auto last_output = p.eval({});
auto last_output = p.eval({}).back();
std::vector<float> last_output_data;
last_output.visit([&](auto out) { last_output_data.assign(out.begin(), out.end()); });
......@@ -230,7 +230,7 @@ TEST_CASE(rnn_forward)
und,
ih);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -311,7 +311,7 @@ TEST_CASE(rnn_reverse)
und,
ih);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -356,7 +356,7 @@ TEST_CASE(rnn_reverse)
p.add_instruction(migraphx::op::rnn_last_output{}, out_hs);
p.compile(migraphx::cpu::target{});
auto last_output = p.eval({});
auto last_output = p.eval({}).back();
std::vector<float> last_output_data;
last_output.visit([&](auto out) { last_output_data.assign(out.begin(), out.end()); });
......@@ -436,7 +436,7 @@ TEST_CASE(rnn_bidirectional)
und,
ih);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -475,7 +475,7 @@ TEST_CASE(rnn_bidirectional)
p.add_instruction(migraphx::op::rnn_last_output{}, out_hs);
p.compile(migraphx::cpu::target{});
auto last_output = p.eval({});
auto last_output = p.eval({}).back();
std::vector<float> last_output_data;
last_output.visit([&](auto out) { last_output_data.assign(out.begin(), out.end()); });
......@@ -520,7 +520,7 @@ TEST_CASE(rnn_bidirectional)
p.add_instruction(migraphx::op::rnn_last_output{}, out_hs);
p.compile(migraphx::cpu::target{});
auto last_output = p.eval({});
auto last_output = p.eval({}).back();
std::vector<float> last_output_data;
last_output.visit([&](auto out) { last_output_data.assign(out.begin(), out.end()); });
......@@ -560,7 +560,7 @@ TEST_CASE(rnn_bidirectional)
r);
p.compile(migraphx::cpu::target{});
auto last_output = p.eval({});
auto last_output = p.eval({}).back();
std::vector<float> last_output_data;
last_output.visit([&](auto out) { last_output_data.assign(out.begin(), out.end()); });
......@@ -596,7 +596,7 @@ TEST_CASE(rnn_bidirectional)
und,
ih);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -699,7 +699,7 @@ TEST_CASE(gru_forward)
ih);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -736,7 +736,7 @@ TEST_CASE(gru_forward)
ih);
p.add_instruction(migraphx::op::rnn_last_output{}, concat_hs);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -778,7 +778,7 @@ TEST_CASE(gru_forward)
p.add_instruction(migraphx::op::rnn_last_output{}, concat_hs);
p.add_instruction(migraphx::op::rnn_last_output{}, concat_hs);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -819,7 +819,7 @@ TEST_CASE(gru_forward)
ih);
p.add_instruction(migraphx::op::rnn_last_output{}, concat_hs);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -911,7 +911,7 @@ TEST_CASE(gru_forward_args)
r);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -943,7 +943,7 @@ TEST_CASE(gru_forward_args)
bias);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -978,7 +978,7 @@ TEST_CASE(gru_forward_args)
ih);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -1069,7 +1069,7 @@ TEST_CASE(gru_forward_actv_funcs)
ih);
p.add_instruction(migraphx::op::rnn_last_output{}, concat_hs);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -1108,7 +1108,7 @@ TEST_CASE(gru_forward_actv_funcs)
und,
ih);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -1142,7 +1142,7 @@ TEST_CASE(gru_forward_actv_funcs)
ih);
p.add_instruction(migraphx::op::rnn_last_output{}, concat_hs);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -1186,7 +1186,7 @@ TEST_CASE(gru_forward_actv_funcs)
ih);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -1284,7 +1284,7 @@ TEST_CASE(gru_reverse)
ih);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -1321,7 +1321,7 @@ TEST_CASE(gru_reverse)
ih);
p.add_instruction(migraphx::op::rnn_last_output{}, concat_hs);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -1362,7 +1362,7 @@ TEST_CASE(gru_reverse)
ih);
p.add_instruction(migraphx::op::rnn_last_output{}, concat_hs);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -1398,7 +1398,7 @@ TEST_CASE(gru_reverse)
und,
ih);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -1438,7 +1438,7 @@ TEST_CASE(gru_reverse)
ih);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -1554,7 +1554,7 @@ TEST_CASE(gru_bidirectional)
ih);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -1595,7 +1595,7 @@ TEST_CASE(gru_bidirectional)
ih);
p.add_instruction(migraphx::op::rnn_last_output{}, concat_hs);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -1630,7 +1630,7 @@ TEST_CASE(gru_bidirectional)
ih);
p.add_instruction(migraphx::op::rnn_last_output{}, concat_hs);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -1734,7 +1734,7 @@ TEST_CASE(gru_bidirectional_args)
r);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -1770,7 +1770,7 @@ TEST_CASE(gru_bidirectional_args)
bias);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -1809,7 +1809,7 @@ TEST_CASE(gru_bidirectional_args)
ih);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -1921,7 +1921,7 @@ TEST_CASE(gru_bidirectional_actv_funcs)
ih);
p.add_instruction(migraphx::op::rnn_last_output{}, concat_hs);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -1954,7 +1954,7 @@ TEST_CASE(gru_bidirectional_actv_funcs)
und,
ih);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -1991,7 +1991,7 @@ TEST_CASE(gru_bidirectional_actv_funcs)
und,
ih);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -2032,7 +2032,7 @@ TEST_CASE(gru_bidirectional_actv_funcs)
ih);
p.add_instruction(migraphx::op::rnn_last_output{}, concat_hs);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -2068,7 +2068,7 @@ TEST_CASE(gru_bidirectional_actv_funcs)
ih);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -2112,7 +2112,7 @@ TEST_CASE(gru_bidirectional_actv_funcs)
ih);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -2236,7 +2236,7 @@ TEST_CASE(lstm_forward)
und);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -2280,7 +2280,7 @@ TEST_CASE(lstm_forward)
p.add_instruction(migraphx::op::rnn_last_output{}, hs);
p.compile(migraphx::cpu::target{});
auto last_hs = p.eval({});
auto last_hs = p.eval({}).back();
std::vector<float> output_data;
last_hs.visit([&](auto output) { output_data.assign(output.begin(), output.end()); });
......@@ -2328,7 +2328,7 @@ TEST_CASE(lstm_forward)
p.add_instruction(migraphx::op::lstm_last_cell_output{}, hs);
p.compile(migraphx::cpu::target{});
auto last_hs = p.eval({});
auto last_hs = p.eval({}).back();
std::vector<float> output_data;
last_hs.visit([&](auto output) { output_data.assign(output.begin(), output.end()); });
......@@ -2449,7 +2449,7 @@ TEST_CASE(lstm_forward_more)
r);
p.compile(migraphx::cpu::target{});
auto last_hs = p.eval({});
auto last_hs = p.eval({}).back();
std::vector<float> output_data;
last_hs.visit([&](auto output) { output_data.assign(output.begin(), output.end()); });
......@@ -2493,7 +2493,7 @@ TEST_CASE(lstm_forward_more)
pph);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -2542,7 +2542,7 @@ TEST_CASE(lstm_forward_more)
p.add_instruction(migraphx::op::rnn_last_output{}, hs);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> hs_data;
hs_concat.visit([&](auto output) { hs_data.assign(output.begin(), output.end()); });
......@@ -2672,7 +2672,7 @@ TEST_CASE(lstm_reverse)
ic,
pph);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> output_data;
hs_concat.visit([&](auto output) { output_data.assign(output.begin(), output.end()); });
std::vector<float> output_data_gold{
......@@ -2704,7 +2704,7 @@ TEST_CASE(lstm_reverse)
p.add_instruction(migraphx::op::lstm_last_cell_output{}, hs);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> output_data;
hs_concat.visit([&](auto output) { output_data.assign(output.begin(), output.end()); });
std::vector<float> output_data_gold{-0.443077,
......@@ -2736,7 +2736,7 @@ TEST_CASE(lstm_reverse)
p.add_instruction(migraphx::op::lstm_last_cell_output{}, hs);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> output_data;
hs_concat.visit([&](auto output) { output_data.assign(output.begin(), output.end()); });
std::vector<float> output_data_gold{-0.443077,
......@@ -2753,8 +2753,91 @@ TEST_CASE(lstm_reverse)
0.667298};
EXPECT(migraphx::verify_range(output_data, output_data_gold));
}
}
// lstm activation function test
TEST_CASE(lstm_reverse_actv)
{
std::size_t batch_size = 3;
std::size_t seq_len = 4;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
std::vector<float> w_data{
-0.2763, -0.4715, -0.3010, -0.2306, -0.2283, -0.2656, 0.2035, 0.3570, -0.1499, 0.4390,
-0.1843, 0.2351, 0.3357, 0.1217, 0.1401, 0.3300, -0.0429, 0.3266, 0.4834, -0.3914,
-0.1480, 0.3734, -0.0372, -0.1746, 0.0550, 0.4177, -0.1332, 0.4391, -0.3287, -0.4401,
0.1486, 0.1346, 0.1048, -0.4361, 0.0886, -0.3840, -0.2730, -0.1710, 0.3274, 0.0169,
-0.4462, 0.0729, 0.3983, -0.0669, 0.0756, 0.4150, -0.4684, -0.2522};
std::vector<float> r_data{
-0.4564, -0.4432, 0.1605, 0.4387, 0.0034, 0.4116, 0.2824, 0.4775, -0.2729, -0.4707,
0.1363, 0.2218, 0.0559, 0.2828, 0.2093, 0.4687, 0.3794, -0.1069, -0.3049, 0.1430,
-0.2506, 0.4644, 0.2755, -0.3645, -0.3155, 0.1425, 0.2891, 0.1786, -0.3274, 0.2365,
0.2522, -0.4312, -0.0562, -0.2748, 0.0776, -0.3154, 0.2851, -0.3930, -0.1174, 0.4360,
0.2436, 0.0164, -0.0680, 0.3403, -0.2857, -0.0459, -0.2991, -0.2624, 0.4194, -0.3291,
-0.4659, 0.3300, 0.0454, 0.4981, -0.4706, -0.4584, 0.2596, 0.2871, -0.3509, -0.1910,
0.3987, -0.1687, -0.0032, -0.1038};
std::vector<float> bias_data{-0.0258, 0.0073, -0.4780, -0.4101, -0.3556, -0.1017, 0.3632,
-0.1823, 0.1479, 0.1677, -0.2603, 0.0381, 0.1575, 0.1896,
0.4755, -0.4794, 0.2167, -0.4474, -0.3139, 0.1018, 0.4470,
-0.4232, 0.3247, -0.1636, -0.1582, -0.1703, 0.3920, 0.2055,
-0.4386, 0.4208, 0.0717, 0.3789};
std::vector<float> input_data{
-0.5516, 0.2391, -1.6951, -0.4313, -0.9730, -0.2005, 2.3930, -0.5221, -0.1331,
-0.0910, 1.2122, -0.1952, 0.4661, 0.6494, 2.1332, -1.0972, 0.9816, 0.1122,
0.3577, 1.3508, -0.5366, 1.7449, 0.5483, -0.0701, -0.4100, -2.2344, 0.3685,
0.4583, 2.3794, 1.0372, -0.8887, 0.7892, -0.4012, -0.2818, -2.3374, 1.5310};
std::vector<float> ih_data{1.5289,
1.0986,
0.6091,
1.6462,
0.8720,
0.5349,
-0.1962,
-1.7416,
-0.9912,
1.2831,
1.0896,
-0.6959};
// reverse, 3 args, 1 actv function
std::vector<float> ic_data{-0.8323,
0.3998,
0.1831,
0.5938,
2.7096,
-0.1790,
0.0022,
-0.8040,
0.1578,
0.0567,
0.8069,
-0.5141};
std::vector<float> pph_data{-0.8271,
-0.5683,
0.4562,
-1.2545,
1.2729,
-0.4082,
-0.4392,
-0.9406,
0.7794,
1.8194,
-0.5811,
0.2166};
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
migraphx::shape w_shape{migraphx::shape::float_type, {num_dirct, 4 * hidden_size, input_size}};
migraphx::shape r_shape{migraphx::shape::float_type, {num_dirct, 4 * hidden_size, hidden_size}};
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 8 * hidden_size}};
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
migraphx::shape ic_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
migraphx::shape pph_shape{migraphx::shape::float_type, {num_dirct, 3 * hidden_size}};
float clip = 0.0f;
{
migraphx::program p;
auto seq = p.add_literal(migraphx::literal{in_shape, input_data});
......@@ -2770,7 +2853,7 @@ TEST_CASE(lstm_reverse)
w,
r);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> output_data;
hs_concat.visit([&](auto output) { output_data.assign(output.begin(), output.end()); });
std::vector<float> output_data_gold{
......@@ -2801,7 +2884,7 @@ TEST_CASE(lstm_reverse)
r);
p.add_instruction(migraphx::op::rnn_last_output{}, hs);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> output_data;
hs_concat.visit([&](auto output) { output_data.assign(output.begin(), output.end()); });
std::vector<float> output_data_gold{-0.132123,
......@@ -2842,7 +2925,7 @@ TEST_CASE(lstm_reverse)
w,
r);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> output_data;
hs_concat.visit([&](auto output) { output_data.assign(output.begin(), output.end()); });
std::vector<float> output_data_gold{-0.104351,
......@@ -2961,7 +3044,7 @@ TEST_CASE(lstm_bidirectional)
ic,
pph);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> output_data;
hs_concat.visit([&](auto output) { output_data.assign(output.begin(), output.end()); });
std::vector<float> output_data_gold{
......@@ -3010,7 +3093,7 @@ TEST_CASE(lstm_bidirectional)
pph);
p.add_instruction(migraphx::op::rnn_last_output{}, hs);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> output_data;
hs_concat.visit([&](auto output) { output_data.assign(output.begin(), output.end()); });
std::vector<float> output_data_gold{
......@@ -3048,7 +3131,7 @@ TEST_CASE(lstm_bidirectional)
pph);
p.add_instruction(migraphx::op::lstm_last_cell_output{}, hs);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> output_data;
hs_concat.visit([&](auto output) { output_data.assign(output.begin(), output.end()); });
std::vector<float> output_data_gold{
......@@ -3075,7 +3158,7 @@ TEST_CASE(lstm_bidirectional)
w,
r);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> output_data;
hs_concat.visit([&](auto output) { output_data.assign(output.begin(), output.end()); });
std::vector<float> output_data_gold{
......@@ -3117,7 +3200,7 @@ TEST_CASE(lstm_bidirectional)
w,
r);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> output_data;
hs_concat.visit([&](auto output) { output_data.assign(output.begin(), output.end()); });
std::vector<float> output_data_gold{
......@@ -3186,7 +3269,7 @@ TEST_CASE(lstm_bidirectional_actv_func)
w,
r);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> output_data;
hs_concat.visit([&](auto output) { output_data.assign(output.begin(), output.end()); });
std::vector<float> output_data_gold{
......@@ -3222,7 +3305,7 @@ TEST_CASE(lstm_bidirectional_actv_func)
w,
r);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> output_data;
hs_concat.visit([&](auto output) { output_data.assign(output.begin(), output.end()); });
std::vector<float> output_data_gold{
......@@ -3258,7 +3341,7 @@ TEST_CASE(lstm_bidirectional_actv_func)
r);
p.add_instruction(migraphx::op::rnn_last_output{}, hs);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> output_data;
hs_concat.visit([&](auto output) { output_data.assign(output.begin(), output.end()); });
std::vector<float> output_data_gold{
......@@ -3288,7 +3371,7 @@ TEST_CASE(lstm_bidirectional_actv_func)
r);
p.add_instruction(migraphx::op::rnn_last_output{}, hs);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> output_data;
hs_concat.visit([&](auto output) { output_data.assign(output.begin(), output.end()); });
std::vector<float> output_data_gold{
......@@ -3319,7 +3402,7 @@ TEST_CASE(lstm_bidirectional_actv_func)
r);
p.add_instruction(migraphx::op::rnn_last_output{}, hs);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> output_data;
hs_concat.visit([&](auto output) { output_data.assign(output.begin(), output.end()); });
std::vector<float> output_data_gold{
......@@ -3351,7 +3434,7 @@ TEST_CASE(lstm_bidirectional_actv_func)
r);
p.add_instruction(migraphx::op::rnn_last_output{}, hs);
p.compile(migraphx::cpu::target{});
auto hs_concat = p.eval({});
auto hs_concat = p.eval({}).back();
std::vector<float> output_data;
hs_concat.visit([&](auto output) { output_data.assign(output.begin(), output.end()); });
std::vector<float> output_data_gold{
......
......@@ -21,7 +21,7 @@ TEST_CASE(simple_test)
auto count = std::distance(p.begin(), p.end());
run_pass(p);
EXPECT(std::distance(p.begin(), p.end()) == count);
auto result = p.eval({});
auto result = p.eval({}).back();
EXPECT(result == migraphx::literal{3});
EXPECT(result != migraphx::literal{4});
}
......@@ -37,7 +37,7 @@ TEST_CASE(simple_test_nop)
auto count = std::distance(p.begin(), p.end());
run_pass(p);
EXPECT(std::distance(p.begin(), p.end()) == count);
auto result = p.eval({});
auto result = p.eval({}).back();
EXPECT(result == migraphx::literal{3});
EXPECT(result != migraphx::literal{4});
}
......@@ -53,7 +53,7 @@ TEST_CASE(simple_test_nop2)
p.add_instruction(nop{});
run_pass(p);
EXPECT(std::distance(p.begin(), p.end()) == 2);
auto result = p.eval({});
auto result = p.eval({}).back();
EXPECT(result == migraphx::literal{});
EXPECT(result != migraphx::literal{4});
}
......@@ -69,7 +69,7 @@ TEST_CASE(duplicate_test1)
auto count = std::distance(p.begin(), p.end());
run_pass(p);
EXPECT(std::distance(p.begin(), p.end()) == (count - 1));
auto result = p.eval({});
auto result = p.eval({}).back();
EXPECT(result == migraphx::literal{3});
EXPECT(result != migraphx::literal{4});
}
......@@ -86,7 +86,7 @@ TEST_CASE(duplicate_test2)
auto count = std::distance(p.begin(), p.end());
run_pass(p);
EXPECT(std::distance(p.begin(), p.end()) == (count - 2));
auto result = p.eval({});
auto result = p.eval({}).back();
EXPECT(result == migraphx::literal{3});
EXPECT(result != migraphx::literal{4});
}
......@@ -105,7 +105,7 @@ TEST_CASE(depth_test)
auto count = std::distance(p.begin(), p.end());
run_pass(p);
EXPECT(std::distance(p.begin(), p.end()) == (count - 4));
auto result = p.eval({});
auto result = p.eval({}).back();
EXPECT(result == migraphx::literal{3});
EXPECT(result != migraphx::literal{4});
}
......@@ -122,7 +122,7 @@ TEST_CASE(undefined_test)
run_pass(p);
EXPECT(std::distance(p.begin(), p.end()) == count - 1);
EXPECT(not p.has_instruction(undef));
auto result = p.eval({});
auto result = p.eval({}).back();
EXPECT(result == migraphx::literal{3});
EXPECT(result != migraphx::literal{4});
}
......@@ -139,7 +139,7 @@ TEST_CASE(duplicate_args1)
run_pass(p);
EXPECT(std::distance(p.begin(), p.end()) != count);
EXPECT(std::distance(p.begin(), p.end()) == 2);
auto result = p.eval({});
auto result = p.eval({}).back();
EXPECT(result == migraphx::literal{0});
}
......@@ -156,7 +156,7 @@ TEST_CASE(duplicate_args2)
run_pass(p);
EXPECT(std::distance(p.begin(), p.end()) != count);
EXPECT(std::distance(p.begin(), p.end()) == 2);
auto result = p.eval({});
auto result = p.eval({}).back();
EXPECT(result == migraphx::literal{0});
}
......@@ -174,7 +174,7 @@ TEST_CASE(duplicate_args3)
run_pass(p);
EXPECT(std::distance(p.begin(), p.end()) != count);
EXPECT(std::distance(p.begin(), p.end()) == 2);
auto result = p.eval({});
auto result = p.eval({}).back();
EXPECT(result == migraphx::literal{0});
}
......
......@@ -49,7 +49,7 @@ TEST_CASE(basic)
p.add_instruction(pass_op{}, a3, p2);
run_pass(p);
EXPECT(p.get_shape() == migraphx::shape{migraphx::shape::float_type, {200}});
EXPECT(p.get_output_shapes().back() == migraphx::shape{migraphx::shape::float_type, {200}});
EXPECT(p.get_parameter_shape("memory").bytes() == (8 * 4 + 40 * 4 + 200 * 4));
}
......@@ -66,7 +66,7 @@ TEST_CASE(aligned)
p.add_instruction(pass_op{}, a3, p2);
run_pass(p);
EXPECT(p.get_shape() == migraphx::shape{migraphx::shape::float_type, {200}});
EXPECT(p.get_output_shapes().back() == migraphx::shape{migraphx::shape::float_type, {200}});
EXPECT(p.get_parameter_shape("memory").bytes() == (32 + 32 + 200 * 4));
}
......@@ -83,7 +83,7 @@ TEST_CASE(unaligned)
p.add_instruction(pass_op{}, a3, p2);
run_pass(p, 1);
EXPECT(p.get_shape() == migraphx::shape{migraphx::shape::float_type, {200}});
EXPECT(p.get_output_shapes().back() == migraphx::shape{migraphx::shape::float_type, {200}});
EXPECT(p.get_parameter_shape("memory").bytes() == (1 * 4 + 2 * 4 + 200 * 4));
}
......@@ -100,7 +100,7 @@ TEST_CASE(float_aligned)
p.add_instruction(pass_op{}, a3, p2);
run_pass(p, 4);
EXPECT(p.get_shape() == migraphx::shape{migraphx::shape::float_type, {200}});
EXPECT(p.get_output_shapes().back() == migraphx::shape{migraphx::shape::float_type, {200}});
EXPECT(p.get_parameter_shape("memory").bytes() == (1 * 4 + 2 * 4 + 200 * 4));
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment