Unverified Commit 77df49b8 authored by Chris Austen, committed by GitHub

Merge branch 'develop' into jenkins_reorder

parents e4e19b1d 60b8b097
......@@ -53,7 +53,6 @@ TEST_CASE(host_same_buffer_copy)
migraphx::parameter_map pp;
std::vector<float> a_vec(ss.elements(), -1);
std::vector<float> b_vec(ss.elements(), 2);
- std::vector<float> c_vec(ss.elements(), 0);
pp["a"] = migraphx::argument(ss, a_vec.data());
pp["b"] = migraphx::argument(ss, b_vec.data());
std::vector<float> gpu_result;
......@@ -64,7 +63,8 @@ TEST_CASE(host_same_buffer_copy)
auto result = p.eval(pp).back();
std::vector<float> results_vector(ss.elements(), -1);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
- EXPECT(migraphx::verify::verify_range(c_vec, results_vector));
+ std::vector<float> gold_vec(ss.elements(), 0);
+ EXPECT(migraphx::verify::verify_rms_range(results_vector, gold_vec));
}
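As a rough mental model for the verify_rms_range calls introduced throughout these tests, an RMS check scores the whole result vector at once rather than bounding each element separately. The sketch below is an assumption for illustration only (hypothetical name rms_close), not MIGraphX's actual implementation:

#include <cmath>
#include <cstddef>
#include <vector>

// Hypothetical RMS-style comparison; the real verify_rms_range lives in
// MIGraphX's verify headers and may normalize differently.
bool rms_close(const std::vector<float>& result,
               const std::vector<float>& gold,
               double tol = 1e-3)
{
    if(result.size() != gold.size())
        return false;
    if(result.empty())
        return true;
    double err = 0.0;
    double mag = 0.0;
    for(std::size_t i = 0; i < result.size(); ++i)
    {
        const double d = static_cast<double>(result[i]) - gold[i];
        err += d * d;
        mag += static_cast<double>(gold[i]) * gold[i];
    }
    const double rms_err = std::sqrt(err / result.size());
    const double rms_mag = std::sqrt(mag / result.size());
    return rms_err <= tol * (rms_mag + 1.0); // +1 guards the all-zero gold case
}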
TEST_CASE(arguments_lifetime)
......
......@@ -133,7 +133,8 @@ bool verify_mlir(const migraphx::module& mmlir)
auto inputs = generate_params(ref);
auto mlir = create_program_from_mlir(mmlir);
- return migraphx::verify_args("mlir", run_ref(ref, inputs), run_gpu(mlir, inputs));
+ return migraphx::verify_args_with_tolerance(
+     "mlir", run_gpu(mlir, inputs), migraphx::verify::expected{run_ref(ref, inputs)});
}
TEST_CASE(conv)
......
......@@ -40,7 +40,6 @@
TEST_CASE(gpu_target_copy)
{
migraphx::target gpu_t = migraphx::make_target("gpu");
- migraphx::target ref_t = migraphx::make_target("ref");
migraphx::shape s{migraphx::shape::int8_type, {2, 3, 4, 5}};
auto ref_arg_orig = migraphx::generate_argument(s, 0x123456L);
......@@ -52,7 +51,7 @@ TEST_CASE(gpu_target_copy)
std::vector<int8_t> val_final;
ref_arg_final.visit([&](auto v) { val_final.assign(v.begin(), v.end()); });
- EXPECT(migraphx::verify::verify_range(val_orig, val_final));
+ EXPECT(migraphx::verify::verify_rms_range(val_orig, val_final));
}
TEST_CASE(int8_quantization)
......@@ -118,9 +117,12 @@ TEST_CASE(int8_quantization)
// the regular pipeline uses rewrite_quantization at a much earlier stage.
if(migraphx::gpu::mlir_enabled())
- EXPECT(migraphx::verify::verify_range(ref_result, gpu_result, 1e5));
+ EXPECT(migraphx::verify::verify_range_with_tolerance(
+     gpu_result,
+     migraphx::verify::expected{ref_result},
+     migraphx::verify::tolerance{0.01}));
else
- EXPECT(migraphx::verify::verify_range(ref_result, gpu_result));
+ EXPECT(migraphx::verify::verify_rms_range(gpu_result, ref_result));
}
}
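The expected{} and tolerance{} arguments above replace the old positional reference range and magic-number tolerance with self-describing wrappers. A minimal sketch of the pattern they appear to follow (an assumption; the real definitions are in MIGraphX's verify headers):

// Assumed aggregate wrappers; brace-initialization gives the expected{...}
// and tolerance{...} call-site syntax seen above.
template <class Range>
struct expected
{
    Range data;
};
struct tolerance
{
    double value;
};

Tagging the gold range as expected{} also makes the argument order obvious at the call site: the measured result comes first, the reference second.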
......
......@@ -24,16 +24,16 @@
#ifndef MIGRAPHX_GUARD_TEST_INCLUDE_POINTWISE_HPP
#define MIGRAPHX_GUARD_TEST_INCLUDE_POINTWISE_HPP
#include <migraphx/instruction_ref.hpp>
#include <migraphx/program.hpp>
#include <migraphx/module.hpp>
#include <migraphx/make_op.hpp>
template <class F>
- migraphx::instruction_ref add_pointwise(migraphx::program& p,
-                                         migraphx::module_ref mm,
-                                         const std::string& name,
-                                         std::vector<migraphx::instruction_ref> inputs,
-                                         F f)
+ migraphx::module_ref create_pointwise_module(migraphx::program& p,
+                                              const std::string& name,
+                                              std::vector<migraphx::instruction_ref> inputs,
+                                              F f)
{
auto* pm = p.create_module(name);
pm->set_bypass();
......@@ -44,6 +44,17 @@ migraphx::instruction_ref add_pointwise(migraphx::program& p,
});
auto r = f(pm, params);
pm->add_return({r});
return pm;
}
template <class F>
migraphx::instruction_ref add_pointwise(migraphx::program& p,
migraphx::module_ref mm,
const std::string& name,
std::vector<migraphx::instruction_ref> inputs,
F f)
{
auto* pm = create_pointwise_module(p, name, inputs, f);
return mm->add_instruction(migraphx::make_op("pointwise"), inputs, {pm});
}
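A usage sketch for the refactored helper pair above, with hypothetical shapes and names; the callback receives the freshly created pointwise submodule and its parameters, exactly as create_pointwise_module wires them:

// Hypothetical example: build an elementwise add through add_pointwise.
inline migraphx::program make_pointwise_add_example()
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    auto x = mm->add_parameter("x", s);
    auto y = mm->add_parameter("y", s);
    auto sum = add_pointwise(p, mm, "pw0", {x, y}, [](auto* pm, const auto& params) {
        return pm->add_instruction(migraphx::make_op("add"), params[0], params[1]);
    });
    mm->add_return({sum});
    return p;
}

Splitting out create_pointwise_module lets callers build the submodule on its own, while add_pointwise keeps the old behavior of immediately attaching a pointwise instruction to a parent module.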
......
(Binary ONNX file diff: const_of_shape_default_test.onnx added; protobuf content not rendered.)
(Binary ONNX file diff: const_of_shape_dyn_int64_test.onnx added; protobuf content not rendered.)
(Binary ONNX file diff: const_of_shape_int64_test.onnx modified; graph renamed from "constant_of_shape" to "const_of_shape_int64_test".)
(Binary ONNX file diff: const_of_shape_no_value_attr_test.onnx modified; graph renamed from "constant_of_shape" to "const_of_shape_no_value_attr_test".)
......@@ -1007,9 +1007,9 @@ def const_of_shape_empty_input_test():
[10])
empty_val = np.array([]).astype(np.int64)
empty_ts = helper.make_tensor(name='empty_tensor',
- data_type=TensorProto.INT32,
+ data_type=TensorProto.INT64,
dims=empty_val.shape,
- vals=empty_val.flatten().astype(int))
+ vals=empty_val.flatten().astype(np.int64))
shape_const = helper.make_node(
'Constant',
inputs=[],
......@@ -1035,9 +1035,9 @@ def const_of_shape_float_test():
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(name='shape_tensor',
- data_type=TensorProto.INT32,
+ data_type=TensorProto.INT64,
dims=shape_val.shape,
- vals=shape_val.flatten().astype(int))
+ vals=shape_val.flatten().astype(np.int64))
shape_const = helper.make_node(
'Constant',
......@@ -1055,22 +1055,44 @@ def const_of_shape_float_test():
return ([shape_const, node], [], [y])
@onnx_test()
def const_of_shape_default_test():
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(name='shape_tensor',
data_type=TensorProto.INT64,
dims=shape_val.shape,
vals=shape_val.flatten().astype(np.int64))
shape_const = helper.make_node(
'Constant',
inputs=[],
outputs=['shape'],
value=shape_ts,
)
y = helper.make_tensor_value_info('y', TensorProto.INT64, [2, 3, 4])
node = onnx.helper.make_node('ConstantOfShape',
inputs=['shape'],
outputs=['y'])
return ([shape_const, node], [], [y])
@onnx_test()
def const_of_shape_int64_test():
tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.INT64, [1],
[10])
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(name='shape_tensor',
- data_type=TensorProto.INT32,
+ data_type=TensorProto.INT64,
dims=shape_val.shape,
- vals=shape_val.flatten().astype(int))
+ vals=shape_val.flatten().astype(np.int64))
shape_const = helper.make_node(
'Constant',
inputs=[],
outputs=['shape'],
value=shape_ts,
)
- y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4])
+ y = helper.make_tensor_value_info('y', TensorProto.INT64, [2, 3, 4])
node = onnx.helper.make_node('ConstantOfShape',
inputs=['shape'],
......@@ -1084,9 +1106,9 @@ def const_of_shape_int64_test():
def const_of_shape_no_value_attr_test():
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(name='shape_tensor',
- data_type=TensorProto.INT32,
+ data_type=TensorProto.INT64,
dims=shape_val.shape,
- vals=shape_val.flatten().astype(int))
+ vals=shape_val.flatten().astype(np.int64))
shape_const = helper.make_node(
'Constant',
inputs=[],
......@@ -1104,6 +1126,40 @@ def const_of_shape_no_value_attr_test():
return ([shape_const, node], [], [y])
@onnx_test()
def const_of_shape_dyn_float_test():
tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.FLOAT, [1],
[10])
output_dims = helper.make_tensor_value_info('output_dims',
TensorProto.INT64, [3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4])
node = onnx.helper.make_node('ConstantOfShape',
inputs=['output_dims'],
outputs=['y'],
value=tensor_val)
return ([node], [output_dims], [y])
@onnx_test()
def const_of_shape_dyn_int64_test():
tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.INT64, [1],
[10])
output_dims = helper.make_tensor_value_info('output_dims',
TensorProto.INT64, [3])
y = helper.make_tensor_value_info('y', TensorProto.INT64, [2, 3, 4])
node = onnx.helper.make_node('ConstantOfShape',
inputs=['output_dims'],
outputs=['y'],
value=tensor_val)
return ([node], [output_dims], [y])
@onnx_test()
def conv_1d_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5])
......
......@@ -1040,11 +1040,25 @@ TEST_CASE(constant_one_val_int64_test)
EXPECT(p == prog);
}
TEST_CASE(const_of_shape_default_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape output_dims_shape(migraphx::shape::int64_type, {3});
mm->add_literal(migraphx::literal(output_dims_shape, {2, 3, 4}));
migraphx::shape output_shape{migraphx::shape::float_type, {2, 3, 4}};
std::vector<float> vec(output_shape.elements(), 0.0);
mm->add_literal(migraphx::literal(output_shape, vec));
auto prog = optimize_onnx("const_of_shape_default_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(const_of_shape_empty_input_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
- mm->add_literal(migraphx::literal(migraphx::shape::int32_type));
+ mm->add_literal(migraphx::literal(migraphx::shape::int64_type));
migraphx::shape s(migraphx::shape::int64_type, {1}, {0});
std::vector<int64_t> vec(s.elements(), 10);
mm->add_literal(migraphx::literal(s, vec));
......@@ -1057,7 +1071,7 @@ TEST_CASE(const_of_shape_float_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
- migraphx::shape ss(migraphx::shape::int32_type, {3});
+ migraphx::shape ss(migraphx::shape::int64_type, {3});
mm->add_literal(migraphx::literal(ss, {2, 3, 4}));
migraphx::shape s(migraphx::shape::float_type, {2, 3, 4});
std::vector<float> vec(s.elements(), 10.0f);
......@@ -1071,8 +1085,10 @@ TEST_CASE(const_of_shape_int64_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
- migraphx::shape ss(migraphx::shape::int32_type, {3});
+ // output_dims
+ migraphx::shape ss(migraphx::shape::int64_type, {3});
mm->add_literal(migraphx::literal(ss, {2, 3, 4}));
+ // constant shape literal
migraphx::shape s(migraphx::shape::int64_type, {2, 3, 4});
std::vector<int64_t> vec(s.elements(), 10);
mm->add_literal(migraphx::literal(s, vec));
......@@ -1085,7 +1101,7 @@ TEST_CASE(const_of_shape_no_value_attr_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
- migraphx::shape ss(migraphx::shape::int32_type, {3});
+ migraphx::shape ss(migraphx::shape::int64_type, {3});
mm->add_literal(migraphx::literal(ss, {2, 3, 4}));
migraphx::shape s(migraphx::shape::float_type, {2, 3, 4});
std::vector<float> vec(s.elements(), 0.0f);
......@@ -1095,6 +1111,42 @@ TEST_CASE(const_of_shape_no_value_attr_test)
EXPECT(p == prog);
}
TEST_CASE(const_of_shape_dyn_float_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto od_param =
mm->add_parameter("output_dims", migraphx::shape{migraphx::shape::int64_type, {3}});
auto alloc_ins = mm->add_instruction(
migraphx::make_op("allocate", {{"buf_type", migraphx::shape::float_type}}), od_param);
migraphx::shape dv_shape(migraphx::shape::float_type, {1}, {0});
auto dv_lit = mm->add_literal(migraphx::literal(dv_shape, {10}));
auto fill_ins = mm->add_instruction(migraphx::make_op("fill"), dv_lit, alloc_ins);
mm->add_return({fill_ins});
migraphx::onnx_options options;
auto prog = parse_onnx("const_of_shape_dyn_float_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(const_of_shape_dyn_int64_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto od_param =
mm->add_parameter("output_dims", migraphx::shape{migraphx::shape::int64_type, {3}});
auto alloc_ins = mm->add_instruction(
migraphx::make_op("allocate", {{"buf_type", migraphx::shape::int64_type}}), od_param);
migraphx::shape dv_shape(migraphx::shape::int64_type, {1}, {0});
auto dv_lit = mm->add_literal(migraphx::literal(dv_shape, {10}));
auto fill_ins = mm->add_instruction(migraphx::make_op("fill"), dv_lit, alloc_ins);
mm->add_return({fill_ins});
migraphx::onnx_options options;
auto prog = parse_onnx("const_of_shape_dyn_int64_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_autopad_fail_test)
{
EXPECT(test::throws([&] { optimize_onnx("conv_autopad_fail_test.onnx"); }));
......
(Diff of this file is collapsed and not shown.)
......@@ -88,6 +88,13 @@ TEST_CASE(allocate_static)
expect_shape(out_shape, migraphx::make_op("allocate", {{"shape", to_value(out_shape)}}));
}
TEST_CASE(allocate_static_input_error)
{
migraphx::shape input{migraphx::shape::int64_type, {3}};
migraphx::shape out_shape{migraphx::shape::float_type, {2, 3, 4}};
expect_shape(out_shape, migraphx::make_op("allocate", {{"shape", to_value(out_shape)}}), input);
}
TEST_CASE(allocate_dyn)
{
migraphx::shape input{migraphx::shape::int64_type, {2}};
......@@ -109,6 +116,14 @@ TEST_CASE(allocate_dyn_with_shape_attr)
input);
}
TEST_CASE(allocate_dyn_no_input_error)
{
migraphx::shape shape_attr{migraphx::shape::float_type,
{{1, 4}, {3, 3}, {4, 8, {4, 6}}, {4, 8}, {4, 6}}};
expect_shape(shape_attr,
migraphx::make_op("allocate", {{"shape", migraphx::to_value(shape_attr)}}));
}
TEST_CASE(argmax_axis0)
{
migraphx::shape input{migraphx::shape::half_type, {2, 3, 4, 5}};
......@@ -2524,13 +2539,21 @@ TEST_CASE(reshape_shape)
migraphx::shape output{migraphx::shape::float_type, lens};
expect_shape(output, migraphx::make_op("reshape", {{"dims", new_shape}}), input);
}
}
TEST_CASE(reshape_shape_invalid)
{
migraphx::shape input{migraphx::shape::float_type, {24, 1, 1, 1}};
for(auto&& new_shape :
std::vector<std::vector<int64_t>>{{8, 3, 2, 2}, {1, 3, -1, -1}, {3, 0}, {3, 2}})
{
throws_shape(migraphx::make_op("reshape", {{"dims", new_shape}}), input);
}
}
TEST_CASE(reshape_shape_minus1_reshapes)
{
migraphx::shape input{migraphx::shape::float_type, {24, 1, 1, 1}};
std::vector<std::pair<std::vector<int64_t>, migraphx::shape>> minus1_tests{
{{2, -1, 3}, {migraphx::shape::float_type, {2, 4, 3}}},
{{0, -1, 0}, {migraphx::shape::float_type, {24, 1, 1}}},
......@@ -2654,11 +2677,11 @@ TEST_CASE(reshape_broadcast_squeeze)
expect_shape(output, migraphx::make_op("reshape", {{"dims", output.lens()}}), input);
}
- TEST_CASE(reshape_broadcast_squeeze_error)
+ TEST_CASE(reshape_broadcast_squeeze_memlayout_change)
{
migraphx::shape input{migraphx::shape::float_type, {2, 16, 16, 1280}, {0, 0, 0, 1}};
- std::vector<int64_t> new_shape = {2, 16, 20480};
- throws_shape(migraphx::make_op("reshape", {{"dims", new_shape}}), input);
+ migraphx::shape output{migraphx::shape::float_type, {2, 16, 256, 80}, {0, 0, 0, 16}};
+ expect_shape(output, migraphx::make_op("reshape", {{"dims", output.lens()}}), input);
}
TEST_CASE(reshape_dyn_shape)
......@@ -2706,6 +2729,199 @@ TEST_CASE(reshape_non_fixed_not_matching_error)
throws_shape(migraphx::make_op("reshape", {{"dims", new_shape}}), input);
}
TEST_CASE(reshape_lazy_shape)
{
migraphx::shape input{migraphx::shape::float_type, {24, 1, 1, 1}};
for(auto&& new_shape :
std::vector<std::vector<int64_t>>{{8, 3, 1, 1}, {1, 3, 4, 2}, {1, 3, 4, 2}})
{
std::vector<std::size_t> lens(new_shape.size());
std::copy(new_shape.begin(), new_shape.end(), lens.begin());
migraphx::shape output{migraphx::shape::float_type, lens};
expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", new_shape}}), input);
}
for(auto&& new_shape :
std::vector<std::vector<int64_t>>{{8, 3, 2, 2}, {1, 3, -1, -1}, {3, 0}, {3, 2}})
{
throws_shape(migraphx::make_op("reshape_lazy", {{"dims", new_shape}}), input);
}
std::vector<std::pair<std::vector<int64_t>, migraphx::shape>> minus1_tests{
{{2, -1, 3}, {migraphx::shape::float_type, {2, 4, 3}}},
{{0, -1, 0}, {migraphx::shape::float_type, {24, 1, 1}}},
{{2, -1, 0}, {migraphx::shape::float_type, {2, 12, 1}}},
{{0, 0, -1}, {migraphx::shape::float_type, {24, 1, 1}}},
{{2, 0, -1}, {migraphx::shape::float_type, {2, 1, 12}}},
{{-1, 2, 3}, {migraphx::shape::float_type, {4, 2, 3}}},
{{-1, 0, 3}, {migraphx::shape::float_type, {8, 1, 3}}},
{{-1, 0, 0}, {migraphx::shape::float_type, {24, 1, 1}}},
{{-1, 3, 0}, {migraphx::shape::float_type, {8, 3, 1}}}};
for(auto& it : minus1_tests)
{
expect_shape(it.second, migraphx::make_op("reshape_lazy", {{"dims", it.first}}), input);
}
}
// This uses the permutation to compute the reshape_lazy output since it is
// simpler than trying to calculate strides. As we collapse or expand
// dimensions, we remove the collapsed dimensions or duplicate the expanded
// dimensions in the permutation, and then renumber the permutation. So for
// dimensions of 4, 24, 1, 1, 1 with a permutation of 1, 0, 2, 3, 4 that
// reshape_lazy maps to 4, 1, 3, 4, 2, we first remove the collapsed
// dimensions or duplicate the expanded dimensions, which gives 1, 0, 0, 0, 0.
// Then after renumbering we get a final permutation of 4, 0, 1, 2, 3.
TEST_CASE(reshape_lazy_nonstandard)
{
auto input = migraphx::shape::from_permutation(migraphx::shape::float_type,
{4, 24, 1, 1, 1},
migraphx::invert_permutation({1, 0, 2, 3, 4}));
std::vector<std::pair<std::vector<std::size_t>, std::vector<int64_t>>> tests{
{{4, 24}, {1, 0}},
{{4, 24, 1, 1, 1, 1}, {1, 0, 2, 3, 4, 5}},
{{4, 8, 3, 1, 1}, {2, 0, 1, 3, 4}},
{{4, 1, 3, 4, 2}, {4, 0, 1, 2, 3}},
{{4, 1, 4, 3, 2}, {4, 0, 1, 2, 3}},
{{4, 2, 4, 3}, {3, 0, 1, 2}},
{{4, 2, 12, 1}, {2, 0, 1, 3}},
{{4, 2, 1, 12}, {3, 0, 1, 2}},
{{4, 4, 2, 3}, {3, 0, 1, 2}},
{{4, 8, 1, 3}, {3, 0, 1, 2}},
{{4, 8, 3, 1}, {2, 0, 1, 3}}};
for(const auto& [dims, perm] : tests)
{
migraphx::shape output = migraphx::shape::from_permutation(
migraphx::shape::float_type, dims, migraphx::invert_permutation(perm));
expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", dims}}), input);
}
}
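The renumbering step described in the comment above can be done with a stable ranking, so duplicated entries keep their relative order. A standalone sketch (hypothetical helper name, not MIGraphX source):

#include <algorithm>
#include <cstddef>
#include <numeric>
#include <vector>

// Renumber a multiset such as {1, 0, 0, 0, 0} back into a valid permutation:
// stable-ranking its entries yields {4, 0, 1, 2, 3}, matching the comment.
std::vector<std::size_t> renumber(const std::vector<std::size_t>& perm)
{
    std::vector<std::size_t> order(perm.size());
    std::iota(order.begin(), order.end(), 0);
    std::stable_sort(order.begin(), order.end(), [&](std::size_t a, std::size_t b) {
        return perm[a] < perm[b];
    });
    std::vector<std::size_t> result(perm.size());
    for(std::size_t rank = 0; rank < order.size(); ++rank)
        result[order[rank]] = rank;
    return result;
}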
TEST_CASE(reshape_lazy_nonstandard_squeeze)
{
auto input = migraphx::shape::from_permutation(
migraphx::shape::float_type, {2, 16, 16, 1280}, migraphx::invert_permutation({0, 2, 3, 1}));
std::vector<std::size_t> lens = {2, 256, 1280};
migraphx::shape output = migraphx::shape::from_permutation(
migraphx::shape::float_type, lens, migraphx::invert_permutation({0, 2, 1}));
expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", lens}}), input);
}
TEST_CASE(reshape_lazy_nonstandard_error)
{
auto input = migraphx::shape::from_permutation(migraphx::shape::float_type,
{4, 24, 1, 1, 1},
migraphx::invert_permutation({1, 0, 2, 3, 4}));
for(auto&& new_shape : std::vector<std::vector<int64_t>>{{4, 8, 3, 2, 2},
{1},
{4, 8, 4},
{4, 24, 1, 1, 1, 1, 2},
{8, 4, 4},
{4, 1, 3, -1, -1},
{4, 3, 0},
{4, 3, 2},
{3, 0},
{3, 2}})
{
throws_shape(migraphx::make_op("reshape_lazy", {{"dims", new_shape}}), input);
}
}
TEST_CASE(reshape_lazy_nonpacked_unsqueeze1)
{
migraphx::shape input{migraphx::shape::float_type, {4, 16}, {32, 2}};
migraphx::shape output{migraphx::shape::float_type, {4, 2, 8}, {32, 16, 2}};
expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", output.lens()}}), input);
}
TEST_CASE(reshape_lazy_nonpacked_unsqueeze2)
{
migraphx::shape input{migraphx::shape::float_type, {4, 16}, {32, 2}};
migraphx::shape output{migraphx::shape::float_type, {2, 2, 16}, {64, 32, 2}};
expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", output.lens()}}), input);
}
TEST_CASE(reshape_lazy_nonpacked_squeeze)
{
migraphx::shape input{migraphx::shape::float_type, {4, 16}, {32, 2}};
migraphx::shape output{migraphx::shape::float_type, {64}, {2}};
expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", output.lens()}}), input);
}
TEST_CASE(reshape_lazy_broadcast_unsqueeze1)
{
migraphx::shape input{migraphx::shape::float_type, {2, 256, 1280}, {0, 0, 1}};
migraphx::shape output{migraphx::shape::float_type, {2, 16, 16, 1280}, {0, 0, 0, 1}};
expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", output.lens()}}), input);
}
TEST_CASE(reshape_lazy_broadcast_unsqueeze2)
{
migraphx::shape input{migraphx::shape::float_type, {2, 256, 1280}, {0, 0, 1}};
migraphx::shape output{migraphx::shape::float_type, {2, 256, 16, 80}, {0, 0, 80, 1}};
expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", output.lens()}}), input);
}
TEST_CASE(reshape_lazy_broadcast_squeeze)
{
migraphx::shape input{migraphx::shape::float_type, {2, 16, 16, 1280}, {0, 0, 0, 1}};
migraphx::shape output{migraphx::shape::float_type, {2, 256, 1280}, {0, 0, 1}};
expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", output.lens()}}), input);
}
TEST_CASE(reshape_lazy_broadcast_squeeze_error)
{
migraphx::shape input{migraphx::shape::float_type, {2, 16, 16, 1280}, {0, 0, 0, 1}};
std::vector<int64_t> new_shape = {2, 16, 20480};
throws_shape(migraphx::make_op("reshape_lazy", {{"dims", new_shape}}), input);
}
TEST_CASE(reshape_lazy_dyn_shape)
{
migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {24, 24}, {1, 1}, {1, 1}}};
for(auto&& new_shape : std::vector<std::vector<int64_t>>{
{-1, 1, 1, 24}, {0, 8, 3, 1}, {-1, 3, 4, 2}, {0, 2, 4, 3}})
{
std::vector<migraphx::shape::dynamic_dimension> out_dyn_dims{};
for(std::size_t i = 0; i < new_shape.size(); ++i)
{
if(new_shape[i] == 0 or new_shape[i] == -1)
{
out_dyn_dims.push_back(input.dyn_dims().at(i));
}
else
{
std::size_t d = new_shape[i];
out_dyn_dims.push_back({d, d});
}
}
migraphx::shape output{migraphx::shape::float_type, out_dyn_dims};
expect_shape(output, migraphx::make_op("reshape_lazy", {{"dims", new_shape}}), input);
}
}
TEST_CASE(reshape_lazy_multiple_non_fixed_error)
{
migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {24, 24}, {10, 20}, {1, 1}}};
std::vector<int64_t> new_shape = {0, 1, 0, 24};
throws_shape(migraphx::make_op("reshape_lazy", {{"dims", new_shape}}), input);
}
TEST_CASE(reshape_lazy_fixed_ele_not_matching_error)
{
migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {24, 24}, {10, 10}, {1, 1}}};
std::vector<int64_t> new_shape = {0, 1, 5, 24};
throws_shape(migraphx::make_op("reshape_lazy", {{"dims", new_shape}}), input);
}
TEST_CASE(reshape_lazy_non_fixed_not_matching_error)
{
migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {24, 24}, {1, 1}, {1, 1}}};
std::vector<int64_t> new_shape = {2, 1, 1, 24};
throws_shape(migraphx::make_op("reshape_lazy", {{"dims", new_shape}}), input);
}
TEST_CASE(return_shape_tuple)
{
using migraphx::shape;
......
(Diff of this file is collapsed and not shown.)
......@@ -83,7 +83,7 @@ TEST_CASE(param_add)
auto hs = mm->add_instruction(migraphx::make_op("add"), hp1, hp2);
auto fs = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
hs);
if(add_return)
{
......@@ -1013,7 +1013,7 @@ TEST_CASE(target_copy)
std::vector<float> orig_result;
run_prog(p, ref_t, m, orig_result);
- EXPECT(migraphx::verify::verify_range(ref_result, orig_result));
+ EXPECT(migraphx::verify::verify_rms_range(ref_result, orig_result));
}
}
......@@ -1077,7 +1077,10 @@ TEST_CASE(int8_quantization_dot)
std::vector<float> no_quant_result;
run_prog(p, ref_t, m, no_quant_result);
- EXPECT(migraphx::verify::verify_range(quant_result, no_quant_result, 30000));
+ EXPECT(migraphx::verify::verify_range_with_tolerance(
+     quant_result,
+     migraphx::verify::expected{no_quant_result},
+     migraphx::verify::tolerance{0.003}));
}
}
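For context on why int8 quantization needs a nonzero tolerance at all: a generic symmetric scheme (an assumption for illustration; MIGraphX's exact quantization differs in details) rounds each value onto 255 signed levels, losing up to half a scale step per element:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Generic symmetric int8 quantize/dequantize round trip; the per-element
// rounding error of up to scale/2 is what the tolerance above absorbs.
std::vector<float> quantize_dequantize(const std::vector<float>& x)
{
    float absmax = 0.0f;
    for(float v : x)
        absmax = std::max(absmax, std::fabs(v));
    const float scale = absmax > 0.0f ? absmax / 127.0f : 1.0f;
    std::vector<float> out(x.size());
    std::transform(x.begin(), x.end(), out.begin(), [&](float v) {
        const auto q = static_cast<int8_t>(std::lround(v / scale));
        return static_cast<float>(q) * scale;
    });
    return out;
}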
......@@ -1122,7 +1125,7 @@ TEST_CASE(int8_quantization_conv)
std::vector<float> no_quant_result;
run_prog(p, ref_t, no_quant_result);
- EXPECT(migraphx::verify::verify_range(quant_result, no_quant_result));
+ EXPECT(migraphx::verify::verify_rms_range(quant_result, no_quant_result));
}
}
......@@ -1274,7 +1277,7 @@ TEST_CASE(test_op_capture)
cap_res.visit([&](auto output) { cap_vec.assign(output.begin(), output.end()); });
res.visit([&](auto output) { vec.assign(output.begin(), output.end()); });
- EXPECT(migraphx::verify::verify_range(vec, cap_vec));
+ EXPECT(migraphx::verify::verify_rms_range(vec, cap_vec));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -42,7 +42,7 @@ TEST_CASE(abs_test)
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1, 2, 3, 4};
- EXPECT(migraphx::verify::verify_range(results_vector, gold));
+ EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}
TEST_CASE(abs_dyn_test)
......@@ -62,5 +62,5 @@ TEST_CASE(abs_dyn_test)
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1, 2, 3, 4};
- EXPECT(migraphx::verify::verify_range(results_vector, gold));
+ EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}
......@@ -45,7 +45,7 @@ TEST_CASE(acos_test)
std::vector<float> gold = data;
std::transform(
gold.begin(), gold.end(), gold.begin(), [](float n) -> float { return acosf(n); });
- EXPECT(migraphx::verify::verify_range(results_vector, gold));
+ EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}
TEST_CASE(acos_dyn_test)
......@@ -68,5 +68,5 @@ TEST_CASE(acos_dyn_test)
std::vector<float> gold = input_data;
std::transform(
gold.begin(), gold.end(), gold.begin(), [](float n) -> float { return acosf(n); });
- EXPECT(migraphx::verify::verify_range(results_vector, gold));
+ EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}
......@@ -45,7 +45,7 @@ TEST_CASE(acosh_test)
std::vector<float> gold = data;
std::transform(
gold.begin(), gold.end(), gold.begin(), [](float n) -> float { return acoshf(n); });
- EXPECT(migraphx::verify::verify_range(results_vector, gold));
+ EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}
TEST_CASE(acosh_dyn_test)
......@@ -68,5 +68,5 @@ TEST_CASE(acosh_dyn_test)
std::vector<float> gold = input_data;
std::transform(
gold.begin(), gold.end(), gold.begin(), [](float n) -> float { return acoshf(n); });
- EXPECT(migraphx::verify::verify_range(results_vector, gold));
+ EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}