Unverified Commit 0662a9a3 authored by Brian Pickrell, committed by GitHub

Merge branch 'develop' into dyn_resize_gather

parents b74d3a8f 35e5298e
split_test_uneven_num_outputs:
(binary ONNX protobuf: a Split node with a num_outputs attribute, input x, outputs y1, y2, y3, y4)
......@@ -1014,6 +1014,95 @@ TEST_CASE(instance_norm_3d_test)
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(isinf_half_test)
{
migraphx::program p = migraphx::parse_onnx("isinf_half_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape s{migraphx::shape::half_type, {2, 3}};
migraphx::parameter_map pp;
migraphx::half nan = std::numeric_limits<migraphx::half>::quiet_NaN();
migraphx::half infinity = std::numeric_limits<migraphx::half>::infinity();
migraphx::half max = std::numeric_limits<migraphx::half>::max();
migraphx::half min = std::numeric_limits<migraphx::half>::min();
migraphx::half val = migraphx::half(3.6);
std::vector<migraphx::half> data = {-infinity, nan, min, val, max, infinity};
pp["t1"] = migraphx::argument(s, data.data());
auto result = p.eval(pp).back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1, 0, 0, 0, 0, 1};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(isinf_neg_test)
{
migraphx::program p = migraphx::parse_onnx("isinf_neg_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
migraphx::parameter_map pp;
float nan = std::numeric_limits<float>::quiet_NaN();
float infinity = std::numeric_limits<float>::infinity();
float max = std::numeric_limits<float>::max();
float min = std::numeric_limits<float>::min();
std::vector<float> data = {-infinity, nan, min, 3.6, max, infinity};
pp["t1"] = migraphx::argument(s, data.data());
auto result = p.eval(pp).back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1, 0, 0, 0, 0, 0};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(isinf_double_pos_test)
{
migraphx::program p = migraphx::parse_onnx("isinf_double_pos_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape s{migraphx::shape::double_type, {2, 3}};
migraphx::parameter_map pp;
double nan = std::numeric_limits<double>::quiet_NaN();
double infinity = std::numeric_limits<double>::infinity();
double max = std::numeric_limits<double>::max();
double min = std::numeric_limits<double>::min();
std::vector<double> data = {-infinity, nan, min, 3.6, max, infinity};
pp["t1"] = migraphx::argument(s, data.data());
auto result = p.eval(pp).back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0, 0, 0, 0, 0, 1};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(isinf_no_detect_test)
{
migraphx::program p = migraphx::parse_onnx("isinf_no_detect_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
migraphx::parameter_map pp;
float nan = std::numeric_limits<float>::quiet_NaN();
float infinity = std::numeric_limits<float>::infinity();
float max = std::numeric_limits<float>::max();
float min = std::numeric_limits<float>::min();
std::vector<float> data = {-infinity, nan, min, 3.6, max, infinity};
pp["t1"] = migraphx::argument(s, data.data());
auto result = p.eval(pp).back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0, 0, 0, 0, 0, 0};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
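The four isinf tests above cover both signs of infinity as well as the case where neither is detected, and the expected outputs can be reproduced with the standard library alone. A minimal sketch, assuming the two detect flags behave like ONNX IsInf's detect_negative/detect_positive attributes (the helper below is hypothetical, not a MIGraphX API):

#include <cmath>
#include <iostream>
#include <limits>
#include <vector>

// Hypothetical reference helper: report -inf and/or +inf depending on the two flags.
std::vector<float> isinf_reference(const std::vector<float>& xs, bool detect_neg, bool detect_pos)
{
    std::vector<float> out;
    for(float x : xs)
    {
        bool is_inf = std::isinf(x);
        bool neg    = is_inf and std::signbit(x);
        out.push_back(((neg and detect_neg) or (is_inf and not neg and detect_pos)) ? 1.0f : 0.0f);
    }
    return out;
}

int main()
{
    float inf = std::numeric_limits<float>::infinity();
    std::vector<float> data = {-inf,
                               std::numeric_limits<float>::quiet_NaN(),
                               std::numeric_limits<float>::min(),
                               3.6f,
                               std::numeric_limits<float>::max(),
                               inf};
    for(float v : isinf_reference(data, true, false)) // matches the isinf_neg_test gold {1,0,0,0,0,0}
        std::cout << v << ' ';
    std::cout << '\n';
}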
TEST_CASE(layer_norm_test)
{
std::vector<float> scale{1.2, 0.8};
......@@ -1434,6 +1523,77 @@ TEST_CASE(mod_test_fmod_different_types)
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(multinomial_dyn_test)
{
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4};
auto p = migraphx::parse_onnx("multinomial_dyn_test.onnx", options);
const size_t batch_size(2);
const size_t categories(5);
const size_t sample_size(100000);
p.compile(migraphx::make_target("ref"));
// Distribution function (2 distributions of 5 categories each)
std::vector<int> dist{15, 25, 15, 25, 20, 20, 20, 10, 25, 25};
EXPECT(dist.size() == categories * batch_size);
std::vector<float> data(categories * batch_size);
std::transform(dist.begin(), dist.end(), data.begin(), [&](auto d) { return std::log(d); });
// Shape of the probability distribution, which also defines the number of categories
migraphx::shape s{migraphx::shape::float_type, {batch_size, categories}};
migraphx::parameter_map pp;
pp["input"] = migraphx::argument(s, data.data());
auto result = p.eval(pp).back();
std::vector<int32_t> result_vec(batch_size * sample_size);
result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });
// Make a categorical histogram of output
// for first result in batch
std::vector<int> res_dist(categories, 0);
size_t r = 0;
for(r = 0; r < result_vec.size() / 2; r++)
res_dist[result_vec[r]]++;
// normalizing factors for original and measured distributions
auto dist_sum = std::accumulate(dist.begin(), dist.begin() + 5, 0);
auto res_dist_sum = std::accumulate(res_dist.begin(), res_dist.end(), 0);
// Values approximate the distribution in dist
std::vector<float> norm(5);
std::vector<float> res_norm(5);
std::transform(dist.begin(), dist.begin() + 5, norm.begin(), [&](auto n) {
return static_cast<double>(n) / dist_sum;
});
std::transform(res_dist.begin(), res_dist.end(), res_norm.begin(), [&](auto n) {
return static_cast<double>(n) / res_dist_sum;
});
EXPECT(migraphx::verify::verify_range_with_tolerance(
norm, migraphx::verify::expected{res_norm}, migraphx::verify::tolerance{0.01}));
// Make a categorical histogram of output
// for second result in batch
std::fill(res_dist.begin(), res_dist.end(), 0);
for(; r < result_vec.size(); r++)
res_dist[result_vec[r]]++;
dist_sum = std::accumulate(dist.begin() + 5, dist.end(), 0);
res_dist_sum = std::accumulate(res_dist.begin(), res_dist.end(), 0);
std::transform(dist.begin() + 5, dist.end(), norm.begin(), [&](auto n) {
return static_cast<double>(n) / dist_sum;
});
std::transform(res_dist.begin(), res_dist.end(), res_norm.begin(), [&](auto n) {
return static_cast<double>(n) / res_dist_sum;
});
EXPECT(migraphx::verify::verify_range_with_tolerance(
res_norm, migraphx::verify::expected{norm}, migraphx::verify::tolerance{0.01}));
}
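The statistical check in the test above is: build a per-batch histogram of the sampled category indices, normalize both that histogram and the corresponding slice of the input distribution, and require the two to match within a small tolerance. A minimal standalone sketch of that normalization step (standard library only, names are illustrative):

#include <cstddef>
#include <numeric>
#include <vector>

// Count how often each category index appears, then rescale the counts to sum to 1 so
// the result can be compared against the normalized input distribution with a tolerance.
std::vector<float> normalized_histogram(const std::vector<int>& samples, std::size_t categories)
{
    std::vector<int> counts(categories, 0);
    for(int s : samples)
        counts[s]++;
    float total = std::accumulate(counts.begin(), counts.end(), 0);
    std::vector<float> norm(categories);
    for(std::size_t i = 0; i < categories; ++i)
        norm[i] = counts[i] / total;
    return norm;
}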
TEST_CASE(nonzero_test)
{
migraphx::program p = migraphx::parse_onnx("nonzero_dynamic_test.onnx");
......@@ -2010,6 +2170,43 @@ TEST_CASE(reversesequence_time_verify_test)
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(round_half_test)
{
migraphx::program p = migraphx::parse_onnx("round_half_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape xs{migraphx::shape::half_type, {4, 4}};
std::vector<float> tmp = {-3.51,
-3.5,
-3.49,
-2.51,
-2.50,
-2.49,
-1.6,
-1.5,
-0.51,
-0.5,
0.5,
0.6,
2.4,
2.5,
3.5,
4.5};
std::vector<migraphx::half> data{tmp.cbegin(), tmp.cend()};
migraphx::parameter_map param_map;
param_map["x"] = migraphx::argument(xs, data.data());
auto result = p.eval(param_map).back();
std::vector<migraphx::half> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
tmp = {-4.0, -4.0, -3.0, -3.0, -2.0, -2.0, -2.0, -2.0, -1.0, 0.0, 0.0, 1.0, 2.0, 2.0, 4.0, 4.0};
std::vector<migraphx::half> gold{tmp.cbegin(), tmp.cend()};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
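The gold values above follow round-half-to-even (ties go to the nearest even integer), which is the behavior of std::nearbyint under the default FE_TONEAREST rounding mode; a tiny standalone check:

#include <cfenv>
#include <cmath>
#include <cstdio>

int main()
{
    std::fesetround(FE_TONEAREST); // the default mode: ties round to the nearest even integer
    const double ties[] = {-3.5, -2.5, -0.5, 0.5, 2.5, 3.5, 4.5};
    for(double v : ties)
        std::printf("%4.1f -> %4.1f\n", v, std::nearbyint(v)); // -4, -2, -0, 0, 2, 4, 4
}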
TEST_CASE(selu_test)
{
migraphx::program p = migraphx::parse_onnx("selu_test.onnx");
......
......@@ -88,7 +88,7 @@ TEST_CASE(allocate_static)
expect_shape(out_shape, migraphx::make_op("allocate", {{"shape", to_value(out_shape)}}));
}
TEST_CASE(allocate_static_input_error)
TEST_CASE(allocate_static_input)
{
migraphx::shape input{migraphx::shape::int64_type, {3}};
migraphx::shape out_shape{migraphx::shape::float_type, {2, 3, 4}};
......@@ -116,7 +116,7 @@ TEST_CASE(allocate_dyn_with_shape_attr)
input);
}
TEST_CASE(allocate_dyn_no_input_error)
TEST_CASE(allocate_dyn_no_input)
{
migraphx::shape shape_attr{migraphx::shape::float_type,
{{1, 4}, {3, 3}, {4, 8, {4, 6}}, {4, 8}, {4, 6}}};
......@@ -124,6 +124,21 @@ TEST_CASE(allocate_dyn_no_input_error)
migraphx::make_op("allocate", {{"shape", migraphx::to_value(shape_attr)}}));
}
TEST_CASE(allocate_shape_and_buf_type_error)
{
migraphx::shape shape_attr{migraphx::shape::float_type,
{{1, 4}, {3, 3}, {4, 8, {4, 6}}, {4, 8}, {4, 6}}};
throws_shape(migraphx::make_op(
"allocate",
{{"shape", migraphx::to_value(shape_attr)}, {"buf_type", migraphx::shape::half_type}}));
}
TEST_CASE(allocate_no_attr_error)
{
migraphx::shape input{migraphx::shape::int64_type, {4}};
throws_shape(migraphx::make_op("allocate"), input);
}
TEST_CASE(argmax_axis0)
{
migraphx::shape input{migraphx::shape::half_type, {2, 3, 4, 5}};
......@@ -1942,12 +1957,42 @@ TEST_CASE(multibroadcast_3in_dyn_dyn)
expect_shape(expected_shape, migraphx::make_op("multibroadcast"), c_shape, a_shape, b_shape);
}
TEST_CASE(multinomial)
TEST_CASE(multinomial_bool_type)
{
migraphx::shape s{migraphx::shape::float_type, {2, 5}};
migraphx::shape s1{migraphx::shape::float_type, {1, 2}};
migraphx::shape s2{migraphx::shape::float_type, {3, 4}};
int dtype = 0;
throws_shape(migraphx::make_op("multinomial", {{"dtype", dtype}}), s, s);
throws_shape(migraphx::make_op("multinomial", {{"dtype", dtype}}), s1, s2);
}
TEST_CASE(multinomial)
{
migraphx::shape s1{migraphx::shape::float_type, {1, 2}};
migraphx::shape s2{migraphx::shape::float_type, {3, 4}};
migraphx::shape s3{migraphx::shape::float_type, {1, 4}};
int dtype = 2;
expect_shape(s3, migraphx::make_op("multinomial", {{"dtype", dtype}}), s1, s2);
}
TEST_CASE(multinomial_0size_input)
{
migraphx::shape s1{migraphx::shape::float_type, {1, 2}};
migraphx::shape s2{migraphx::shape::float_type, {}};
int dtype = 2;
throws_shape(migraphx::make_op("multinomial", {{"dtype", dtype}}), s1, s2);
}
TEST_CASE(multinomial_dyn)
{
migraphx::shape s1{migraphx::shape::int32_type, {{2, 3}, {5, 6}}};
migraphx::shape s2{migraphx::shape::int32_type, {{7, 8}, {9, 10}}};
migraphx::shape s3{migraphx::shape::int32_type, {{2, 3}, {9, 10}}};
expect_shape(
s3, migraphx::make_op("multinomial", {{"dtype", migraphx::shape::int32_type}}), s1, s2);
}
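Taken together, these cases suggest the shape rule being checked: the output keeps the batch dimension of the distribution input and the sample-count dimension of the random input, with the element type taken from dtype. A small sketch of that rule (an inference from the expected shapes, not the operator's actual implementation):

#include <array>
#include <cstddef>

// Output dims = {batch of the CDF input, samples of the random input}.
std::array<std::size_t, 2> multinomial_out_lens(const std::array<std::size_t, 2>& cdf_lens,
                                                const std::array<std::size_t, 2>& sample_lens)
{
    return {cdf_lens[0], sample_lens[1]}; // e.g. {1, 2} and {3, 4} -> {1, 4}
}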
TEST_CASE(nms_shape)
......@@ -3188,6 +3233,64 @@ TEST_CASE(slice_static_shape)
TEST_CASE(slice_var_inputs_static_shape0)
{
// attr ends and axes set; inputs are (data, input_starts)
migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
migraphx::shape starts{migraphx::shape::int64_type, {2}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {{3, 3}, {0, 4}, {0, 4}}},
migraphx::make_op("slice", {{"ends", {2, 3}}, {"axes", {1, 2}}}),
input,
starts);
}
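The expected shape here illustrates the pattern used throughout the variable-input slice cases: axes whose starts, ends, or axes arrive at run time can no longer be resolved at compile time, so those dimensions become dynamic with range [0, original_dim], while untouched axes keep their fixed size. A short sketch of that pattern (an inference from the test expectations, not the operator's real shape computation):

#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

// For a static input, axes sliced by runtime values get a {0, dim} range;
// all other axes stay fixed as {dim, dim}.
std::vector<std::pair<std::size_t, std::size_t>>
sliced_dyn_dims(const std::vector<std::size_t>& dims, const std::vector<std::size_t>& axes)
{
    std::vector<std::pair<std::size_t, std::size_t>> out;
    for(std::size_t i = 0; i < dims.size(); ++i)
    {
        if(std::find(axes.begin(), axes.end(), i) != axes.end())
            out.push_back({0, dims[i]});
        else
            out.push_back({dims[i], dims[i]});
    }
    return out; // {3, 4, 4} with axes {1, 2} -> {{3,3}, {0,4}, {0,4}}
}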
TEST_CASE(slice_var_inputs_static_mismatch_error0)
{
migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
migraphx::shape starts{migraphx::shape::int64_type, {2}};
throws_shape(
migraphx::make_op("slice", {{"ends", {2, 3, 4}}, {"axes", {0, 1, 2}}}), input, starts);
}
TEST_CASE(slice_var_inputs_static_shape1)
{
// attr starts and axes set; inputs are (data, input_ends)
migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
migraphx::shape ends{migraphx::shape::int64_type, {2}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {{3, 3}, {0, 4}, {0, 4}}},
migraphx::make_op("slice", {{"starts", {0, 1}}, {"axes", {1, 2}}}),
input,
ends);
}
TEST_CASE(slice_var_inputs_static_mismatch_error1)
{
migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
migraphx::shape ends{migraphx::shape::int64_type, {2}};
throws_shape(
migraphx::make_op("slice", {{"starts", {0, 1, 2}}, {"axes", {0, 1, 2}}}), input, ends);
}
TEST_CASE(slice_var_inputs_static_shape2)
{
// attr starts and ends set; inputs are (data, input_axes)
migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
migraphx::shape axes{migraphx::shape::int64_type, {2}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 3}, {0, 4}, {0, 4}}},
migraphx::make_op("slice", {{"starts", {0, 1}}, {"ends", {1, 2}}}),
input,
axes);
}
TEST_CASE(slice_var_inputs_static_mismatch_error2)
{
migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
migraphx::shape axes{migraphx::shape::int64_type, {2}};
throws_shape(
migraphx::make_op("slice", {{"starts", {0, 1, 2}}, {"ends", {3, 4, 4}}}), input, axes);
}
TEST_CASE(slice_var_inputs_static_shape3)
{
// attr axes set; inputs are (data, input_starts, input_ends)
migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
migraphx::shape starts{migraphx::shape::int64_type, {2}};
migraphx::shape ends{migraphx::shape::int64_type, {2}};
......@@ -3198,7 +3301,57 @@ TEST_CASE(slice_var_inputs_static_shape0)
ends);
}
TEST_CASE(slice_var_inputs_static_shape1)
TEST_CASE(slice_var_inputs_static_mismatch_error3)
{
migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
migraphx::shape starts{migraphx::shape::int64_type, {2}};
migraphx::shape ends{migraphx::shape::int64_type, {2}};
throws_shape(migraphx::make_op("slice", {{"axes", {0, 1, 2}}}), input, starts, ends);
}
TEST_CASE(slice_var_inputs_static_shape4)
{
// attr ends set; inputs are (data, input_starts, input_axes)
migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
migraphx::shape starts{migraphx::shape::int64_type, {2}};
migraphx::shape axes{migraphx::shape::int64_type, {2}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 3}, {0, 4}, {0, 4}}},
migraphx::make_op("slice", {{"ends", {3, 4}}}),
input,
starts,
axes);
}
TEST_CASE(slice_var_inputs_static_mismatch_error4)
{
migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
migraphx::shape starts{migraphx::shape::int64_type, {2}};
migraphx::shape axes{migraphx::shape::int64_type, {2}};
throws_shape(migraphx::make_op("slice", {{"ends", {3, 3, 3}}}), input, starts, axes);
}
TEST_CASE(slice_var_inputs_static_shape5)
{
// attr starts set; inputs are (data, input_ends, input_axes)
migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
migraphx::shape ends{migraphx::shape::int64_type, {2}};
migraphx::shape axes{migraphx::shape::int64_type, {2}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 3}, {0, 4}, {0, 4}}},
migraphx::make_op("slice", {{"starts", {0, 2}}}),
input,
ends,
axes);
}
TEST_CASE(slice_var_inputs_static_mismatch_error5)
{
migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
migraphx::shape ends{migraphx::shape::int64_type, {2}};
migraphx::shape axes{migraphx::shape::int64_type, {2}};
throws_shape(migraphx::make_op("slice", {{"starts", {0, 1, 2}}}), input, ends, axes);
}
TEST_CASE(slice_var_inputs_static_shape6)
{
migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
migraphx::shape starts{migraphx::shape::int64_type, {2}};
......@@ -3212,7 +3365,7 @@ TEST_CASE(slice_var_inputs_static_shape1)
axes);
}
TEST_CASE(slice_var_inputs_static_error0)
TEST_CASE(slice_var_inputs_static_mismatch_error6)
{
migraphx::shape input{migraphx::shape::float_type, {3, 4, 4}};
migraphx::shape starts{migraphx::shape::int64_type, {2}};
......@@ -3223,17 +3376,125 @@ TEST_CASE(slice_var_inputs_static_error0)
TEST_CASE(slice_var_inputs_dyn_shape0)
{
migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {2, 4, {2, 4}}, {2, 4, {2, 4}}}};
// attr ends and axes set; inputs are (data, input_starts)
migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
migraphx::shape starts{migraphx::shape::int64_type, {2}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {{3, 6}, {0, 6}, {0, 6}}},
migraphx::make_op("slice", {{"ends", {2, 3}}, {"axes", {1, 2}}}),
input,
starts);
}
TEST_CASE(slice_var_inputs_dyn_mismatch_error0)
{
migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
migraphx::shape starts{migraphx::shape::int64_type, {2}};
throws_shape(
migraphx::make_op("slice", {{"ends", {2, 3, 4}}, {"axes", {0, 1, 2}}}), input, starts);
}
TEST_CASE(slice_var_inputs_dyn_shape1)
{
// attr starts and axes set; inputs are (data, input_ends)
migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
migraphx::shape ends{migraphx::shape::int64_type, {2}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {{3, 6}, {0, 4}, {0, 4}}},
expect_shape(migraphx::shape{migraphx::shape::float_type, {{3, 6}, {0, 6}, {0, 6}}},
migraphx::make_op("slice", {{"starts", {0, 1}}, {"axes", {1, 2}}}),
input,
ends);
}
TEST_CASE(slice_var_inputs_dyn_mismatch_error1)
{
migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
migraphx::shape ends{migraphx::shape::int64_type, {2}};
throws_shape(
migraphx::make_op("slice", {{"starts", {0, 1, 2}}, {"axes", {0, 1, 2}}}), input, ends);
}
TEST_CASE(slice_var_inputs_dyn_shape2)
{
// attr starts and ends set; inputs are (data, input_axes)
migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
migraphx::shape axes{migraphx::shape::int64_type, {2}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 6}, {0, 6}, {0, 6}}},
migraphx::make_op("slice", {{"starts", {0, 1}}, {"ends", {8, 8}}}),
input,
axes);
}
TEST_CASE(slice_var_inputs_dyn_mismatch_error2)
{
migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
migraphx::shape axes{migraphx::shape::int64_type, {2}};
throws_shape(
migraphx::make_op("slice", {{"starts", {0, 1, 2}}, {"ends", {3, 4, 4}}}), input, axes);
}
TEST_CASE(slice_var_inputs_dyn_shape3)
{
// attr axes set; inputs are (data, input_starts, input_ends)
migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
migraphx::shape starts{migraphx::shape::int64_type, {2}};
migraphx::shape ends{migraphx::shape::int64_type, {2}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {{3, 6}, {0, 6}, {0, 6}}},
migraphx::make_op("slice", {{"axes", {1, 2}}}),
input,
starts,
ends);
}
TEST_CASE(slice_var_inputs_dyn_shape1)
TEST_CASE(slice_var_inputs_dyn_mismatch_error3)
{
migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
migraphx::shape starts{migraphx::shape::int64_type, {2}};
migraphx::shape ends{migraphx::shape::int64_type, {2}};
throws_shape(migraphx::make_op("slice", {{"axes", {0, 1, 2}}}), input, starts, ends);
}
TEST_CASE(slice_var_inputs_dyn_shape4)
{
// attr ends set; inputs are (data, input_starts, input_axes)
migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
migraphx::shape starts{migraphx::shape::int64_type, {2}};
migraphx::shape axes{migraphx::shape::int64_type, {2}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 6}, {0, 6}, {0, 6}}},
migraphx::make_op("slice", {{"ends", {3, 4}}}),
input,
starts,
axes);
}
TEST_CASE(slice_var_inputs_dyn_mismatch_error4)
{
migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
migraphx::shape starts{migraphx::shape::int64_type, {2}};
migraphx::shape axes{migraphx::shape::int64_type, {2}};
throws_shape(migraphx::make_op("slice", {{"ends", {3, 3, 3}}}), input, starts, axes);
}
TEST_CASE(slice_var_inputs_dyn_shape5)
{
// attr starts set; inputs are (data, input_ends, input_axes)
migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
migraphx::shape ends{migraphx::shape::int64_type, {2}};
migraphx::shape axes{migraphx::shape::int64_type, {2}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {{0, 6}, {0, 6}, {0, 6}}},
migraphx::make_op("slice", {{"starts", {0, 2}}}),
input,
ends,
axes);
}
TEST_CASE(slice_var_inputs_dyn_mismatch_error5)
{
migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
migraphx::shape ends{migraphx::shape::int64_type, {2}};
migraphx::shape axes{migraphx::shape::int64_type, {2}};
throws_shape(migraphx::make_op("slice", {{"starts", {0, 1, 2}}}), input, ends, axes);
}
TEST_CASE(slice_var_inputs_dyn_shape6)
{
migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {2, 4, {2, 4}}, {2, 4, {2, 4}}}};
migraphx::shape starts{migraphx::shape::int64_type, {2}};
......@@ -3247,6 +3508,15 @@ TEST_CASE(slice_var_inputs_dyn_shape1)
axes);
}
TEST_CASE(slice_var_inputs_dyn_mismatch_error6)
{
migraphx::shape input{migraphx::shape::float_type, {{3, 6}, {4, 6}, {4, 6}}};
migraphx::shape starts{migraphx::shape::int64_type, {2}};
migraphx::shape ends{migraphx::shape::int64_type, {2}};
migraphx::shape axes{migraphx::shape::int64_type, {3}};
throws_shape(migraphx::make_op("slice"), input, starts, ends, axes);
}
TEST_CASE(slice_dyn_shape0)
{
migraphx::shape input{migraphx::shape::int32_type, {{2, 3}, {7, 7}, {2, 3}}};
......
......@@ -28,6 +28,7 @@ set(VENV_ONNX ${CMAKE_BINARY_DIR}/test/py/venv-onnx)
set(REQUIREMENTS ${CMAKE_CURRENT_SOURCE_DIR}/requirements.txt)
set(REQUIREMENTS_ONNX ${CMAKE_CURRENT_SOURCE_DIR}/requirements-onnx.txt)
set(PYTHON_VERSION_TO_DISABLE_ONNX 3.6)
option(MIGRAPHX_DISABLE_VIRTUAL_ENV "Disable python virtual environments" OFF)
function(add_py_venv_fixture FIXTURE_NAME VIRTUAL_ENV_DIR REQUIREMENTS_FILE)
......@@ -44,9 +45,17 @@ function(add_py_venv_fixture FIXTURE_NAME VIRTUAL_ENV_DIR REQUIREMENTS_FILE)
add_test(NAME py_${PYTHON_VERSION}_${FIXTURE_NAME}_initialize_env COMMAND ${PYTHON_EXECUTABLE} -m venv ${VIRTUAL_ENV_DIR}/${PYTHON_VERSION} --clear)
set_tests_properties(py_${PYTHON_VERSION}_${FIXTURE_NAME}_initialize_env PROPERTIES FIXTURES_SETUP ${FIXTURE_NAME}_${PYTHON_VERSION}_INIT_VENV)
set(PYTHON_EXECUTABLE ${VIRTUAL_ENV_DIR}/${PYTHON_VERSION}/bin/python)
add_test(
NAME py_${PYTHON_VERSION}_${FIXTURE_NAME}_setup_env
COMMAND ${PYTHON_EXECUTABLE} -m pip install -r ${REQUIREMENTS_FILE})
if(EXISTS ${REQUIREMENTS_FILE})
add_test(
NAME py_${PYTHON_VERSION}_${FIXTURE_NAME}_setup_env
COMMAND ${PYTHON_EXECUTABLE} -m pip install -r ${REQUIREMENTS_FILE})
else()
# If there is no requirements file, then there are no packages to install in the virtual env.
# Just create a placeholder test for setting up the required fixture for running the tests.
add_test(
NAME py_${PYTHON_VERSION}_${FIXTURE_NAME}_setup_env
COMMAND ${PYTHON_EXECUTABLE} -m pip install --help)
endif()
set_tests_properties(py_${PYTHON_VERSION}_${FIXTURE_NAME}_setup_env PROPERTIES FIXTURES_REQUIRED ${FIXTURE_NAME}_${PYTHON_VERSION}_INIT_VENV)
set_tests_properties(py_${PYTHON_VERSION}_${FIXTURE_NAME}_setup_env PROPERTIES FIXTURES_SETUP ${FIXTURE_NAME}_${PYTHON_VERSION}_VENV)
endif()
......@@ -61,23 +70,31 @@ function(add_py_test NAME SCRIPT FIXTURE_NAME VENV_DIR)
"PYTHONMALLOC=debug"
"MALLOC_CHECK_=3"
)
set(PYTHON_EXECUTABLE ${VENV_DIR}/${PYTHON_VERSION}/bin/python)
if(MIGRAPHX_DISABLE_VIRTUAL_ENV)
set(PYTHON_EXECUTABLE ${PYTHON_${PYTHON_VERSION}_EXECUTABLE})
else()
set(PYTHON_EXECUTABLE ${VENV_DIR}/${PYTHON_VERSION}/bin/python)
endif()
if(NOT (${FIXTURE_NAME} STREQUAL "onnx" AND ${PYTHON_VERSION} STREQUAL ${PYTHON_VERSION_TO_DISABLE_ONNX}))
add_test(
NAME test_py_${PYTHON_VERSION}_${NAME}
COMMAND ${ENV_COMMAND} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/${SCRIPT} ${ARGN})
set_tests_properties(test_py_${PYTHON_VERSION}_${NAME} PROPERTIES FIXTURES_REQUIRED ${FIXTURE_NAME}_${PYTHON_VERSION}_VENV)
add_custom_target(test_py_${PYTHON_VERSION}_${NAME}
COMMAND ${ENV_COMMAND} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/${SCRIPT} ${ARGN}
COMMENT "${PYTHON_EXECUTABLE} ${SCRIPT}")
if(NOT MIGRAPHX_DISABLE_VIRTUAL_ENV)
set_tests_properties(test_py_${PYTHON_VERSION}_${NAME} PROPERTIES FIXTURES_REQUIRED ${FIXTURE_NAME}_${PYTHON_VERSION}_VENV)
endif()
endif()
endforeach()
endfunction()
add_dependencies(tests migraphx_py)
add_dependencies(check migraphx_py)
add_py_venv_fixture(common ${VENV} ${REQUIREMENTS})
add_py_venv_fixture(onnx ${VENV_ONNX} ${REQUIREMENTS_ONNX})
if(NOT MIGRAPHX_DISABLE_VIRTUAL_ENV)
add_py_venv_fixture(common ${VENV} ${REQUIREMENTS})
add_py_venv_fixture(onnx ${VENV_ONNX} ${REQUIREMENTS_ONNX})
endif()
add_py_test(ref test_cpu.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(save_load test_save_load.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
......
......@@ -83,7 +83,6 @@ def disabled_tests_onnx_1_7_0(backend_test):
backend_test.exclude(r'test_nonmaxsuppression_two_batches_cpu')
backend_test.exclude(r'test_nonmaxsuppression_two_classes_cpu')
backend_test.exclude(r'test_nonzero_example_cpu')
backend_test.exclude(r'test_round_cpu')
backend_test.exclude(r'test_softmax_axis_0_cpu')
backend_test.exclude(r'test_softmax_axis_1_cpu')
backend_test.exclude(r'test_softmax_default_axis_cpu')
......@@ -135,9 +134,6 @@ def disabled_tests_onnx_1_7_0(backend_test):
backend_test.exclude(r'test_hardmax_example_cpu')
backend_test.exclude(r'test_hardmax_negative_axis_cpu')
backend_test.exclude(r'test_hardmax_one_hot_cpu')
backend_test.exclude(r'test_isinf_cpu')
backend_test.exclude(r'test_isinf_negative_cpu')
backend_test.exclude(r'test_isinf_positive_cpu')
backend_test.exclude(r'test_matmulinteger_cpu')
backend_test.exclude(r'test_maxpool_2d_uint8_cpu')
backend_test.exclude(r'test_maxunpool_export_with_output_shape_cpu')
......@@ -835,10 +831,6 @@ def disabled_tests_onnx_1_13_0(backend_test):
backend_test.exclude(r'test_scatter_elements_with_reduction_max_cpu')
backend_test.exclude(r'test_scatter_elements_with_reduction_min_cpu')
# The following tests fail due to the CastLike operator being unsupported
backend_test.exclude(r'test_split_1d_uneven_split_opset18_cpu')
backend_test.exclude(r'test_split_2d_uneven_split_opset18_cpu')
def disabled_tests_onnx_1_14_0(backend_test):
# fails
......
......@@ -26,4 +26,4 @@ onnx==1.14.1
protobuf==3.20.2
numpy==1.21.6
packaging==23.0
pytest==6.0.1
\ No newline at end of file
pytest==6.0.1
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
numpy==1.21.6
\ No newline at end of file
......@@ -22,7 +22,6 @@
# THE SOFTWARE.
#####################################################################################
import migraphx
import numpy as np
def test_conv_relu():
......@@ -51,8 +50,12 @@ def test_sub_uint64():
params = {}
shapes = p.get_parameter_shapes()
params["0"] = np.arange(120).reshape(shapes["0"].lens()).astype(np.uint64)
params["1"] = np.arange(20).reshape(shapes["1"].lens()).astype(np.uint64)
params["0"] = migraphx.create_argument(
migraphx.shape(type='uint64_type', lens=shapes["0"].lens()),
list(range(120)))
params["1"] = migraphx.create_argument(
migraphx.shape(type='uint64_type', lens=shapes["1"].lens()),
list(range(20)))
r = p.run(params)
print(r)
......@@ -67,7 +70,9 @@ def test_neg_int64():
params = {}
shapes = p.get_parameter_shapes()
params["0"] = np.arange(6).reshape(shapes["0"].lens()).astype(np.int64)
params["0"] = migraphx.create_argument(
migraphx.shape(type='int64_type', lens=shapes["0"].lens()),
list(range(6)))
r = p.run(params)
print(r)
......@@ -82,8 +87,9 @@ def test_nonzero():
params = {}
shapes = p.get_parameter_shapes()
params["data"] = np.array([1, 1, 0,
1]).reshape(shapes["data"].lens()).astype(bool)
params["data"] = migraphx.create_argument(
migraphx.shape(type='bool_type', lens=shapes["data"].lens()),
[1, 1, 0, 1])
r = p.run(params)
print(r)
......@@ -101,8 +107,8 @@ def test_fp16_imagescaler():
params = {}
shapes = p.get_parameter_shapes()
params["0"] = np.random.randn(768).reshape(shapes["0"].lens()).astype(
np.float16)
params["0"] = migraphx.generate_argument(
migraphx.shape(type='half_type', lens=shapes["0"].lens()), 768)
r = p.run(params)[-1]
print(r)
......@@ -120,10 +126,12 @@ def test_if_pl():
params = {}
shapes = p.get_parameter_shapes()
params["x"] = np.ones(6).reshape(shapes["x"].lens()).astype(np.float32)
params["y"] = np.array([2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0
]).reshape(shapes["y"].lens()).astype(np.float32)
params["cond"] = np.array([1]).reshape(()).astype(bool)
params["x"] = migraphx.fill_argument(
migraphx.shape(type='float_type', lens=shapes["x"].lens()), 1)
params["y"] = migraphx.fill_argument(
migraphx.shape(type='float_type', lens=shapes["y"].lens()), 2.0)
params["cond"] = migraphx.fill_argument(
migraphx.shape(type="bool", lens=[1], strides=[0]), 1)
r = p.run(params)[-1]
print(r)
......
......@@ -30,7 +30,7 @@
#include <test.hpp>
TEST_CASE(allocate_dyn)
TEST_CASE(allocate_dyn0)
{
migraphx::program p;
auto* mm = p.get_main_module();
......@@ -47,3 +47,21 @@ TEST_CASE(allocate_dyn)
migraphx::shape sresult{migraphx::shape::float_type, {2, 3, 4, 4}};
result.visit([&](auto output) { EXPECT(output.get_shape() == sresult); });
}
TEST_CASE(allocate_dyn1)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::int64_type, {4}};
migraphx::shape out_shape{migraphx::shape::float_type, {2, 3, 4, 4}};
auto out_dims = mm->add_parameter("out_dims", s);
mm->add_instruction(migraphx::make_op("allocate", {{"shape", migraphx::to_value(out_shape)}}),
out_dims);
p.compile(migraphx::make_target("ref"));
migraphx::parameter_map params;
std::vector<int64_t> data = {2, 3, 4, 4};
params["out_dims"] = migraphx::argument(s, data.data());
auto result = p.eval(params).back();
result.visit([&](auto output) { EXPECT(output.get_shape() == out_shape); });
}
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/instruction.hpp>
#include <migraphx/literal.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/program.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/verify.hpp>
#include <test.hpp>
TEST_CASE(isinf_double_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::double_type, {2, 3}};
auto inf_val = std::numeric_limits<double>::infinity();
std::vector<double> data0 = {1.2, 5.2, inf_val, -inf_val, 0., 100.};
auto l1 = mm->add_literal(migraphx::literal{s, data0});
mm->add_instruction(migraphx::make_op("isinf"), l1);
p.compile(migraphx::make_target("ref"));
auto result = p.eval({}).back();
std::vector<double> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<double> gold = {0, 0, 1, 1, 0, 0};
EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}
TEST_CASE(isinf_float_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
auto inf_val = std::numeric_limits<float>::infinity();
std::vector<float> data0 = {1.2, 5.2, inf_val, -inf_val, 0., 100.};
auto l1 = mm->add_literal(migraphx::literal{s, data0});
mm->add_instruction(migraphx::make_op("isinf"), l1);
p.compile(migraphx::make_target("ref"));
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0, 0, 1, 1, 0, 0};
EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}
TEST_CASE(isinf_half_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::half_type, {2, 3}};
auto inf_val = std::numeric_limits<migraphx::half>::infinity();
migraphx::half a{1.2};
migraphx::half b{5.2};
std::vector<migraphx::half> data0 = {a, b, inf_val, -inf_val, b, a};
auto l1 = mm->add_literal(migraphx::literal{s, data0});
mm->add_instruction(migraphx::make_op("isinf"), l1);
p.compile(migraphx::make_target("ref"));
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0, 0, 1, 1, 0, 0};
EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}
TEST_CASE(isinf_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {{2, 2}, {3, 8}}};
auto input = mm->add_parameter("X", s);
auto inf_val = std::numeric_limits<float>::infinity();
mm->add_instruction(migraphx::make_op("isinf"), input);
p.compile(migraphx::make_target("ref"));
std::vector<float> input_data = {1.2, 5.2, inf_val, -inf_val, 0., 100.};
migraphx::parameter_map params0;
migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {2, 3}};
params0["X"] = migraphx::argument(input_fixed_shape0, input_data.data());
auto result = p.eval(params0).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0, 0, 1, 1, 0, 0};
EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}
......@@ -24,9 +24,10 @@
#include <migraphx/instruction.hpp>
#include <migraphx/literal.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/program.hpp>
#include <migraphx/onnx.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/verify.hpp>
#include <numeric>
#include <random>
#include <test.hpp>
......@@ -48,27 +49,37 @@ TEST_CASE(multinomial_test)
migraphx::shape s{migraphx::shape::float_type, {1, 5}};
std::vector<int> dist{15, 25, 15, 25, 20};
std::vector<float> data(5);
std::transform(dist.begin(), dist.end(), data.begin(), [&](auto d) { return std::log(d); });
auto input = mm->add_literal(migraphx::literal(s, data));
std::vector<float> sum(5);
// convert to float
std::transform(dist.begin(), dist.end(), data.begin(), [&](auto d) { return d; });
// take cumulative sum
std::partial_sum(data.begin(), data.end(), sum.begin(), std::plus<float>());
// scale probabilities arbitrarily
float odd_scale = 10000.;
std::transform(sum.begin(), sum.end(), data.begin(), [&](auto d) { return d * odd_scale; });
auto maxes = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {1}}}), input);
auto mb_maxes =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 5}}}), maxes);
auto cdf = mm->add_instruction(migraphx::make_op("sub"), input, mb_maxes);
cdf = mm->add_instruction(migraphx::make_op("exp"), cdf);
cdf = mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", false}}), cdf);
auto input = mm->add_literal(migraphx::literal(s, data));
mm->add_instruction(migraphx::make_op("multinomial"), cdf, rs_lit);
mm->add_instruction(migraphx::make_op("multinomial"), input, rs_lit);
p.compile(migraphx::make_target("ref"));
auto result = p.eval({}).back();
// result_vec contains an index, or category label, for each random input value
std::vector<int32_t> result_vec(sample_size);
result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });
// res_dist is a count, or histogram, of the number of samples in each category. This is the
// sampled distribution.
std::vector<int> res_dist(5, 0);
for(const auto& r : result_vec)
res_dist[r]++;
auto dist_sum = std::accumulate(dist.begin(), dist.end(), 0);
// To check the result, normalize the original probability distribution dist
// and the sampling result res_dist; they should be close
// Total the unnormalized probabilities
auto dist_sum = std::accumulate(dist.begin(), dist.end(), 0);
// Total the number of values returned
auto res_dist_sum = std::accumulate(res_dist.begin(), res_dist.end(), 0);
std::vector<float> norm(5);
std::vector<float> res_norm(5);
......@@ -78,6 +89,204 @@ TEST_CASE(multinomial_test)
std::transform(res_dist.begin(), res_dist.end(), res_norm.begin(), [&](auto n) {
return static_cast<double>(n) / res_dist_sum;
});
EXPECT(migraphx::verify::verify_range_with_tolerance(
res_norm, migraphx::verify::expected{norm}, migraphx::verify::tolerance{0.01}));
}
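The reworked reference test above now feeds the multinomial op an unnormalized cumulative distribution built on the host (partial sums of the raw counts, scaled by an arbitrary factor) instead of constructing a CDF inside the program. A minimal sketch of that preprocessing (standard library only):

#include <numeric>
#include <vector>

// Accumulate the raw counts and apply an arbitrary positive scale; the test then checks
// that sampling still reproduces the original relative proportions.
std::vector<float> unnormalized_cdf(const std::vector<int>& dist, float scale)
{
    std::vector<float> cdf(dist.size());
    std::partial_sum(dist.begin(), dist.end(), cdf.begin());
    for(auto& v : cdf)
        v *= scale;
    return cdf; // {15, 25, 15, 25, 20} -> {15, 40, 55, 80, 100} * scale
}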
TEST_CASE(multinomial_dyn_test)
{
// Invokes random_uniform and multinomial ops together, to verify the interface
// Dynamic Batch dimension input of 2 means there are 2 different probability
// distribution functions contained in Input_2
migraphx::program p;
auto* mm = p.get_main_module();
size_t sample_size = 100000;
size_t batch_size = 2;
// Shape of the random data
migraphx::shape rs{migraphx::shape::float_type, {{1, 2}, {2, sample_size + 1}}};
auto input = mm->add_parameter("Input_1", rs);
// Runtime randomization seed
// To seed the random_uniform, we can provide a value by literal or input,
// or ask the system to auto-seed with random_seed op.
migraphx::shape seed_shape{migraphx::shape::uint32_type,
{migraphx::shape::dynamic_dimension{0, 1}}};
auto seed_input = mm->add_parameter("Seed", seed_shape);
// Shape of the probability distribution, which also defines the number of categories
migraphx::shape s{migraphx::shape::float_type, {{2, 2}, {5, 6}}};
// Unnormalized distributions for batch size 2:
// 15, 25, 15, 25, 20
// 20, 20, 10, 25, 25
std::vector<int> dist{15, 25, 15, 25, 20, 20, 20, 10, 25, 25};
// Hard-coded non-normalized, accumulated distribution follows:
std::vector<float> data{.15f, .40f, .55f, .80f, 1.0f, 20.f, 40.f, 50.f, 75.f, 100.f};
auto input2 = mm->add_parameter("Input_2", s);
auto randoms = mm->add_instruction(migraphx::make_op("random_uniform"), seed_input, input);
mm->add_instruction(migraphx::make_op("multinomial"), input2, randoms);
p.compile(migraphx::make_target("ref"));
// Create a dummy input in the shape we want for the random data
std::vector<float> dummy(sample_size, 0);
migraphx::shape input_fixed_shape1{migraphx::shape::float_type, {batch_size, sample_size}};
migraphx::shape input_fixed_shape2{migraphx::shape::float_type, {batch_size, 5}};
migraphx::parameter_map params0;
params0["Input_1"] = migraphx::argument(input_fixed_shape1, dummy.data());
migraphx::shape seed_fixed_shape{migraphx::shape::uint32_type, {1}};
std::vector<uint32_t> seed_data = {4};
params0["Seed"] = migraphx::argument(seed_fixed_shape, seed_data.data());
params0["Input_2"] = migraphx::argument(input_fixed_shape2, data.data());
auto result = p.eval(params0).back();
std::vector<float> result_vec(input_fixed_shape2.elements());
result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });
// Make a categorical histogram of output
std::vector<int> res_dist(5, 0);
size_t r = 0;
for(r = 0; r < result_vec.size() / 2; r++)
res_dist[result_vec[r]]++;
// histogram for second set of batch
std::vector<int> res_dist2(5, 0);
for(; r < result_vec.size(); r++)
res_dist2[result_vec[r]]++;
// Rescale or normalize both the input probability distribution and the output
// histogram, and compare. Should be close but not identical.
auto dist_sum = std::accumulate(dist.begin(), dist.begin() + 5, 0);
auto res_dist_sum = std::accumulate(res_dist.begin(), res_dist.end(), 0);
std::vector<float> norm(5);
std::vector<float> res_norm(5);
std::transform(dist.begin(), dist.begin() + 5, norm.begin(), [&](auto n) {
return static_cast<double>(n) / dist_sum;
});
std::transform(res_dist.begin(), res_dist.end(), res_norm.begin(), [&](auto n) {
return static_cast<double>(n) / res_dist_sum;
});
EXPECT(migraphx::verify::verify_range_with_tolerance(
res_norm, migraphx::verify::expected{norm}, migraphx::verify::tolerance{0.01}));
// Do the same rescaling for the 2nd in batch, which has a different probability distribution
dist_sum = std::accumulate(dist.begin() + 5, dist.end(), 0);
res_dist_sum = std::accumulate(res_dist2.begin(), res_dist2.end(), 0);
std::transform(dist.begin() + 5, dist.end(), norm.begin(), [&](auto n) {
return static_cast<double>(n) / dist_sum;
});
std::transform(res_dist2.begin(), res_dist2.end(), res_norm.begin(), [&](auto n) {
return static_cast<double>(n) / res_dist_sum;
});
EXPECT(migraphx::verify::verify_range_with_tolerance(
res_norm, migraphx::verify::expected{norm}, migraphx::verify::tolerance{0.01}));
}
TEST_CASE(multinomial_float_dyn_test)
{
// int data type for random_uniform op and float data type for multinomial.
migraphx::program p;
auto* mm = p.get_main_module();
size_t sample_size = 100000;
size_t batch_size = 2;
// Shape of the random data
migraphx::shape rs{migraphx::shape::int32_type, {{1, 2}, {2, sample_size + 1}}};
auto input = mm->add_parameter("Input_1", rs);
// Runtime randomization seed
// To seed the random_uniform, we can provide a value by literal or input,
// or ask the system to auto-seed with random_seed op.
migraphx::shape seed_shape{migraphx::shape::uint32_type,
{migraphx::shape::dynamic_dimension{0, 1}}};
auto seed_input = mm->add_parameter("Seed", seed_shape);
// Shape of the probability distribution, which also defines the number of categories
migraphx::shape s{migraphx::shape::float_type, {{2, 2}, {5, 6}}};
// Unnormalized distributions for batch size 2:
// 15, 25, 15, 25, 20
// 20, 20, 10, 25, 25
std::vector<int> dist{15, 25, 15, 25, 20, 20, 20, 10, 25, 25};
// Hard-coded normalized, accumulated distribution follows:
std::vector<float> data{.15f, .40f, .55f, .80f, 1.0f, .20f, .40f, .50f, .75f, 1.0f};
auto input2 = mm->add_parameter("Input_2", s);
auto randoms = mm->add_instruction(migraphx::make_op("random_uniform"), seed_input, input);
mm->add_instruction(migraphx::make_op("multinomial", {{"dtype", migraphx::shape::float_type}}),
input2,
randoms);
p.compile(migraphx::make_target("ref"));
// Create a dummy input in the shape we want for the random data
std::vector<float> dummy(sample_size, 0);
migraphx::shape input_fixed_shape1{migraphx::shape::float_type, {batch_size, sample_size}};
migraphx::shape input_fixed_shape2{migraphx::shape::float_type, {batch_size, 5}};
migraphx::parameter_map params0;
params0["Input_1"] = migraphx::argument(input_fixed_shape1, dummy.data());
migraphx::shape seed_fixed_shape{migraphx::shape::uint32_type, {1}};
std::vector<uint32_t> seed_data = {4};
params0["Seed"] = migraphx::argument(seed_fixed_shape, seed_data.data());
params0["Input_2"] = migraphx::argument(input_fixed_shape2, data.data());
auto result = p.eval(params0).back();
std::vector<float> result_vec(input_fixed_shape2.elements());
result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });
// Make a categorical histogram of output
std::vector<int> res_dist(5, 0);
size_t r = 0;
for(r = 0; r < result_vec.size() / 2; r++)
res_dist[result_vec[r]]++;
// histogram for second set of batch
std::vector<int> res_dist2(5, 0);
for(; r < result_vec.size(); r++)
res_dist2[result_vec[r]]++;
// Rescale or normalize both the input probability distribution and the output
// histogram, and compare. Should be close but not identical.
auto dist_sum = std::accumulate(dist.begin(), dist.begin() + 5, 0);
auto res_dist_sum = std::accumulate(res_dist.begin(), res_dist.end(), 0);
std::vector<float> norm(5);
std::vector<float> res_norm(5);
std::transform(dist.begin(), dist.begin() + 5, norm.begin(), [&](auto n) {
return static_cast<double>(n) / dist_sum;
});
std::transform(res_dist.begin(), res_dist.end(), res_norm.begin(), [&](auto n) {
return static_cast<double>(n) / res_dist_sum;
});
EXPECT(migraphx::verify::verify_range_with_tolerance(
res_norm, migraphx::verify::expected{norm}, migraphx::verify::tolerance{0.01}));
// Do the same rescaling for the 2nd in batch, which has a different probability distribution
dist_sum = std::accumulate(dist.begin() + 5, dist.end(), 0);
res_dist_sum = std::accumulate(res_dist2.begin(), res_dist2.end(), 0);
std::transform(dist.begin() + 5, dist.end(), norm.begin(), [&](auto n) {
return static_cast<double>(n) / dist_sum;
});
std::transform(res_dist2.begin(), res_dist2.end(), res_norm.begin(), [&](auto n) {
return static_cast<double>(n) / res_dist_sum;
});
EXPECT(migraphx::verify::verify_range_with_tolerance(
res_norm, migraphx::verify::expected{norm}, migraphx::verify::tolerance{0.01}));
}
......@@ -30,39 +30,71 @@
#include <test.hpp>
TEST_CASE(round_test)
TEST_CASE(nearbyint_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {9}};
auto l =
mm->add_literal(migraphx::literal{s, {1.1, 1.5, 1.6, -1.1, -1.5, -1.6, 0.0, 2.0, -2.0}});
mm->add_instruction(migraphx::make_op("round"), l);
migraphx::shape s{migraphx::shape::float_type, {4, 4}};
auto l = mm->add_literal(migraphx::literal{s,
{-3.51,
-3.5,
-3.49,
-2.51,
-2.50,
-2.49,
-1.6,
-1.5,
-0.51,
-0.5,
0.5,
0.6,
2.4,
2.5,
3.5,
4.5}});
mm->add_instruction(migraphx::make_op("nearbyint"), l);
p.compile(migraphx::make_target("ref"));
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1.0, 2.0, 2.0, -1.0, -2.0, -2.0, 0.0, 2.0, -2.0};
std::vector<float> gold = {
-4.0, -4.0, -3.0, -3.0, -2.0, -2.0, -2.0, -2.0, -1.0, 0.0, 0.0, 1.0, 2.0, 2.0, 4.0, 4.0};
EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}
TEST_CASE(round_dyn_test)
TEST_CASE(nearbyint_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape::dynamic_dimension dd{4, 10};
migraphx::shape s{migraphx::shape::float_type, {dd}};
auto input = mm->add_parameter("X", s);
mm->add_instruction(migraphx::make_op("round"), input);
mm->add_instruction(migraphx::make_op("nearbyint"), input);
p.compile(migraphx::make_target("ref"));
std::vector<float> input_data{1.1, 1.5, 1.6, -1.1, -1.5, -1.6, 0.0, 2.0, -2.0};
std::vector<float> input_data{-3.51,
-3.5,
-3.49,
-2.51,
-2.50,
-2.49,
-1.6,
-1.5,
-0.51,
-0.5,
0.5,
0.6,
2.4,
2.5,
3.5,
4.5};
migraphx::parameter_map params0;
migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {9}};
migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {16}};
params0["X"] = migraphx::argument(input_fixed_shape0, input_data.data());
auto result = p.eval(params0).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1.0, 2.0, 2.0, -1.0, -2.0, -2.0, 0.0, 2.0, -2.0};
std::vector<float> gold = {
-4.0, -4.0, -3.0, -3.0, -2.0, -2.0, -2.0, -2.0, -1.0, 0.0, 0.0, 1.0, 2.0, 2.0, 4.0, 4.0};
EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}
......@@ -55,7 +55,7 @@ TEST_CASE(quantizelinear_1)
std::vector<float> results_vector(18);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{
-128, 127, 65, -128, 1, 1, -1, 100, 92, -128, 127, 65, -128, 1, 1, -1, 100, 92};
-128, 127, 64, -128, 1, 1, -1, 100, 92, -128, 127, 64, -128, 1, 1, -1, 100, 92};
EXPECT(results_vector == gold);
}
......@@ -80,6 +80,6 @@ TEST_CASE(quantizelinear_2)
auto result = p1.eval({}).back();
std::vector<float> results_vector(18);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{0, 255, 65, 0, 2, 2, 0, 255, 255, 0, 255, 65, 0, 2, 2, 0, 255, 255};
std::vector<float> gold{0, 255, 64, 0, 2, 2, 0, 255, 255, 0, 255, 64, 0, 2, 2, 0, 255, 255};
EXPECT(results_vector == gold);
}
......@@ -226,7 +226,6 @@ TEST_CASE(reshape_2in_test1)
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}
TEST_CASE(reshape_2in_elements_runtime_error)
{
migraphx::program p;
......
......@@ -157,7 +157,169 @@ TEST_CASE(slice_var_inputs_static2)
EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}
TEST_CASE(slice_var_inputs_dyn)
TEST_CASE(slice_var_inputs_dyn0)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s0{migraphx::shape::int32_type, {{2, 4, {2, 4}}, {2, 4, {2, 4}}, {3, 8}}};
auto input = mm->add_parameter("input", s0);
migraphx::shape s1{migraphx::shape::int32_type, {1}};
auto starts = mm->add_parameter("starts", s1);
mm->add_instruction(migraphx::make_op("slice", {{"axes", {2}}, {"ends", {10}}}), input, starts);
p.compile(migraphx::make_target("ref"));
migraphx::parameter_map params;
migraphx::shape s2{migraphx::shape::int32_type, {2, 2, 3}};
std::vector<int> input_data(2 * 2 * 3);
std::iota(input_data.begin(), input_data.end(), 0);
std::vector<int> start_data = {1};
params["input"] = migraphx::argument(s2, input_data.data());
params["starts"] = migraphx::argument(s1, start_data.data());
auto result = p.eval(params).back();
std::vector<int> gold = {1, 2, 4, 5, 7, 8, 10, 11};
std::vector<int> results_vector(2 * 2 * 2);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}
TEST_CASE(slice_var_inputs_dyn1)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s0{migraphx::shape::int32_type, {{2, 4, {2, 4}}, {2, 4, {2, 4}}, {3, 8}}};
auto input = mm->add_parameter("input", s0);
migraphx::shape s1{migraphx::shape::int32_type, {1}};
auto ends = mm->add_parameter("ends", s1);
mm->add_instruction(migraphx::make_op("slice", {{"axes", {2}}, {"starts", {-5}}}), input, ends);
p.compile(migraphx::make_target("ref"));
migraphx::parameter_map params;
migraphx::shape s2{migraphx::shape::int32_type, {2, 2, 3}};
std::vector<int> input_data(2 * 2 * 3);
std::iota(input_data.begin(), input_data.end(), 0);
std::vector<int> ends_data = {3};
params["input"] = migraphx::argument(s2, input_data.data());
params["ends"] = migraphx::argument(s1, ends_data.data());
auto result = p.eval(params).back();
std::vector<int> gold = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
std::vector<int> results_vector(2 * 2 * 3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}
TEST_CASE(slice_var_inputs_dyn2)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s0{migraphx::shape::int32_type, {{2, 4, {2, 4}}, {2, 4, {2, 4}}, {3, 8}}};
auto input = mm->add_parameter("input", s0);
migraphx::shape s1{migraphx::shape::int32_type, {1}};
auto axes = mm->add_parameter("axes", s1);
mm->add_instruction(migraphx::make_op("slice", {{"starts", {1}}, {"ends", {-1}}}), input, axes);
p.compile(migraphx::make_target("ref"));
migraphx::parameter_map params;
migraphx::shape s2{migraphx::shape::int32_type, {2, 2, 3}};
std::vector<int> input_data(2 * 2 * 3);
std::iota(input_data.begin(), input_data.end(), 0);
std::vector<int> axes_data = {2};
params["input"] = migraphx::argument(s2, input_data.data());
params["axes"] = migraphx::argument(s1, axes_data.data());
auto result = p.eval(params).back();
std::vector<int> gold = {1, 4, 7, 10};
std::vector<int> results_vector(2 * 2 * 1);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}
TEST_CASE(slice_var_inputs_dyn3)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s0{migraphx::shape::int32_type, {{2, 4, {2, 4}}, {2, 4, {2, 4}}, {3, 8}}};
auto input = mm->add_parameter("input", s0);
migraphx::shape s1{migraphx::shape::int32_type, {1}};
auto starts = mm->add_parameter("starts", s1);
auto ends = mm->add_parameter("ends", s1);
mm->add_instruction(migraphx::make_op("slice", {{"axes", {2}}}), input, starts, ends);
p.compile(migraphx::make_target("ref"));
migraphx::parameter_map params;
migraphx::shape s2{migraphx::shape::int32_type, {2, 2, 3}};
std::vector<int> input_data(2 * 2 * 3);
std::iota(input_data.begin(), input_data.end(), 0);
std::vector<int> starts_data = {1};
std::vector<int> ends_data = {std::numeric_limits<int>::max()};
params["input"] = migraphx::argument(s2, input_data.data());
params["starts"] = migraphx::argument(s1, starts_data.data());
params["ends"] = migraphx::argument(s1, ends_data.data());
auto result = p.eval(params).back();
std::vector<int> gold = {1, 2, 4, 5, 7, 8, 10, 11};
std::vector<int> results_vector(2 * 2 * 2);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}
TEST_CASE(slice_var_inputs_dyn4)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s0{migraphx::shape::int32_type, {{2, 4, {2, 4}}, {2, 4, {2, 4}}, {3, 8}}};
auto input = mm->add_parameter("input", s0);
migraphx::shape s1{migraphx::shape::int32_type, {1}};
auto starts = mm->add_parameter("starts", s1);
auto axes = mm->add_parameter("axes", s1);
mm->add_instruction(migraphx::make_op("slice", {{"ends", {std::numeric_limits<int>::max()}}}),
input,
starts,
axes);
p.compile(migraphx::make_target("ref"));
migraphx::parameter_map params;
migraphx::shape s2{migraphx::shape::int32_type, {2, 2, 3}};
std::vector<int> input_data(2 * 2 * 3);
std::iota(input_data.begin(), input_data.end(), 0);
std::vector<int> starts_data = {1};
std::vector<int> axes_data = {2};
params["input"] = migraphx::argument(s2, input_data.data());
params["starts"] = migraphx::argument(s1, starts_data.data());
params["axes"] = migraphx::argument(s1, axes_data.data());
auto result = p.eval(params).back();
std::vector<int> gold = {1, 2, 4, 5, 7, 8, 10, 11};
std::vector<int> results_vector(2 * 2 * 2);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}
TEST_CASE(slice_var_inputs_dyn5)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s0{migraphx::shape::int32_type, {{2, 4, {2, 4}}, {2, 4, {2, 4}}, {3, 8}}};
auto input = mm->add_parameter("input", s0);
migraphx::shape s1{migraphx::shape::int32_type, {1}};
auto ends = mm->add_parameter("ends", s1);
auto axes = mm->add_parameter("axes", s1);
mm->add_instruction(migraphx::make_op("slice", {{"starts", {-4}}}), input, ends, axes);
p.compile(migraphx::make_target("ref"));
migraphx::parameter_map params;
migraphx::shape s2{migraphx::shape::int32_type, {2, 2, 3}};
std::vector<int> input_data(2 * 2 * 3);
std::iota(input_data.begin(), input_data.end(), 0);
std::vector<int> ends_data = {2};
std::vector<int> axes_data = {2};
params["input"] = migraphx::argument(s2, input_data.data());
params["ends"] = migraphx::argument(s1, ends_data.data());
params["axes"] = migraphx::argument(s1, axes_data.data());
auto result = p.eval(params).back();
std::vector<int> gold = {0, 1, 3, 4, 6, 7, 9, 10};
std::vector<int> results_vector(2 * 2 * 2);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify::verify_rms_range(results_vector, gold));
}
TEST_CASE(slice_var_inputs_dyn6)
{
migraphx::program p;
auto* mm = p.get_main_module();
......
......@@ -237,4 +237,86 @@ TEST_CASE(const_slice_4input)
EXPECT(m0 == m1);
}
TEST_CASE(static_dimensions_of0)
{
// dead_code_elimination will get rid of atan
migraphx::module m0;
{
migraphx::shape s{migraphx::shape::float_type, {2, 4, 4}};
auto input = m0.add_parameter("data", s);
auto atan_ins = m0.add_instruction(migraphx::make_op("atan"), input);
auto dimensions_of_ins =
m0.add_instruction(migraphx::make_op("dimensions_of", {{"end", 3}}), atan_ins);
m0.add_return({dimensions_of_ins});
}
run_pass(m0);
migraphx::module m1;
{
migraphx::shape s{migraphx::shape::float_type, {2, 4, 4}};
m1.add_parameter("data", s);
migraphx::shape lit_shape{migraphx::shape::int64_type, {3}};
std::vector<int64_t> lit_data = {2, 4, 4};
auto lit_ins = m1.add_literal(migraphx::literal{lit_shape, lit_data});
m1.add_return({lit_ins});
}
EXPECT(m0 == m1);
}
TEST_CASE(static_dimensions_of1)
{
// dead_code_elimination will get rid of atan
migraphx::module m0;
{
migraphx::shape s{migraphx::shape::float_type, {{2, 4, {2, 4}}, {4, 4}, {4, 4}}};
auto input = m0.add_parameter("data", s);
auto atan_ins = m0.add_instruction(migraphx::make_op("atan"), input);
auto dimensions_of_ins = m0.add_instruction(
migraphx::make_op("dimensions_of", {{"start", 1}, {"end", 3}}), atan_ins);
m0.add_return({dimensions_of_ins});
}
run_pass(m0);
migraphx::module m1;
{
migraphx::shape s{migraphx::shape::float_type, {{2, 4, {2, 4}}, {4, 4}, {4, 4}}};
m1.add_parameter("data", s);
migraphx::shape lit_shape{migraphx::shape::int64_type, {2}};
std::vector<int64_t> lit_data = {4, 4};
auto lit_ins = m1.add_literal(migraphx::literal{lit_shape, lit_data});
m1.add_return({lit_ins});
}
EXPECT(m0 == m1);
}
// Does nothing because the dynamic_dimensions from start to end
// are not all fixed
TEST_CASE(static_dimensions_of_nonfixed)
{
// dead_code_elimination will get rid of atan
migraphx::module m0;
{
migraphx::shape s{migraphx::shape::float_type, {{2, 4, {2, 4}}, {4, 8}, {4, 8}}};
auto input = m0.add_parameter("data", s);
auto atan_ins = m0.add_instruction(migraphx::make_op("atan"), input);
auto dimensions_of_ins = m0.add_instruction(
migraphx::make_op("dimensions_of", {{"start", 1}, {"end", 3}}), atan_ins);
m0.add_return({dimensions_of_ins});
}
run_pass(m0);
migraphx::module m1;
{
migraphx::shape s{migraphx::shape::float_type, {{2, 4, {2, 4}}, {4, 8}, {4, 8}}};
auto input = m1.add_parameter("data", s);
auto atan_ins = m1.add_instruction(migraphx::make_op("atan"), input);
auto dimensions_of_ins = m1.add_instruction(
migraphx::make_op("dimensions_of", {{"start", 1}, {"end", 3}}), atan_ins);
m1.add_return({dimensions_of_ins});
}
EXPECT(m0 == m1);
}
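These three cases pin down when the pass may fold dimensions_of: if every dimension in [start, end) of the (possibly dynamic) input shape is fixed, the instruction is replaced with an int64 literal of those sizes; if any of them is non-fixed, the module is left untouched. A small sketch of that condition (illustrative only, not the pass's actual code):

#include <cstddef>
#include <cstdint>
#include <vector>

struct dyn_dim
{
    std::int64_t min;
    std::int64_t max;
    bool is_fixed() const { return min == max; }
};

// Folding is only legal when every queried dimension has a single possible value.
bool can_fold_dimensions_of(const std::vector<dyn_dim>& dims, std::size_t start, std::size_t end)
{
    for(std::size_t i = start; i < end; ++i)
        if(not dims[i].is_fixed())
            return false;
    return true;
}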
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -1345,7 +1345,7 @@ TEST_CASE(transpose_contiguous_unsqueeze_unary)
auto cont_ins = m1.add_instruction(migraphx::make_op("contiguous"), transpose_ins);
auto unsq_ins =
m1.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {2}}}), cont_ins);
auto round = m1.add_instruction(migraphx::make_op("round"), unsq_ins);
auto round = m1.add_instruction(migraphx::make_op("nearbyint"), unsq_ins);
m1.add_instruction(pass_op{}, round);
}
run_pass(m1);
......@@ -1354,7 +1354,7 @@ TEST_CASE(transpose_contiguous_unsqueeze_unary)
auto x = m2.add_parameter("x", {migraphx::shape::float_type, {2, 8, 5, 5}});
auto transpose_ins =
m2.add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 2, 3, 1}}}), x);
auto round = m2.add_instruction(migraphx::make_op("round"), transpose_ins);
auto round = m2.add_instruction(migraphx::make_op("nearbyint"), transpose_ins);
auto cont_ins = m2.add_instruction(migraphx::make_op("contiguous"), round);
auto unsq_ins =
m2.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {2}}}), cont_ins);
......
......@@ -41,11 +41,7 @@ TEST_CASE(make_invalid_target)
TEST_CASE(targets)
{
// GCC doesn't load libmigraphx_ref unless necessary even though it is linked to the test.
// Force it to load by making ref target
#if defined(__GNUC__) && !defined(__clang__)
auto ref_target = migraphx::make_target("ref");
#endif
auto ts = migraphx::get_targets();
EXPECT(ts.size() >= 1);
}
......
......@@ -27,16 +27,21 @@
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_round : verify_program<test_round>
struct gemm_2args_mm_8 : verify_program<gemm_2args_mm_8>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape a_shape{migraphx::shape::float_type, {2, 128, 32}, {4096, 1, 128}};
migraphx::shape b_shape{migraphx::shape::float_type, {32, 32}};
auto a = mm->add_parameter("a", a_shape);
auto b = mm->add_parameter("b", b_shape);
auto bb = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {2, 32, 32}}}), b);
mm->add_instruction(migraphx::make_op("dot"), a, bb);
migraphx::shape s{migraphx::shape::float_type, {2, 3, 4, 6}};
auto param = mm->add_parameter("x", s);
mm->add_instruction(migraphx::make_op("round"), param);
return p;
};
}
};