Unverified Commit 0662a9a3 authored by Brian Pickrell's avatar Brian Pickrell Committed by GitHub
Browse files

Merge branch 'develop' into dyn_resize_gather

parents b74d3a8f 35e5298e
635d3faa3b3908d2806d009dc6872152cfcfcdda
b7b8b5b2ce80edb33990c7ae0fedac6ae3c623f4
......@@ -3858,6 +3858,64 @@ def instance_norm_val_3d_test():
return ([node], [], [y], [x_tensor, scale_tensor, bias_tensor])
@onnx_test()
def isinf_half_test():
    # IsInf on an fp16 input; both detect attributes are left at their
    # spec defaults (positive and negative infinities both reported).
    in_info = helper.make_tensor_value_info('t1', TensorProto.FLOAT16, [2, 3])
    out_info = helper.make_tensor_value_info('t2', TensorProto.BOOL, [2, 3])
    node = onnx.helper.make_node('IsInf', inputs=['t1'], outputs=['t2'])
    return ([node], [in_info], [out_info])
@onnx_test()
def isinf_neg_test():
    # IsInf configured to report only negative infinities.
    # NOTE(review): ONNX declares detect_negative/detect_positive as INT
    # attributes; the single-element lists here serialize as INTS — confirm
    # the parser accepts that encoding before normalizing to plain ints.
    in_info = helper.make_tensor_value_info('t1', TensorProto.FLOAT, [2, 3])
    out_info = helper.make_tensor_value_info('t2', TensorProto.BOOL, [2, 3])
    node = onnx.helper.make_node(
        'IsInf',
        detect_negative=[1],
        detect_positive=[0],
        inputs=['t1'],
        outputs=['t2'],
    )
    return ([node], [in_info], [out_info])
@onnx_test()
def isinf_double_pos_test():
    # IsInf on a double input, reporting only positive infinities.
    # NOTE(review): the list-valued attributes serialize as INTS rather than
    # the spec's INT type — kept as-is to match the generated model files.
    in_info = helper.make_tensor_value_info('t1', TensorProto.DOUBLE, [2, 3])
    out_info = helper.make_tensor_value_info('t2', TensorProto.BOOL, [2, 3])
    node = onnx.helper.make_node(
        'IsInf',
        detect_negative=[0],
        detect_positive=[1],
        inputs=['t1'],
        outputs=['t2'],
    )
    return ([node], [in_info], [out_info])
@onnx_test()
def isinf_no_detect_test():
    # Degenerate IsInf: both detect attributes disabled, so the result is
    # always false regardless of the input values.
    in_info = helper.make_tensor_value_info('t1', TensorProto.FLOAT, [2, 3])
    out_info = helper.make_tensor_value_info('t2', TensorProto.BOOL, [2, 3])
    node = onnx.helper.make_node(
        'IsInf',
        detect_negative=[0],
        detect_positive=[0],
        inputs=['t1'],
        outputs=['t2'],
    )
    return ([node], [in_info], [out_info])
@onnx_test()
def isnan_float_test():
t1 = helper.make_tensor_value_info('t1', TensorProto.FLOAT, [2, 3])
......@@ -4276,6 +4334,50 @@ def loop_test():
return ([node], [iter, cond, a, b], [b_loop, uout])
@onnx_test()
def loop_test_implicit_tripcnt():
    # Loop whose max trip count is supplied as a graph initializer (the
    # trailing return element) instead of a runtime input.
    body_nodes = [
        helper.make_node("Add", ["a", "b_in"], ["my_local"]),
        helper.make_node("Sub", ["a", "b_in"], ["a_sub_b_in"]),
        helper.make_node("Greater", ["my_local", "a_sub_b_in"],
                         ["keep_going"]),
        helper.make_node("Add", ["a_sub_b_in", "a_sub_b_in"],
                         ["user_defined_vals"]),
    ]
    body_inputs = [
        helper.make_tensor_value_info('iteration_num', TensorProto.INT64, [1]),
        helper.make_tensor_value_info('keep_going_inp', TensorProto.BOOL, [1]),
        helper.make_tensor_value_info('b_in', TensorProto.FLOAT, [1])
    ]
    body_outputs = [
        helper.make_tensor_value_info('keep_going', TensorProto.BOOL, [1]),
        helper.make_tensor_value_info('a_sub_b_in', TensorProto.FLOAT, [1]),
        helper.make_tensor_value_info('my_local', TensorProto.FLOAT, [1]),
        helper.make_tensor_value_info('user_defined_vals', TensorProto.FLOAT,
                                      [1]),
    ]
    body = helper.make_graph(body_nodes, "body", body_inputs, body_outputs)

    # Trip count baked in as a one-element INT64 initializer (15 iterations).
    trip_count = helper.make_tensor(name='max_trip_count',
                                    data_type=TensorProto.INT64,
                                    dims=[1],
                                    vals=[15])

    node = helper.make_node(
        "Loop",
        inputs=["max_trip_count", "keep_going_cond", "b"],
        outputs=["b_loop", "my_local_loop", "user_defined_vals_loop"],
        body=body)

    a = helper.make_tensor_value_info('a', TensorProto.FLOAT, [1])
    b = helper.make_tensor_value_info('b', TensorProto.FLOAT, [1])
    cond = helper.make_tensor_value_info('keep_going_cond', TensorProto.BOOL,
                                         [1])
    b_loop = helper.make_tensor_value_info('b_loop', TensorProto.FLOAT, [1])
    uout = helper.make_tensor_value_info('user_defined_vals_loop',
                                         TensorProto.FLOAT, [2, 1])
    return ([node], [cond, a, b], [b_loop, uout], [trip_count])
@onnx_test()
def lpnormalization_axis_error_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3])
......@@ -4883,9 +4985,9 @@ def mod_test_fmod_different_dtypes():
@onnx_test()
def multinomial_test():
sample_size = 10
seed = 0.0
input = helper.make_tensor_value_info("input", TensorProto.FLOAT, [1, 10])
sample_size = 13
seed = 0.
input = helper.make_tensor_value_info("input", TensorProto.FLOAT, [3, 10])
output = helper.make_tensor_value_info("output", TensorProto.INT32,
[1, 10])
......@@ -4898,6 +5000,44 @@ def multinomial_test():
return ([node], [input], [output])
@onnx_test()
def multinomial_dyn_test():
    # Multinomial with a dynamic batch dimension, an explicit seed, and a
    # float output dtype.
    sample_size = 100000
    seed = 1.3
    categories = 5
    in_info = helper.make_tensor_value_info("input", TensorProto.FLOAT,
                                            [None, categories])
    out_info = helper.make_tensor_value_info("output", TensorProto.FLOAT,
                                             [None, categories])
    node = onnx.helper.make_node(
        'Multinomial',
        inputs=['input'],
        sample_size=sample_size,
        dtype=1,  # shape::float_type
        seed=seed,
        outputs=['output'])
    return ([node], [in_info], [out_info])
@onnx_test()
def multinomial_autoseed_dyn_test():
    # If seed attribute is not given, device should auto generate one at runtime
    sample_size = 12
    in_info = helper.make_tensor_value_info("input", TensorProto.FLOAT,
                                            [None, 10])
    out_info = helper.make_tensor_value_info("output", TensorProto.INT32,
                                             [None, 10])
    node = onnx.helper.make_node('Multinomial',
                                 inputs=['input'],
                                 sample_size=sample_size,
                                 outputs=['output'])
    return ([node], [in_info], [out_info])
@onnx_test()
def multinomial_generated_seed_test():
sample_size = 10
......@@ -7074,6 +7214,16 @@ def roialign_test():
return ([node], [x, roi, bi], [y])
@onnx_test()
def round_half_test():
    # Round applied to a half-precision (fp16) tensor.
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [4, 4])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [4, 4])
    round_node = onnx.helper.make_node('Round', inputs=['x'], outputs=['y'])
    return ([round_node], [x], [y])
@onnx_test()
def scatter_add_test():
x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 4, 5, 6])
......@@ -7993,6 +8143,32 @@ def slice_var_input_dyn1():
return ([node], [data, starts, ends, axes], [output])
@onnx_test()
def slice_var_input_default_steps():
    # Slice with runtime starts/ends/axes inputs while the steps input is a
    # Constant of all ones (i.e. the default stride).
    default_steps = np.array([1, 1])
    step_tensor = helper.make_tensor(name="step",
                                     data_type=TensorProto.INT64,
                                     dims=default_steps.shape,
                                     vals=default_steps.astype(int))
    arg_step = helper.make_node("Constant",
                                inputs=[],
                                outputs=['arg_step'],
                                value=step_tensor)
    data = helper.make_tensor_value_info('data', TensorProto.FLOAT, [None, 2])
    starts = helper.make_tensor_value_info('starts', TensorProto.INT64, [2])
    ends = helper.make_tensor_value_info('ends', TensorProto.INT64, [2])
    axes = helper.make_tensor_value_info('axes', TensorProto.INT64, [2])
    output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 2])
    node = onnx.helper.make_node(
        'Slice',
        inputs=['data', 'starts', 'ends', 'axes', 'arg_step'],
        outputs=['output'])
    return ([arg_step, node], [data, starts, ends, axes], [output])
@onnx_test()
def slice_var_input_steps_error():
step = np.array([2, 1])
......@@ -8006,9 +8182,9 @@ def slice_var_input_steps_error():
value=step_tensor)
data = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 2])
starts = helper.make_tensor_value_info('starts', TensorProto.FLOAT, [2])
ends = helper.make_tensor_value_info('ends', TensorProto.FLOAT, [2])
axes = helper.make_tensor_value_info('axes', TensorProto.FLOAT, [2])
starts = helper.make_tensor_value_info('starts', TensorProto.INT64, [2])
ends = helper.make_tensor_value_info('ends', TensorProto.INT64, [2])
axes = helper.make_tensor_value_info('axes', TensorProto.INT64, [2])
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 2])
node = onnx.helper.make_node(
......@@ -8169,6 +8345,42 @@ def split_test_no_attribute():
return ([const_node, node], [x], [y1, y2, y3, y4])
@onnx_test()
def split_test_uneven():
    # Default Split (no 'split' attribute) of 12 rows into 5 outputs:
    # four 3-row chunks plus one empty remainder slice.
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [12, 15])
    out_names = ['y1', 'y2', 'y3', 'y4', 'y5']
    out_rows = [3, 3, 3, 3, 0]
    outs = [
        helper.make_tensor_value_info(name, TensorProto.FLOAT, [rows, 15])
        for name, rows in zip(out_names, out_rows)
    ]
    node = onnx.helper.make_node(
        'Split',
        inputs=['x'],
        outputs=out_names,
    )
    return ([node], [x], outs)
@onnx_test()
def split_test_uneven_num_outputs():
    # Split driven by num_outputs=4 on 11 rows: ceil(11/4)=3 rows per chunk,
    # with the final chunk holding the 2-row remainder.
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [11, 15])
    out_names = ['y1', 'y2', 'y3', 'y4']
    out_rows = [3, 3, 3, 2]
    outs = [
        helper.make_tensor_value_info(name, TensorProto.FLOAT, [rows, 15])
        for name, rows in zip(out_names, out_rows)
    ]
    node = onnx.helper.make_node(
        'Split',
        inputs=['x'],
        outputs=out_names,
        num_outputs=4,
    )
    return ([node], [x], outs)
@onnx_test()
def split_test_no_attribute_invalid_split():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [300, 15])
......@@ -8228,6 +8440,24 @@ def split_test_no_attribute_invalid_input_split():
return ([node], [x], [y1, y2, y3])
@onnx_test()
def split_test_invalid_num_outputs():
    # Deliberately invalid model: num_outputs=5 disagrees with the four
    # declared outputs, so the parser is expected to reject it.
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [11, 15])
    out_names = ['y1', 'y2', 'y3', 'y4']
    out_rows = [3, 3, 3, 2]
    outs = [
        helper.make_tensor_value_info(name, TensorProto.FLOAT, [rows, 15])
        for name, rows in zip(out_names, out_rows)
    ]
    node = onnx.helper.make_node(
        'Split',
        inputs=['x'],
        outputs=out_names,
        num_outputs=5,
    )
    return ([node], [x], outs)
@onnx_test()
def sqrt_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15])
......@@ -8964,6 +9194,20 @@ def upsample_test():
return ([node], [X], [Y], [scale_tensor])
@onnx_test()
def upsample_ver7_test():
    # Opset-7 style Upsample: scales supplied as a node attribute rather
    # than as a second input tensor.
    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 2])
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 1, 4, 6])
    upsample = onnx.helper.make_node('Upsample',
                                     inputs=['X'],
                                     outputs=['Y'],
                                     mode='nearest',
                                     scales=[1.0, 1.0, 2.0, 3.0])
    return ([upsample], [X], [Y])
@onnx_test()
def variable_batch_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
......
 isinf_half_test:N

t1t2"IsInfisinf_half_testZ
t1



b
t2
 

B
\ No newline at end of file
 loop_test_implicit_tripcnt:

max_trip_count
keep_going_cond
bb_loop my_local_loopuser_defined_vals_loop"Loop*
body2

a
b_inmy_local"Add

a
b_in
a_sub_b_in"Sub
+
my_local
a_sub_b_in
keep_going"Greater
0
a_sub_b_in
a_sub_b_inuser_defined_vals"AddbodyZ
iteration_num

Z
keep_going_inp
 
Z
b_in

b
keep_going
 
b
a_sub_b_in

b
my_local

b
user_defined_vals

loop_test_implicit_tripcnt*:Bmax_trip_countZ
keep_going_cond
 
Z
a

Z
b

b
b_loop

b(
user_defined_vals_loop


B
\ No newline at end of file
......@@ -3413,6 +3413,82 @@ TEST_CASE(if_tuple_test)
EXPECT(p == prog);
}
// IsInf on a half-precision input with default detect attributes maps
// directly onto a single migraphx isinf instruction.
TEST_CASE(isinf_half_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::half_type, {2, 3}};
    auto t1 = mm->add_parameter("t1", s);
    auto ret = mm->add_instruction(migraphx::make_op("isinf"), t1);
    mm->add_return({ret});
    auto prog = migraphx::parse_onnx("isinf_half_test.onnx");
    EXPECT(p == prog);
}
// detect_negative=1 / detect_positive=0: the parser lowers IsInf to
// isinf(t1) AND (t1 < 0), so only negative infinities are reported.
TEST_CASE(isinf_neg_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    auto t1 = mm->add_parameter("t1", s);
    auto is_inf = mm->add_instruction(migraphx::make_op("isinf"), t1);
    // Sign test: broadcast a scalar zero to the input shape and compare.
    auto zero_l = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0}});
    auto mb_zero =
        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), zero_l);
    auto is_neg = mm->add_instruction(migraphx::make_op("less"), t1, mb_zero);
    // "less" may not yield bool_type; convert so logical_and's operand types match.
    if(is_neg->get_shape().type() != migraphx::shape::bool_type)
    {
        is_neg = mm->add_instruction(
            migraphx::make_op("convert", {{"target_type", migraphx::shape::bool_type}}), is_neg);
    }
    auto ret = mm->add_instruction(migraphx::make_op("logical_and"), is_inf, is_neg);
    mm->add_return({ret});
    auto prog = migraphx::parse_onnx("isinf_neg_test.onnx");
    EXPECT(p == prog);
}
// detect_negative=0 / detect_positive=1 on a double input: IsInf lowers to
// isinf(t1) AND (t1 > 0), reporting only positive infinities.
TEST_CASE(isinf_double_pos_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::double_type, {2, 3}};
    auto t1 = mm->add_parameter("t1", s);
    auto is_inf = mm->add_instruction(migraphx::make_op("isinf"), t1);
    auto zero_l = mm->add_literal(migraphx::literal{migraphx::shape::double_type, {0}});
    auto mb_zero =
        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), zero_l);
    // NOTE(review): despite the name, is_neg holds the t1 > 0 (positive-side)
    // comparison here; consider renaming in a follow-up.
    auto is_neg = mm->add_instruction(migraphx::make_op("greater"), t1, mb_zero);
    // Convert to bool_type if the comparison produced a different type.
    if(is_neg->get_shape().type() != migraphx::shape::bool_type)
    {
        is_neg = mm->add_instruction(
            migraphx::make_op("convert", {{"target_type", migraphx::shape::bool_type}}), is_neg);
    }
    auto ret = mm->add_instruction(migraphx::make_op("logical_and"), is_inf, is_neg);
    mm->add_return({ret});
    auto prog = migraphx::parse_onnx("isinf_double_pos_test.onnx");
    EXPECT(p == prog);
}
// Both detect attributes disabled: IsInf degenerates to a constant false
// tensor broadcast to the input shape; the parameter itself is never used.
TEST_CASE(isinf_no_detect_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    mm->add_parameter("t1", s);
    auto ret = mm->add_instruction(
        migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}),
        mm->add_literal(migraphx::literal{migraphx::shape{migraphx::shape::bool_type}, {false}}));
    mm->add_return({ret});
    auto prog = migraphx::parse_onnx("isinf_no_detect_test.onnx");
    EXPECT(p == prog);
}
TEST_CASE(isnan_float_test)
{
migraphx::program p;
......@@ -4679,32 +4755,140 @@ TEST_CASE(multinomial_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
size_t sample_size = 10;
float seed = 0.0f;
size_t sample_size = 13;
size_t batch_size = 3;
size_t categories = 10;
float seed = 0;
auto input = mm->add_parameter("input", migraphx::shape{migraphx::shape::float_type, {1, 10}});
auto maxes = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {1}}}), input);
auto mb_maxes =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 10}}}), maxes);
auto input = mm->add_parameter(
"input", migraphx::shape{migraphx::shape::float_type, {batch_size, categories}});
auto maxes = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {1}}}), input);
auto mb_maxes = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {batch_size, 10}}}), maxes);
auto cdf = mm->add_instruction(migraphx::make_op("sub"), input, mb_maxes);
cdf = mm->add_instruction(migraphx::make_op("exp"), cdf);
cdf = mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", false}}), cdf);
std::mt19937 gen(seed);
std::uniform_real_distribution<> dis(0.0, 1.0);
std::vector<float> rand_samples(sample_size);
std::generate(rand_samples.begin(), rand_samples.end(), [&]() { return dis(gen); });
migraphx::shape rs{migraphx::shape::float_type, {1, sample_size}};
auto rs_lit = mm->add_literal(migraphx::literal{rs, rand_samples});
mm->add_instruction(migraphx::make_op("multinomial"), cdf, rs_lit);
migraphx::shape s{migraphx::shape::float_type, {1}};
std::vector<float> seed_data = {seed};
auto seed_input = mm->add_literal(migraphx::literal(s, seed_data));
auto rand_dummy =
mm->add_literal(migraphx::literal{migraphx::shape::float_type, {batch_size * sample_size}});
auto randoms = mm->add_instruction(migraphx::make_op("random_uniform"), seed_input, rand_dummy);
mm->add_instruction(migraphx::make_op("multinomial"), cdf, randoms);
auto prog = optimize_onnx("multinomial_test.onnx");
EXPECT(p == prog);
}
// Multinomial with a dynamic batch dimension and an explicit seed attribute:
// the parser must emit runtime shape computation (dimensions_of + literal
// arithmetic) so random_uniform's output buffer can be allocated per batch.
TEST_CASE(multinomial_dyn_test)
{
    // compile-time random seed
    migraphx::program p;
    auto* mm = p.get_main_module();
    size_t sample_size = 100000;
    size_t categories = 5;
    float seed = 1.3f;
    auto input = mm->add_parameter(
        "input",
        migraphx::shape{migraphx::shape::float_type, {{1, categories}, {categories, categories}}});
    // Build the CDF: subtract the row max (softmax-style stabilization),
    // exponentiate, then take an inclusive prefix sum along the category axis.
    auto maxes = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {1}}}), input);
    auto cdf = add_common_op(*mm, migraphx::make_op("sub"), {input, maxes});
    cdf = mm->add_instruction(migraphx::make_op("exp"), cdf);
    cdf = mm->add_instruction(
        migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", false}}), cdf);
    // Seed is passed to random_uniform as a one-element literal.
    migraphx::shape s{migraphx::shape::float_type, {1}};
    std::vector<float> seed_data = {seed};
    auto seed_input = mm->add_literal(migraphx::literal(s, seed_data));
    // dynamic input only: must calculate alloc_shape as (batch_size, sample_size)
    // read the runtime input dimensions
    auto dim_of = mm->add_instruction(migraphx::make_op("dimensions_of", {{"end", 2}}), input);
    // make an argument of (1, 0)
    migraphx::shape lit_shape(migraphx::shape::int64_type, {2});
    std::vector<int64_t> data1{1, 0};
    auto l1 = mm->add_literal(lit_shape, data1);
    // Mask keeps the runtime batch dim and zeroes the category dim.
    auto batch_arg = mm->add_instruction(migraphx::make_op("mul"), dim_of, l1);
    std::vector<int64_t> data2(2, 0);
    // make an argument of (0, sample_size)
    data2[1] = sample_size;
    auto l2 = mm->add_literal(lit_shape, data2);
    // (batch, 0) + (0, sample_size) => (batch, sample_size)
    auto alloc_shape = mm->add_instruction(migraphx::make_op("add"), batch_arg, l2);
    migraphx::shape compile_shape =
        migraphx::shape(migraphx::shape::float_type,
                        {input->get_shape().dyn_dims().front(), {sample_size, sample_size}});
    auto alloc = mm->add_instruction(
        migraphx::make_op("allocate", {{"shape", to_value(compile_shape)}}), alloc_shape);
    auto randoms = mm->add_instruction(migraphx::make_op("random_uniform"), seed_input, alloc);
    auto ret = mm->add_instruction(
        migraphx::make_op("multinomial", {{"dtype", migraphx::shape::float_type}}), cdf, randoms);
    mm->add_return({ret});
    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, categories};
    options.print_program_on_error = true;
    auto prog = migraphx::parse_onnx("multinomial_dyn_test.onnx", options);
    EXPECT(p == prog);
}
// Same dynamic-batch lowering as multinomial_dyn_test, but with no seed
// attribute in the model: a random_seed instruction supplies the seed at
// runtime instead of a literal.
TEST_CASE(multinomial_autoseed_dyn_test)
{
    // runtime random seed
    migraphx::program p;
    auto* mm = p.get_main_module();
    size_t sample_size = 12;
    size_t categories = 10;
    auto input = mm->add_parameter(
        "input", migraphx::shape{migraphx::shape::float_type, {{1, 10}, {10, 10}}});
    // CDF construction: max-subtract, exp, inclusive prefix sum over axis 1.
    auto maxes = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {1}}}), input);
    auto cdf = add_common_op(*mm, migraphx::make_op("sub"), {input, maxes});
    cdf = mm->add_instruction(migraphx::make_op("exp"), cdf);
    cdf = mm->add_instruction(
        migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", false}}), cdf);
    // No seed attribute: device generates the seed at runtime.
    auto seed_input = mm->add_instruction(migraphx::make_op("random_seed"));
    // dynamic input only: must calculate alloc_shape as (batch_size, sample_size)
    // read the runtime input dimensions
    auto dim_of = mm->add_instruction(migraphx::make_op("dimensions_of", {{"end", 2}}), input);
    // make an argument of (1, 0)
    migraphx::shape lit_shape(migraphx::shape::int64_type, {2});
    std::vector<int64_t> data1{1, 0};
    auto l1 = mm->add_literal(lit_shape, data1);
    // Mask keeps the runtime batch dim and zeroes the category dim.
    auto batch_arg = mm->add_instruction(migraphx::make_op("mul"), dim_of, l1);
    std::vector<int64_t> data2(2, 0);
    // make an argument of (0, sample_size)
    data2[1] = sample_size;
    auto l2 = mm->add_literal(lit_shape, data2);
    // (batch, 0) + (0, sample_size) => (batch, sample_size)
    auto alloc_shape = mm->add_instruction(migraphx::make_op("add"), batch_arg, l2);
    migraphx::shape compile_shape =
        migraphx::shape(migraphx::shape::float_type,
                        {input->get_shape().dyn_dims().front(), {sample_size, sample_size}});
    auto alloc = mm->add_instruction(
        migraphx::make_op("allocate", {{"shape", to_value(compile_shape)}}), alloc_shape);
    auto randoms = mm->add_instruction(migraphx::make_op("random_uniform"), seed_input, alloc);
    auto ret = mm->add_instruction(migraphx::make_op("multinomial"), cdf, randoms);
    mm->add_return({ret});
    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, categories};
    options.print_program_on_error = true;
    auto prog = migraphx::parse_onnx("multinomial_autoseed_dyn_test.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE(multinomial_dtype_error_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("multinomial_dtype_error_test.onnx"); }));
......@@ -4712,10 +4896,11 @@ TEST_CASE(multinomial_dtype_error_test)
TEST_CASE(multinomial_generated_seed_test)
{
// multinomial op. no longer generates its own randoms
auto p1 = optimize_onnx("multinomial_generated_seed_test.onnx");
auto p2 = optimize_onnx("multinomial_generated_seed_test.onnx");
EXPECT(p1 != p2);
EXPECT(p1 == p2);
}
TEST_CASE(multinomial_int64_test)
......@@ -4723,27 +4908,27 @@ TEST_CASE(multinomial_int64_test)
migraphx::program p;
auto* mm = p.get_main_module();
size_t sample_size = 10;
float seed = 1.0f;
float seed = 1.0;
uint32_t batch_size = 1;
migraphx::shape::type_t dtype = migraphx::shape::type_t::int64_type;
auto input = mm->add_parameter("input", migraphx::shape{migraphx::shape::float_type, {1, 10}});
auto maxes = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {1}}}), input);
auto mb_maxes =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 10}}}), maxes);
auto cdf = mm->add_instruction(migraphx::make_op("sub"), input, mb_maxes);
auto cdf = add_common_op(*mm, migraphx::make_op("sub"), {input, maxes});
cdf = mm->add_instruction(migraphx::make_op("exp"), cdf);
cdf = mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", false}}), cdf);
std::mt19937 gen(seed);
std::uniform_real_distribution<> dis(0.0, 1.0);
std::vector<float> rand_samples(sample_size);
std::generate(rand_samples.begin(), rand_samples.end(), [&]() { return dis(gen); });
migraphx::shape rs{migraphx::shape::float_type, {1, sample_size}};
auto rs_lit = mm->add_literal(migraphx::literal{rs, rand_samples});
mm->add_instruction(migraphx::make_op("multinomial", {{"dtype", dtype}}), cdf, rs_lit);
migraphx::shape s{migraphx::shape::float_type, {1}};
std::vector<float> data = {seed};
auto seed_input = mm->add_literal(migraphx::literal(s, data));
// static size
auto rand_dummy =
mm->add_literal(migraphx::literal{migraphx::shape::float_type, {batch_size * sample_size}});
auto randoms = mm->add_instruction(migraphx::make_op("random_uniform"), seed_input, rand_dummy);
mm->add_instruction(migraphx::make_op("multinomial", {{"dtype", dtype}}), cdf, randoms);
auto prog = optimize_onnx("multinomial_int64_test.onnx");
EXPECT(p == prog);
......@@ -5603,9 +5788,9 @@ TEST_CASE(quantizelinear_test)
auto l1_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {5}}}), l1);
auto div = mm->add_instruction(migraphx::make_op("div"), l0, l1_mbcast);
auto round = mm->add_instruction(migraphx::make_op("round"), div);
auto s = round->get_shape();
auto clip = insert_quantizelinear_clip(*mm, div, round, s, 0, 255);
auto nearbyint = mm->add_instruction(migraphx::make_op("nearbyint"), div);
auto s = nearbyint->get_shape();
auto clip = insert_quantizelinear_clip(*mm, div, nearbyint, s, 0, 255);
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::uint8_type)}}),
......@@ -5628,9 +5813,9 @@ TEST_CASE(quantizelinear_int32_test)
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
l0);
auto div = mm->add_instruction(migraphx::make_op("div"), l0, l1_mbcast);
auto round = mm->add_instruction(migraphx::make_op("round"), div);
auto s = round->get_shape();
auto clip = insert_quantizelinear_clip(*mm, div, round, s, 0, 255);
auto nearbyint = mm->add_instruction(migraphx::make_op("nearbyint"), div);
auto s = nearbyint->get_shape();
auto clip = insert_quantizelinear_clip(*mm, div, nearbyint, s, 0, 255);
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::uint8_type)}}),
......@@ -5650,7 +5835,7 @@ TEST_CASE(quantizelinear_zero_point_test)
auto l1_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {5}}}), l1);
auto div = mm->add_instruction(migraphx::make_op("div"), l0, l1_mbcast);
auto round = mm->add_instruction(migraphx::make_op("round"), div);
auto round = mm->add_instruction(migraphx::make_op("nearbyint"), div);
auto l2_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {5}}}), l2);
l2_mbcast = mm->add_instruction(
......@@ -5683,7 +5868,7 @@ migraphx::program make_quantizelinear_axis_prog()
migraphx::make_op("broadcast", {{"axis", axis}, {"out_lens", input_lens}}), l1);
auto div = mm->add_instruction(migraphx::make_op("div"), l0, l1_bcast);
auto round = mm->add_instruction(migraphx::make_op("round"), div);
auto round = mm->add_instruction(migraphx::make_op("nearbyint"), div);
auto l2_bcast = mm->add_instruction(
migraphx::make_op("broadcast", {{"axis", axis}, {"out_lens", input_lens}}), l2);
l2_bcast = mm->add_instruction(
......@@ -6444,9 +6629,8 @@ TEST_CASE(resize_nonstd_input_test)
auto tx =
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 1, 3, 2}}}), inx);
mm->add_instruction(migraphx::make_op("undefined"));
auto tx_cont = mm->add_instruction(migraphx::make_op("contiguous"), tx);
auto lrsp = mm->add_instruction(migraphx::make_op("reshape", {{"dims", {8}}}), tx_cont);
auto lrsp = mm->add_instruction(migraphx::make_op("reshape", {{"dims", {8}}}), tx);
auto r = mm->add_instruction(migraphx::make_op("gather", {{"axis", 0}}), lrsp, li);
mm->add_return({r});
......@@ -6885,7 +7069,7 @@ TEST_CASE(round_test)
migraphx::program p;
auto* mm = p.get_main_module();
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::double_type, {10, 5}});
mm->add_instruction(migraphx::make_op("round"), input);
mm->add_instruction(migraphx::make_op("nearbyint"), input);
auto prog = optimize_onnx("round_test.onnx");
EXPECT(p == prog);
......@@ -7541,6 +7725,25 @@ TEST_CASE(slice_var_input_dyn1)
EXPECT(p == prog);
}
// When the Slice steps input is a constant of all ones, the parser drops it
// and emits a plain variable-input slice over (data, starts, ends, axes).
TEST_CASE(slice_var_input_default_steps)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto data =
        mm->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {{3, 8}, {2, 2}}});
    auto starts = mm->add_parameter("starts", migraphx::shape{migraphx::shape::int64_type, {2}});
    auto ends = mm->add_parameter("ends", migraphx::shape{migraphx::shape::int64_type, {2}});
    auto axes = mm->add_parameter("axes", migraphx::shape{migraphx::shape::int64_type, {2}});
    // The model's Constant step tensor still lands as a literal, but it is
    // not an input to the slice instruction.
    mm->add_literal({{migraphx::shape::int64_type, {2}}, {1, 1}});
    auto ret = mm->add_instruction(migraphx::make_op("slice"), data, starts, ends, axes);
    mm->add_return({ret});
    migraphx::onnx_options options;
    options.default_dyn_dim_value = {3, 8};
    auto prog = parse_onnx("slice_var_input_default_steps.onnx", options);
    EXPECT(p == prog);
}
TEST_CASE(slice_var_input_steps_error)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("slice_var_input_steps_error.onnx"); }));
......@@ -7743,6 +7946,46 @@ TEST_CASE(split_test_default)
EXPECT(p == prog);
}
// Default Split of 12 rows into 5 outputs: four 3-row slices plus one empty
// remainder slice [12, 12).
TEST_CASE(split_test_uneven)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {12, 15}});
    auto r1 = mm->add_instruction(
        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {0}}, {"ends", {3}}}), input);
    auto r2 = mm->add_instruction(
        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {3}}, {"ends", {6}}}), input);
    auto r3 = mm->add_instruction(
        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {6}}, {"ends", {9}}}), input);
    auto r4 = mm->add_instruction(
        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {9}}, {"ends", {12}}}), input);
    // Fifth output is intentionally empty (start == end == 12).
    auto r5 = mm->add_instruction(
        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {12}}, {"ends", {12}}}), input);
    mm->add_return({r1, r2, r3, r4, r5});
    auto prog = migraphx::parse_onnx("split_test_uneven.onnx");
    EXPECT(p == prog);
}
// Split with num_outputs=4 on 11 rows: three 3-row slices and a final 2-row
// remainder slice.
TEST_CASE(split_test_uneven_num_outputs)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {11, 15}});
    auto r1 = mm->add_instruction(
        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {0}}, {"ends", {3}}}), input);
    auto r2 = mm->add_instruction(
        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {3}}, {"ends", {6}}}), input);
    auto r3 = mm->add_instruction(
        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {6}}, {"ends", {9}}}), input);
    // Last chunk holds the remainder: rows [9, 11).
    auto r4 = mm->add_instruction(
        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {9}}, {"ends", {11}}}), input);
    mm->add_return({r1, r2, r3, r4});
    auto prog = migraphx::parse_onnx("split_test_uneven_num_outputs.onnx");
    EXPECT(p == prog);
}
TEST_CASE(split_test_no_attribute_invalid_split)
{
EXPECT(
......@@ -7760,6 +8003,11 @@ TEST_CASE(split_test_no_attribute_invalid_input_split)
[&] { migraphx::parse_onnx("split_test_no_attribute_invalid_input_split.onnx"); }));
}
// num_outputs=5 with only 4 declared outputs must make the parser throw.
TEST_CASE(split_test_invalid_num_outputs)
{
    EXPECT(test::throws([&] { migraphx::parse_onnx("split_test_invalid_num_outputs.onnx"); }));
}
TEST_CASE(sqrt_test)
{
migraphx::program p;
......@@ -8260,6 +8508,27 @@ TEST_CASE(upsample_test)
EXPECT(p == prog);
}
// Opset-7 Upsample (scales as an attribute) is parsed into a precomputed
// nearest-neighbor index map gathered from the flattened input.
TEST_CASE(upsample_ver7_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape sx{migraphx::shape::float_type, {1, 1, 2, 2}};
    auto ix = mm->add_parameter("X", sx);
    // Index literal for scales {1,1,2,3}: each of the 4 source pixels is
    // repeated 2x vertically and 3x horizontally.
    migraphx::shape si{migraphx::shape::int32_type, {1, 1, 4, 6}};
    std::vector<int> ind = {0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 2, 2, 2, 3, 3, 3};
    auto li = mm->add_literal(migraphx::literal(si, ind));
    // Flatten the input, then gather with the index map to upsample.
    auto rsp = mm->add_instruction(migraphx::make_op("reshape", {{"dims", {4}}}), ix);
    auto r = mm->add_instruction(migraphx::make_op("gather", {{"axis", 0}}), rsp, li);
    mm->add_return({r});
    auto prog = migraphx::parse_onnx("upsample_ver7_test.onnx");
    EXPECT(p == prog);
}
TEST_CASE(unknown_test_throw_print_error)
{
migraphx::onnx_options options;
......
reshape_variable_input_test0:q

0
12"Reshapereshape_variable_input_test0Z
0



Z
1

b
2


B
\ No newline at end of file
 round_half_test:J
xy"Roundround_half_testZ
x



b
y



B
\ No newline at end of file
 split_test_invalid_num_outputs:
.
xy1y2y3y4"Split*
num_outputssplit_test_invalid_num_outputsZ
x


b
y1


b
y2


b
y3


b
y4


B
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment