Commit de479d9a authored by Brian Pickrell

work in progress; fixing some onnx tests and one merge bug

parent 264a7647
@@ -46,7 +46,7 @@ namespace op {
struct rand_uniform
{
uint32_t sample_size = {20};
uint32_t sample_size = {23};
uint32_t seed = {0};
shape::type_t dtype = shape::type_t::float_type;
@@ -62,20 +62,29 @@ struct rand_uniform
std::string name() const { return "rand_uniform"; }
shape normalize_compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs, *this, true}.has(1);
auto s = inputs.front();
if(s.dynamic())
if(inputs.size() > 0)
{
return s;
}
else if(s.broadcasted())
{
return {s.type(), s.lens()};
}
else
{
return s.with_lens(s.lens());
check_shapes{inputs, *this, true}.has(1);
auto s = inputs.front();
if(s.dynamic())
{
// return s;
return {dtype, {s.dyn_dims()[0], {sample_size, sample_size}}};
}
else if(s.broadcasted())
{
return {s.type(), s.lens()};
}
else
{
// For static input, return the input shape. Assume the batch_size and sample_size
// have already been factored in. This saves us from reallocating a shape at
// runtime when the input is a literal.
return s.with_lens(s.lens());
}
}
// No input instruction is required. 1-dimensional static output.
return shape{dtype, {sample_size}};
}
argument compute(const dyn_output& dyn_out, std::vector<argument> args) const
@@ -87,15 +96,15 @@ struct rand_uniform
std::uniform_real_distribution<> dis(0.0, 1.0);
size_t elts(dyn_out.computed_shape.elements());
// Use of our visitor and par_for replaces a call like
// std::vector<float> rand_samples(sample_size);
// std::generate(rand_samples.begin(), rand_samples.end(), [&]() { return dis(gen); });
std::vector<float> rand_samples(sample_size);
std::generate(rand_samples.begin(), rand_samples.end(), [&]() { return dis(gen); });
result.visit([&](auto output) {
par_for(elts, [&](auto i) {
output[i] = dis(gen);
// output[i] = rand_samples[i];
});
});
// result.visit([&](auto output) {
// par_for(elts, [&](auto i) {
// output[i] = dis(gen);
// // output[i] = rand_samples[i];
// });
// });
return result;
}
};
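For reference, the compute() above fills the output with draws from std::uniform_real_distribution seeded by the seed attribute. Below is a minimal standalone sketch of that sampling loop in plain C++ (not the MIGraphX operator itself; the helper name make_uniform_samples is made up for illustration):

// Draw sample_size values in [0, 1) from a Mersenne Twister seeded with `seed`,
// mirroring the generator and distribution used in rand_uniform::compute.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <random>
#include <vector>

std::vector<float> make_uniform_samples(uint32_t seed, std::size_t sample_size)
{
    std::mt19937 gen(seed);
    std::uniform_real_distribution<> dis(0.0, 1.0);
    std::vector<float> samples(sample_size);
    std::generate(samples.begin(), samples.end(),
                  [&] { return static_cast<float>(dis(gen)); });
    return samples;
}

With a fixed seed the mt19937 sequence is reproducible, which is what the seed-based tests further down rely on.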
......
@@ -46,6 +46,10 @@ struct parse_multinomial : op_parser<parse_multinomial>
dtype = info.attributes.at("dtype").i();
shape::type_t output_type = get_type(dtype);
size_t batch_size = 1;
if(contains(info.attributes, "batch_size"))
batch_size = info.attributes.at("batch_size").i();
size_t sample_size = 1;
if(contains(info.attributes, "sample_size"))
sample_size = info.attributes.at("sample_size").i();
@@ -55,62 +59,51 @@ struct parse_multinomial : op_parser<parse_multinomial>
// Subtract the per-batch maximum log-probability, making the per-batch max 0
auto maxes =
info.add_instruction(migraphx::make_op("reduce_max", {{"axes", {1}}}), args[0]);
auto cdf = info.add_common_op("sub", args[0], maxes);
auto cdf = info.add_common_op("sub", args[0], maxes);
// Take the element-wise exponent to get probabilities in the range (0, 1]
cdf = info.add_instruction(migraphx::make_op("exp"), cdf);
// Compute the cumulative density function
cdf = info.add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", false}}), cdf);
uint32_t seed(0);
if(contains(info.attributes, "seed"))
seed = info.attributes.at("seed").i();
instruction_ref randoms;
// Make a shape that's the size of the sample set
shape s0 = args[0]->get_shape();
migraphx::shape dist_shape;
instruction_ref rand_dummy;
if(s0.dynamic())
if(args.size() > 0)
{
dist_shape = {output_type, {s0.dyn_dims().front(), shape::dynamic_dimension({sample_size, sample_size})}};
auto temp = info.add_instruction(make_op("dimensions_of", {{"start", 0}, {"end", s0.ndim() - 1}}), args[0]);
auto asdf = temp->get_shape();
rand_dummy = info.add_instruction(migraphx::make_op("multibroadcast",
{{"out_dyn_dims", migraphx::to_value(dist_shape)}}), args[0], temp);
auto zap = rand_dummy->get_shape();
printf("hello %d\n", zap.ndim());
shape s0 = args[0]->get_shape();
// TODO: Use literal if batch size is fixed
if(s0.dynamic())
{
// Dynamic batch_size will be taken from args[0]. Other contents of input are
// ignored here.
randoms = info.add_instruction(
migraphx::make_op("rand_uniform",
{{"seed", seed}, {"sample_size", sample_size}}),
args[0]);
}
else
{
// use literal. It may be quite large.
batch_size = s0.lens().front();
auto rand_dummy = info.add_literal(
migraphx::literal{migraphx::shape::float_type, {batch_size * sample_size}});
randoms = info.add_instruction(migraphx::make_op("rand_uniform", {{"seed", seed}}),
rand_dummy);
}
}
else
{
// use literal
size_t batch_size = s0.lens().front();
dist_shape = {output_type, {batch_size, sample_size}};
rand_dummy = info.add_literal(migraphx::literal{dist_shape, {batch_size, sample_size}});
// mul_random = info.add_instruction(migraphx::make_op("multibroadcast",
// {{"out_lens", migraphx::to_value(dist_shape)}}), args[0]);
// migraphx::shape dist_shape{migraphx::shape::float_type, {batch_size, sample_size}};
// use literal. It may be quite large.
auto rand_dummy = info.add_literal(
migraphx::literal{migraphx::shape::float_type, {batch_size * sample_size}});
randoms = info.add_instruction(migraphx::make_op("rand_uniform", {{"seed", seed}}),
rand_dummy);
}
// auto mul_random = info.add_instruction(migraphx::make_op("multibroadcast"
// ,{{"out_dyn_dims", migraphx::to_value(b)}}
// ), s0, dist_shape);
uint32_t seed(0);
if(contains(info.attributes, "seed"))
seed = info.attributes.at("seed").i();
// how to populate data when dist_shape is dynamic? Answer: just send dist_shape`
// std::vector<float> data(dist_shape.elements(), 0.f);
// auto dummy = info.add_literal(migraphx::literal(dist_shape, data));
auto randoms = info.add_instruction(migraphx::make_op("rand_uniform", {{"seed", seed}}), rand_dummy);
return info.add_instruction(
migraphx::make_op("multinomial", {{"dtype", output_type}}), cdf, randoms);
}
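The comments in the parser spell out the pipeline: subtract the per-row maximum from the logits, exponentiate, take an inclusive prefix_scan_sum to form a CDF, then consume one uniform draw per requested sample in the multinomial op. The following is a minimal numeric sketch of that math for a single batch row (illustrative plain C++, not the MIGraphX implementation; the multinomial op's exact tie-breaking may differ):

// For one row of logits: shift by the row max, exponentiate, accumulate an
// inclusive prefix sum (the CDF), then map each uniform draw r in [0, 1) to
// the first bin whose cumulative mass reaches r * total.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<int> sample_row(const std::vector<float>& logits,
                            const std::vector<float>& uniform_draws)
{
    float row_max = *std::max_element(logits.begin(), logits.end());
    std::vector<float> cdf(logits.size());
    float running = 0.0f;
    for(std::size_t i = 0; i < logits.size(); ++i)
    {
        running += std::exp(logits[i] - row_max); // "sub" then "exp"
        cdf[i] = running;                         // "prefix_scan_sum", exclusive=false
    }
    std::vector<int> picks;
    for(float r : uniform_draws) // one uniform draw per output sample
    {
        auto it = std::lower_bound(cdf.begin(), cdf.end(), r * cdf.back());
        picks.push_back(static_cast<int>(it - cdf.begin()));
    }
    return picks;
}

Note the CDF is left unnormalized; scaling each draw by cdf.back() avoids dividing every element by the total.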
......
@@ -4414,9 +4414,9 @@ def mod_test_fmod_different_dtypes():
@onnx_test()
def multinomial_test():
sample_size = 10
sample_size = 13
seed = 0.0
input = helper.make_tensor_value_info("input", TensorProto.FLOAT, [1, 10])
input = helper.make_tensor_value_info("input", TensorProto.FLOAT, [3, 10])
output = helper.make_tensor_value_info("output", TensorProto.INT32,
[1, 10])
@@ -4431,7 +4431,7 @@ def multinomial_test():
@onnx_test()
def multinomial_dyn_test():
sample_size = 10
sample_size = 13
seed = 0.0
input = helper.make_tensor_value_info("input", TensorProto.FLOAT, [None, 10])
output = helper.make_tensor_value_info("output", TensorProto.INT32,
......
@@ -4161,26 +4161,25 @@ TEST_CASE(multinomial_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
size_t sample_size = 10;
size_t sample_size = 13;
size_t batch_size = 3;
float seed = 0.0f;
auto input = mm->add_parameter("input", migraphx::shape{migraphx::shape::float_type, {1, 10}});
auto maxes = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {1}}}), input);
auto mb_maxes =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 10}}}), maxes);
auto input =
mm->add_parameter("input", migraphx::shape{migraphx::shape::float_type, {batch_size, 10}});
auto maxes = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {1}}}), input);
auto mb_maxes = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {batch_size, 10}}}), maxes);
auto cdf = mm->add_instruction(migraphx::make_op("sub"), input, mb_maxes);
cdf = mm->add_instruction(migraphx::make_op("exp"), cdf);
cdf = mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", false}}), cdf);
std::mt19937 gen(seed);
std::uniform_real_distribution<> dis(0.0, 1.0);
std::vector<float> rand_samples(sample_size);
std::generate(rand_samples.begin(), rand_samples.end(), [&]() { return dis(gen); });
migraphx::shape rs{migraphx::shape::float_type, {1, sample_size}};
auto rs_lit = mm->add_literal(migraphx::literal{rs, rand_samples});
mm->add_instruction(migraphx::make_op("multinomial"), cdf, rs_lit);
auto rand_dummy =
mm->add_literal(migraphx::literal{migraphx::shape::float_type, {batch_size * sample_size}});
auto randoms =
mm->add_instruction(migraphx::make_op("rand_uniform", {{"seed", seed}}), rand_dummy);
mm->add_instruction(migraphx::make_op("multinomial"), cdf, randoms);
auto prog = optimize_onnx("multinomial_test.onnx");
@@ -4191,44 +4190,40 @@ TEST_CASE(multinomial_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
size_t sample_size = 10;
size_t sample_size = 13;
float seed = 0.0f;
auto input = mm->add_parameter("input", migraphx::shape{migraphx::shape::float_type, {{1, 10}, {10, 10}}});
auto input = mm->add_parameter(
"input", migraphx::shape{migraphx::shape::float_type, {{1, 10}, {10, 10}}});
auto maxes = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {1}}}), input);
// auto mb_maxes =
// mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 10}}}), maxes);
// auto cdf = mm->add_instruction(migraphx::make_op("sub"), input, mb_maxes);
auto cdf = add_common_op(*mm, migraphx::make_op("sub"), {input, maxes});
auto cdf = add_common_op(*mm, migraphx::make_op("sub"), {input, maxes});
cdf = mm->add_instruction(migraphx::make_op("exp"), cdf);
cdf = mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", false}}), cdf);
// std::mt19937 gen(seed);
// std::uniform_real_distribution<> dis(0.0, 1.0);
// std::vector<float> rand_samples(sample_size);
// std::generate(rand_samples.begin(), rand_samples.end(), [&]() { return dis(gen); });
migraphx::shape rs{migraphx::shape::float_type, {1, sample_size}};
// auto rs_lit = mm->add_literal(migraphx::literal{rs, rand_samples});
std::vector<float> data(rs.elements(), 0.3f);
auto dummy = mm->add_literal(migraphx::literal(rs, data));
auto randoms = mm->add_instruction(migraphx::make_op("rand_uniform", {{"seed", seed}}), dummy);
auto randoms = mm->add_instruction(
migraphx::make_op("rand_uniform", {{"seed", seed}, {"sample_size", sample_size}}), input);
auto ret = mm->add_instruction(migraphx::make_op("multinomial"), cdf, randoms);
// mm->add_return({ret});
mm->add_return({ret});
// auto prog = optimize_onnx("multinomial_dyn_test.onnx");
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 10};
options.default_dyn_dim_value = {1, 10};
options.print_program_on_error = true;
auto prog = migraphx::parse_onnx("multinomial_dyn_test.onnx", options);
auto prog = migraphx::parse_onnx("multinomial_dyn_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(multinomial_dtype_error_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("multinomial_dtype_error_test.onnx"); }));
}
TEST_CASE(multinomial_generated_seed_test)
TEST_CASE(multinomial_generated_seed_test) // this should be for rand_uniform now
{
auto p1 = optimize_onnx("multinomial_generated_seed_test.onnx");
auto p2 = optimize_onnx("multinomial_generated_seed_test.onnx");
......
@@ -5296,7 +5296,7 @@ TEST_CASE(multinomial_test)
return static_cast<double>(n) / res_dist_sum;
});
EXPECT(migraphx::verify_range(norm, res_norm, 100000));
EXPECT(migraphx::verify::verify_range(norm, res_norm, 100000));
}
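The verify test above normalizes the per-category hit counts and compares them against the expected distribution with verify::verify_range. Below is a rough sketch of that kind of frequency check (illustrative only; the helper name and tolerance handling are assumptions, not the test suite's API):

// Count how often each category was drawn, normalize by the number of samples,
// and accept if every empirical frequency is within `tol` of its expected value.
#include <cmath>
#include <cstddef>
#include <vector>

bool frequencies_match(const std::vector<int>& samples,
                       const std::vector<double>& expected_probs,
                       double tol)
{
    std::vector<double> counts(expected_probs.size(), 0.0);
    for(int s : samples)
        counts[static_cast<std::size_t>(s)] += 1.0;
    for(std::size_t i = 0; i < counts.size(); ++i)
    {
        double freq = counts[i] / static_cast<double>(samples.size());
        if(std::fabs(freq - expected_probs[i]) > tol)
            return false;
    }
    return true;
}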
TEST_CASE(multinomial_dyn_test)
......