Commit c27d3b62 authored by Brian Pickrell's avatar Brian Pickrell
Browse files

good intermediate version. rand_uniform has no output_lens, but passes...

good intermediate version.  rand_uniform has no output_lens, but passes ref_ops_tests and onnx tests.  parse_randomuniform_ops not updated.
parent a15551e5
...@@ -209,8 +209,6 @@ instruction_ref insert_common_op(module& m, ...@@ -209,8 +209,6 @@ instruction_ref insert_common_op(module& m,
const operation& op, const operation& op,
std::vector<instruction_ref> inputs) std::vector<instruction_ref> inputs)
{ {
if(op.name() == "clip")
return inputs[0];
return m.insert_instruction(ins, op, insert_common_args(m, ins, std::move(inputs))); return m.insert_instruction(ins, op, insert_common_args(m, ins, std::move(inputs)));
} }
......
...@@ -48,38 +48,43 @@ namespace op { ...@@ -48,38 +48,43 @@ namespace op {
struct rand_uniform struct rand_uniform
{ {
uint32_t sample_size = {20}; // uint32_t sample_size = {20};
uint32_t seed = {3}; uint32_t seed = {0};
float range_min = 0.0f; bool use_auto_seed = false;
float range_max = 1.0f; float range_min = 0.0f;
float range_max = 1.0f;
// From Onnx RandomUniform: // From Onnx RandomUniform:
// dtype : int (default is 1) // dtype : int (default is 1) The data type for the elements of the output tensor. currently
// The data type for the elements of the output tensor. If not specified, default is // float only. high : float (default is 1.0) Upper boundary of the output values. low : float
// TensorProto::FLOAT. high : float (default is 1.0) Upper boundary of the output values. low : // (default is 0.0) Lower boundary of the output values. seed : float (Optional) Seed to the
// float (default is 0.0) Lower boundary of the output values. seed : float (Optional) Seed to // random generator, if not specified we will auto generate one. shape : list of ints (required)
// the random generator, if not specified we will auto generate one. shape : list of ints // The shape of the output tensor.
// (required) The shape of the output tensor.
// In Onnx, the size of array to fill is given by
// TODO: consider removing this and simply using the type of the passed argument. // TODO: consider removing this and simply using the type of the passed argument.
// The only bar to doing this currently is that we can't create random integers within the // The only bar to doing this currently is that we can't create random integers within the
// current bounds of (0, 1). // current bounds of (0, 1).
shape::type_t dtype = shape::type_t::float_type; shape::type_t dtype = shape::type_t::float_type;
// std::vector<size_t> output_lens = {1}; std::vector<size_t> output_lens = {};
template <class Self, class F> template <class Self, class F>
static auto reflect(Self& self, F f) static auto reflect(Self& self, F f)
{ {
return pack( return pack(f(self.dtype, "dtype"),
f(self.dtype, "dtype"), f(self.sample_size, "sample_size"), f(self.seed, "seed")); f(self.output_lens, "output_lens"),
f(self.seed, "seed"),
f(self.use_auto_seed, "use_auto_seed"));
} }
value attributes() const { return {{"sample_size", sample_size}, {"seed", seed}}; } // value attributes() const { return {{"sample_size", sample_size}, {"seed", seed}}; }
std::string name() const { return "rand_uniform"; } std::string name() const { return "rand_uniform"; }
shape normalize_compute_shape(std::vector<shape> inputs) const shape compute_shape(std::vector<shape> inputs) const
{ {
check_shapes{inputs, *this, true}.has(1, 2); check_shapes{inputs, *this, true}.has(1, 2);
if(inputs.size() > 1 and inputs.at(1).element_space() > 0 and if(inputs.size() > 1 and inputs.at(1).element_space() > 0 and
inputs.at(1).type() != shape::type_t::uint32_type) inputs.at(1).type() != shape::type_t::uint32_type)
MIGRAPHX_THROW("RAND_UNIFORM: Input 2 (seed) must have type unsigned int"); MIGRAPHX_THROW("RAND_UNIFORM: Input 2 (seed) must have type unsigned int");
...@@ -100,14 +105,21 @@ struct rand_uniform ...@@ -100,14 +105,21 @@ struct rand_uniform
argument result{dyn_out.computed_shape}; argument result{dyn_out.computed_shape};
auto local_seed(seed); auto local_seed(seed);
if(args.size() > 1) if(use_auto_seed)
local_seed = std::chrono::system_clock::now().time_since_epoch().count();
else
{ {
if(args.at(1).get_shape().element_space() > 0) if(args.size() > 1)
{ {
visit_all(args[1])([&](auto data) { local_seed = data[0]; }); if(args.at(1).get_shape().element_space() > 0)
{
visit_all(args[1])([&](auto data) { local_seed = data[0]; });
}
else // This is a bit of an Easter Egg.
// If a seed argument was given but it has a 0-size shape at
// inference time, also obtain a seed from the system clock:
local_seed = std::chrono::system_clock::now().time_since_epoch().count();
} }
else // obtain a seed from the system clock:
local_seed = std::chrono::system_clock::now().time_since_epoch().count();
} }
// If a seed argument was not defined, use the value from the seed attribute, // If a seed argument was not defined, use the value from the seed attribute,
// or the default. // or the default.
......
...@@ -75,15 +75,15 @@ struct parse_multinomial : op_parser<parse_multinomial> ...@@ -75,15 +75,15 @@ struct parse_multinomial : op_parser<parse_multinomial>
{ {
shape s0 = args[0]->get_shape(); shape s0 = args[0]->get_shape();
// TODO: Use literal if batch size is fixed // TODO: Use literal if batch size is fixed
// TODO: Add second argument for seed (an Migraphx rule, not Onnx) if Onnx seed not given
// It will be a literal with a shape of 0 size
if(s0.dynamic()) if(s0.dynamic())
{ {
// Dynamic batch_size will be taken from args[0]. Other contents of input are // Dynamic batch_size will be taken from args[0]. Other contents of input are
// ignored here. // ignored here.
randoms = info.add_instruction( randoms = info.add_instruction(
migraphx::make_op("rand_uniform", migraphx::make_op("rand_uniform",
{{"seed", seed}, {"sample_size", sample_size}}), {{"seed", seed},
// {"sample_size", sample_size},
{"use_auto_seed", not contains(info.attributes, "seed")}}),
args[0]); args[0]);
} }
else else
...@@ -93,8 +93,11 @@ struct parse_multinomial : op_parser<parse_multinomial> ...@@ -93,8 +93,11 @@ struct parse_multinomial : op_parser<parse_multinomial>
auto rand_dummy = info.add_literal( auto rand_dummy = info.add_literal(
migraphx::literal{migraphx::shape::float_type, {batch_size * sample_size}}); migraphx::literal{migraphx::shape::float_type, {batch_size * sample_size}});
randoms = info.add_instruction(migraphx::make_op("rand_uniform", {{"seed", seed}}), randoms = info.add_instruction(
rand_dummy); migraphx::make_op(
"rand_uniform",
{{"seed", seed}, {"use_auto_seed", not contains(info.attributes, "seed")}}),
rand_dummy);
} }
} }
else else
......
...@@ -4446,6 +4446,22 @@ def multinomial_dyn_test(): ...@@ -4446,6 +4446,22 @@ def multinomial_dyn_test():
return ([node], [input], [output]) return ([node], [input], [output])
@onnx_test()
def multinomial_autoseed_dyn_test():
# If seed attribute is not given, device should auto generate one at runtime
sample_size = 12
input = helper.make_tensor_value_info("input", TensorProto.FLOAT, [None, 10])
output = helper.make_tensor_value_info("output", TensorProto.INT32,
[None, 10])
node = onnx.helper.make_node('Multinomial',
inputs=['input'],
sample_size=sample_size,
outputs=['output'])
return ([node], [input], [output])
@onnx_test() @onnx_test()
def multinomial_generated_seed_test(): def multinomial_generated_seed_test():
sample_size = 10 sample_size = 10
......
...@@ -4205,9 +4205,11 @@ TEST_CASE(multinomial_dyn_test) ...@@ -4205,9 +4205,11 @@ TEST_CASE(multinomial_dyn_test)
migraphx::shape rs{migraphx::shape::float_type, {1, sample_size}}; migraphx::shape rs{migraphx::shape::float_type, {1, sample_size}};
std::vector<float> data(rs.elements(), 0.3f); std::vector<float> data(rs.elements(), 0.3f);
auto randoms = mm->add_instruction( auto randoms = mm->add_instruction(migraphx::make_op("rand_uniform",
migraphx::make_op("rand_uniform", {{"seed", seed}, {"sample_size", sample_size}}), input); {// {"sample_size", sample_size},
auto ret = mm->add_instruction(migraphx::make_op("multinomial"), cdf, randoms); {"seed", seed}}),
input);
auto ret = mm->add_instruction(migraphx::make_op("multinomial"), cdf, randoms);
mm->add_return({ret}); mm->add_return({ret});
// auto prog = optimize_onnx("multinomial_dyn_test.onnx"); // auto prog = optimize_onnx("multinomial_dyn_test.onnx");
...@@ -4218,17 +4220,52 @@ TEST_CASE(multinomial_dyn_test) ...@@ -4218,17 +4220,52 @@ TEST_CASE(multinomial_dyn_test)
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(multinomial_autoseed_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
size_t sample_size = 13;
auto input = mm->add_parameter(
"input", migraphx::shape{migraphx::shape::float_type, {{1, 10}, {10, 10}}});
auto maxes = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {1}}}), input);
auto cdf = add_common_op(*mm, migraphx::make_op("sub"), {input, maxes});
cdf = mm->add_instruction(migraphx::make_op("exp"), cdf);
cdf = mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", false}}), cdf);
migraphx::shape rs{migraphx::shape::float_type, {1, sample_size}};
std::vector<float> data(rs.elements(), 0.3f);
auto randoms = mm->add_instruction(migraphx::make_op("rand_uniform",
{// {"sample_size", sample_size},
// {"seed", seed},
{"use_auto_seed", true}}),
input);
auto ret = mm->add_instruction(migraphx::make_op("multinomial"), cdf, randoms);
mm->add_return({ret});
// auto prog = optimize_onnx("multinomial_dyn_test.onnx");
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 10};
options.print_program_on_error = true;
auto prog = migraphx::parse_onnx("multinomial_autoseed_dyn_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(multinomial_dtype_error_test) TEST_CASE(multinomial_dtype_error_test)
{ {
EXPECT(test::throws([&] { migraphx::parse_onnx("multinomial_dtype_error_test.onnx"); })); EXPECT(test::throws([&] { migraphx::parse_onnx("multinomial_dtype_error_test.onnx"); }));
} }
TEST_CASE(multinomial_generated_seed_test) // this should be for rand_uniform now TEST_CASE(multinomial_generated_seed_test)
{ {
// multinomial op. no longer generates its own randoms
auto p1 = optimize_onnx("multinomial_generated_seed_test.onnx"); auto p1 = optimize_onnx("multinomial_generated_seed_test.onnx");
auto p2 = optimize_onnx("multinomial_generated_seed_test.onnx"); auto p2 = optimize_onnx("multinomial_generated_seed_test.onnx");
EXPECT(p1 != p2); EXPECT(p1 == p2);
} }
TEST_CASE(multinomial_int64_test) TEST_CASE(multinomial_int64_test)
...@@ -4236,7 +4273,7 @@ TEST_CASE(multinomial_int64_test) ...@@ -4236,7 +4273,7 @@ TEST_CASE(multinomial_int64_test)
migraphx::program p; migraphx::program p;
auto* mm = p.get_main_module(); auto* mm = p.get_main_module();
size_t sample_size = 10; size_t sample_size = 10;
float seed = 1.0f; uint32_t seed = 0;
migraphx::shape::type_t dtype = migraphx::shape::type_t::int64_type; migraphx::shape::type_t dtype = migraphx::shape::type_t::int64_type;
auto input = mm->add_parameter("input", migraphx::shape{migraphx::shape::float_type, {1, 10}}); auto input = mm->add_parameter("input", migraphx::shape{migraphx::shape::float_type, {1, 10}});
...@@ -4248,14 +4285,15 @@ TEST_CASE(multinomial_int64_test) ...@@ -4248,14 +4285,15 @@ TEST_CASE(multinomial_int64_test)
cdf = mm->add_instruction( cdf = mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", false}}), cdf); migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", false}}), cdf);
std::mt19937 gen(seed);
std::uniform_real_distribution<> dis(0.0, 1.0);
std::vector<float> rand_samples(sample_size);
std::generate(rand_samples.begin(), rand_samples.end(), [&]() { return dis(gen); });
migraphx::shape rs{migraphx::shape::float_type, {1, sample_size}}; migraphx::shape rs{migraphx::shape::float_type, {1, sample_size}};
auto rs_lit = mm->add_literal(migraphx::literal{rs, rand_samples}); auto rand_dummy =
mm->add_literal(migraphx::literal{migraphx::shape::float_type, {sample_size}});
// this is rand_uniform without output_lens attribute
auto randoms = mm->add_instruction(
migraphx::make_op("rand_uniform", {{"seed", seed}, {"use_auto_seed", false}}), rand_dummy);
mm->add_instruction(migraphx::make_op("multinomial", {{"dtype", dtype}}), cdf, rs_lit); mm->add_instruction(migraphx::make_op("multinomial", {{"dtype", dtype}}), cdf, randoms);
auto prog = optimize_onnx("multinomial_int64_test.onnx"); auto prog = optimize_onnx("multinomial_int64_test.onnx");
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment