gaoqiong / MIGraphX

Commit 934e21c2, authored Jul 20, 2023 by Brian Pickrell

first working version of random operation

parent 1fda1753
Showing 4 changed files with 143 additions and 12 deletions:
src/CMakeLists.txt                          +1    -0
src/include/migraphx/op/rand_uniform.hpp    +112  -0
test/onnx/multinomial_dyn_test.onnx         +0    -0
test/ref_ops_test.cpp                       +30   -12
src/CMakeLists.txt
@@ -179,6 +179,7 @@ register_migraphx_ops(
     quant_convolution
     quant_dot
     quantizelinear
+    rand_uniform
     recip
     reduce_max
     reduce_mean
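Listing rand_uniform in register_migraphx_ops is what makes the new operator constructible by its string name, which is how the updated test below creates it. A minimal sketch, not part of this commit; the include path and the printout are illustrative assumptions:

#include <migraphx/make_op.hpp>
#include <iostream>

int main()
{
    // Build the newly registered op from its string name, overriding two of its attributes.
    auto op = migraphx::make_op("rand_uniform", {{"sample_size", 100000}, {"seed", 0}});
    std::cout << op.name() << std::endl; // prints "rand_uniform"
    return 0;
}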
src/include/migraphx/op/rand_uniform.hpp
0 → 100644
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
 * Random Uniform distribution operator.  Given a shape, populate it with random values.
 *
 * Inputs:   any tensor shape.
 * Attributes:  TBD
 *
 * Output:   Same shape.
 *
 */
#ifndef MIGRAPHX_GUARD_OPERATORS_MULTINOMIAL_HPP
#define MIGRAPHX_GUARD_OPERATORS_MULTINOMIAL_HPP

#include <migraphx/check_shapes.hpp>
// #include <migraphx/argument.hpp>
#include <migraphx/par_for.hpp>
// #include <migraphx/reflect.hpp>
#include <random>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {

struct rand_uniform
{
    uint32_t sample_size = {20};
    uint32_t seed        = {0};
    shape::type_t dtype  = shape::type_t::float_type;

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.dtype, "dtype"),
                    f(self.sample_size, "sample_size"),
                    f(self.seed, "seed"));
    }

    value attributes() const { return {{"sample_size", sample_size}, {"seed", seed}}; }

    std::string name() const { return "rand_uniform"; }

    shape normalize_compute_shape(std::vector<shape> inputs) const
    {
        check_shapes{inputs, *this, true}.has(1).only_dims(2);

        if(inputs.front().dynamic())
        {
            auto batch = inputs.front().dyn_dims().front();
            return {dtype, {batch, {sample_size, sample_size}}};
        }
        else
        {
            auto batch = inputs.front().lens().front();
            return {dtype, {batch, sample_size}};
        }
    }

    argument compute(const dyn_output& dyn_out, std::vector<argument> args) const
    {
        auto asdf = dyn_out.computed_shape;
        argument result{dyn_out.computed_shape};
        // size_t batch

        std::mt19937 gen(seed);
        std::uniform_real_distribution<> dis(0.0, 1.0);

        // Use of our visitor and par_for replaces a call like
        //   std::vector<float> rand_samples(sample_size);
        //   std::generate(rand_samples.begin(), rand_samples.end(), [&]() { return dis(gen); });
        result.visit([&](auto output) {
            par_for(sample_size, [&](auto i) {
                output[i] = dis(gen);
                // output[i] = rand_samples[i];
            });
        });

        return result;
    }
};

} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif
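For context, a minimal sketch of how this operator could be exercised end to end on the ref target, mirroring the pattern of the updated multinomial_dyn_test further below. It is not part of the commit; the include list, the {1, 10} shapes, the seed value, and the main() wrapper are illustrative assumptions:

#include <migraphx/argument.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/program.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/shape.hpp>
#include <iostream>
#include <vector>

int main()
{
    migraphx::program p;
    auto* mm = p.get_main_module();

    // rand_uniform only reads the batch dimension of its (2-D) input; the values are ignored.
    migraphx::shape in_s{migraphx::shape::float_type, {1, 10}};
    auto input = mm->add_parameter("Input", in_s);

    // Output is a {batch, sample_size} tensor of the op's dtype (float by default).
    mm->add_instruction(
        migraphx::make_op("rand_uniform", {{"sample_size", 10}, {"seed", 1}}), input);

    p.compile(migraphx::make_target("ref"));

    std::vector<float> dummy(10, 0.0f);
    migraphx::parameter_map params;
    params["Input"] = migraphx::argument(in_s, dummy.data());

    auto result = p.eval(params).back();
    result.visit([](auto output) {
        for(auto v : output)
            std::cout << v << " ";
        std::cout << "\n";
    });
    return 0;
}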
test/onnx/multinomial_dyn_test.onnx
0 → 100644
File added
test/ref_ops_test.cpp
@@ -34,6 +34,7 @@
#include <migraphx/verify.hpp>
#include <migraphx/onnx.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/common.hpp>
#include <migraphx/serialize.hpp>
@@ -4959,30 +4960,47 @@ TEST_CASE(multinomial_dyn_test)
     size_t sample_size = 100000;
     float seed         = 0.0f;
-    std::mt19937 gen(seed);
-    std::uniform_real_distribution<> dis(0.0, 1.0);
-    std::vector<float> rand_samples(sample_size);
-    std::generate(rand_samples.begin(), rand_samples.end(), [&]() { return dis(gen); });
-    migraphx::shape rs{migraphx::shape::float_type, {1, sample_size}};
-    auto rs_lit = mm->add_literal(migraphx::literal{rs, rand_samples});
+    // Randomization steps will now be performed by a runtime operation
+    // std::mt19937 gen(seed);
+    // std::uniform_real_distribution<> dis(0.0, 1.0);
+    // std::vector<float> rand_samples(sample_size);
+    // std::generate(rand_samples.begin(), rand_samples.end(), [&]() { return dis(gen); });
+    // The only thing we take from the input shape is the batch size
+    migraphx::shape rs{migraphx::shape::float_type, {{1, 2}, {123, sample_size + 1}}};
+    auto input = mm->add_parameter("Input", rs);
+    auto randoms = mm->add_instruction(
+        migraphx::make_op("rand_uniform", {{"sample_size", sample_size}}), input);
     // the probability distribution, which also defines the number of categories
     migraphx::shape s{migraphx::shape::float_type, {{1, 2}, {5, 6}}};
     std::vector<int> dist{15, 25, 15, 25, 20};
     std::vector<float> data(5);
     std::transform(dist.begin(), dist.end(), data.begin(), [&](auto d) { return std::log(d); });
-    auto input = mm->add_literal(migraphx::literal(s, data));
+    // auto input = mm->add_literal(migraphx::literal(s, data));
+    auto input2 = mm->add_parameter("Input_2", s);
-    auto maxes = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {1}}}), input);
+    auto maxes = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {1}}}), input2);
-    auto mb_maxes =
-        mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 5}}}), maxes);
-    auto cdf = mm->add_instruction(migraphx::make_op("sub"), input, mb_maxes);
+    auto mb_maxes = mm->add_instruction(migraphx::make_op("multibroadcast"), maxes, input2);
+    auto cdf      = mm->add_instruction(migraphx::make_op("sub"), input2, mb_maxes);
     cdf = mm->add_instruction(migraphx::make_op("exp"), cdf);
     cdf = mm->add_instruction(
         migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", false}}), cdf);
-    mm->add_instruction(migraphx::make_op("multinomial"), cdf, rs_lit);
+    // TODO: I want a dynamic input to the multinomial op
+    std::vector<float> dummy(999);
+    mm->add_instruction(migraphx::make_op("multinomial"), cdf, randoms);
     p.compile(migraphx::make_target("ref"));
-    auto result = p.eval({}).back();
+    migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {1, 999}};
+    migraphx::shape input_fixed_shape1{migraphx::shape::float_type, {1, 5}};
+    migraphx::parameter_map params0;
+    params0["Input"]   = migraphx::argument(input_fixed_shape0, dummy.data());
+    params0["Input_2"] = migraphx::argument(input_fixed_shape1, data.data());
+    auto result = p.eval(params0).back();
     std::vector<int32_t> result_vec(sample_size);
     result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });
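To make the intent of the test change concrete: the commit replaces a literal of precomputed uniform samples with a runtime rand_uniform instruction whose output feeds multinomial. The following standalone sketch is not MIGraphX code and not the library's exact sampling kernel; it is a conceptual CPU-side equivalent, using the same {15, 25, 15, 25, 20} weights, of how uniform samples plus a cumulative distribution yield categorical counts roughly proportional to those weights:

#include <algorithm>
#include <iostream>
#include <numeric>
#include <random>
#include <vector>

int main()
{
    const std::vector<double> weights = {15, 25, 15, 25, 20};
    const size_t sample_size          = 100000;

    // Inclusive prefix sum of the weights acts as an unnormalized CDF.
    std::vector<double> cdf(weights.size());
    std::partial_sum(weights.begin(), weights.end(), cdf.begin());

    std::mt19937 gen(0);
    std::uniform_real_distribution<> dis(0.0, 1.0);

    std::vector<size_t> counts(weights.size(), 0);
    for(size_t i = 0; i < sample_size; ++i)
    {
        // Scale the uniform sample into the CDF's range and find its bucket.
        double r = dis(gen) * cdf.back();
        auto it  = std::upper_bound(cdf.begin(), cdf.end(), r);
        ++counts[it - cdf.begin()];
    }

    // Expect roughly 15%, 25%, 15%, 25%, 20% of sample_size in each bucket.
    for(size_t c : counts)
        std::cout << c << " ";
    std::cout << "\n";
    return 0;
}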