Commit 23851d62 authored by Khalique Ahmed's avatar Khalique Ahmed

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into develop

parents 41d4e92b 5fa42993
......@@ -84,7 +84,7 @@ migraphx::program create_program_from_mlir(const migraphx::module& mmlir)
    inputs.push_back(mm->add_parameter("output", mmlir.get_output_shapes().front()));
    migraphx::gpu::context ctx;
    migraphx::gpu::insert_mlir(*mm, mm->end(), compile_mlir(ctx, mmlir), inputs);
    migraphx::gpu::insert_mlir(*mm, mm->end(), compile_mlir(ctx, mmlir, inputs), inputs);
    return p;
}
......
[Binary ONNX test data diff: batch_norm_invalid_rank_test.onnx removed and batch_norm_rank_2_test.onnx added. Both describe a BatchNormalization graph over inputs x, scale, bias, mean, and variance with output y (the new file also sets an epsilon attribute); raw protobuf bytes omitted.]
......@@ -331,6 +331,24 @@ def batch_norm_flat_test():
    return ([node], [x, scale, bias, mean, var], [out])


@onnx_test
def batch_norm_rank_2_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 5])
    scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [5])
    bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [5])
    mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [5])
    var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [5])
    out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 5])

    node = onnx.helper.make_node(
        'BatchNormalization',
        inputs=['x', 'scale', 'bias', 'mean', 'variance'],
        outputs=['y'],
        epsilon=1e-6)

    return ([node], [x, scale, bias, mean, var], [out])


@onnx_test
def batch_norm_1d_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [2, 3, 4])
......@@ -385,23 +403,6 @@ def batch_norm_3d_test():
    return ([node], [x, scale, bias, mean, var], [out])


@onnx_test
def batch_norm_invalid_rank_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [8, 8])
    scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [8])
    bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [8])
    mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [8])
    var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [8])
    out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [8, 8])

    node = onnx.helper.make_node(
        'BatchNormalization',
        inputs=['x', 'scale', 'bias', 'mean', 'variance'],
        outputs=['y'])

    return ([node], [x, scale, bias, mean, var], [out])


@onnx_test
def batch_norm_invalid_bias_rank_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3, 4, 4])
......
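For context, the @onnx_test decorator used above is defined elsewhere in gen_onnx.py and is not part of this diff; it is assumed to turn the (nodes, inputs, outputs) tuple returned by each generator into the .onnx file that the parser tests load. A minimal sketch of that step, under that assumption and with a hypothetical helper name:

import onnx
from onnx import helper


def save_onnx_case(name, nodes, inputs, outputs):
    # Hypothetical helper: build a graph from the returned tuple and write it
    # out as <name>.onnx, roughly what the @onnx_test decorator is assumed to do.
    graph = helper.make_graph(nodes, name, inputs, outputs)
    model = helper.make_model(graph)
    onnx.save(model, name + '.onnx')


# e.g. save_onnx_case('batch_norm_rank_2_test', *batch_norm_rank_2_test()) would
# produce the batch_norm_rank_2_test.onnx file referenced by the C++ tests below.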
......@@ -394,6 +394,31 @@ TEST_CASE(batch_norm_flat_test)
    EXPECT(p == prog);
}

TEST_CASE(batch_norm_rank_2_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto x     = mm->add_parameter("x", {migraphx::shape::float_type, {2, 5}});
    auto scale = mm->add_parameter("scale", {migraphx::shape::float_type, {5}});
    auto bias  = mm->add_parameter("bias", {migraphx::shape::float_type, {5}});
    auto mean  = mm->add_parameter("mean", {migraphx::shape::float_type, {5}});
    auto var   = mm->add_parameter("variance", {migraphx::shape::float_type, {5}});
    auto rt    = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
    auto eps   = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-6f}});

    auto numer   = add_common_op(*mm, migraphx::make_op("sub"), {x, mean});
    auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {var, eps});
    auto denom   = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
    auto div0    = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
    auto r0      = add_common_op(*mm, migraphx::make_op("mul"), {div0, scale});
    add_common_op(*mm, migraphx::make_op("add"), {r0, bias});

    auto prog = optimize_onnx("batch_norm_rank_2_test.onnx");
    EXPECT(p == prog);
}

TEST_CASE(batch_norm_1d_test)
{
    migraphx::program p;
......
......@@ -115,6 +115,43 @@ TEST_CASE(batch_norm_flat_test)
    EXPECT(migraphx::verify_range(result_vector, gold));
}

TEST_CASE(batch_norm_rank_2_test)
{
    migraphx::program p = migraphx::parse_onnx("batch_norm_rank_2_test.onnx");
    p.compile(migraphx::ref::target{});

    migraphx::shape x_shape{migraphx::shape::float_type, {2, 5}};
    migraphx::shape c_shape{migraphx::shape::float_type, {5}};
    std::vector<float> x_data = {1., 2., 3., 4., 5., 6., 7., 8., 9., 10.};
    std::vector<float> scale_data(5, 1.);
    std::vector<float> bias_data(5, 0.);
    std::vector<float> mean_data = {1., 2., 1., 2., 1.};
    std::vector<float> variance_data(5, 0.5);

    migraphx::parameter_map params;
    params["x"]        = migraphx::argument(x_shape, x_data.data());
    params["scale"]    = migraphx::argument(c_shape, scale_data.data());
    params["bias"]     = migraphx::argument(c_shape, bias_data.data());
    params["mean"]     = migraphx::argument(c_shape, mean_data.data());
    params["variance"] = migraphx::argument(c_shape, variance_data.data());

    auto result = p.eval(params).back();
    std::vector<float> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    std::vector<float> gold = {0.,
                               0.,
                               2.8284243,
                               2.8284243,
                               5.65684859,
                               7.07106074,
                               7.07106074,
                               9.89948504,
                               9.89948504,
                               12.72790933};
    EXPECT(migraphx::verify_range(result_vector, gold));
}

TEST_CASE(batch_norm_1d_test)
{
    migraphx::program p = migraphx::parse_onnx("batch_norm_1d_test.onnx");
......
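The gold values in batch_norm_rank_2_test above follow from the inference-mode BatchNormalization formula y = scale * (x - mean) / sqrt(variance + epsilon) + bias, which is the same computation the parser expands into the sub/add/pow/div/mul/add chain shown earlier. A quick NumPy check (a standalone sketch, not part of the test suite) reproduces them:

import numpy as np

# Same inputs as the ref test: x is [2, 5], the per-channel stats have length 5.
x = np.arange(1.0, 11.0, dtype=np.float32).reshape(2, 5)
scale = np.ones(5, dtype=np.float32)
bias = np.zeros(5, dtype=np.float32)
mean = np.array([1.0, 2.0, 1.0, 2.0, 1.0], dtype=np.float32)
variance = np.full(5, 0.5, dtype=np.float32)
epsilon = 1e-6

y = scale * (x - mean) / np.sqrt(variance + epsilon) + bias
print(y.flatten())
# -> [ 0.  0.  2.8284243  2.8284243  5.6568486  7.0710607  7.0710607
#      9.899485   9.899485  12.727909 ]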
......@@ -21,47 +21,27 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_RTGLIB_DECONVOLUTION_HPP
#define MIGRAPHX_GUARD_RTGLIB_DECONVOLUTION_HPP

#include <migraphx/shape.hpp>
#include <migraphx/op/deconvolution.hpp>
#include <migraphx/gpu/miopen.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

struct context;

struct miopen_deconvolution
{
    op::deconvolution op;
    shared<convolution_descriptor> cd;
    miopenConvFwdAlgorithm_t algo{};
    uint64_t solution_id = 0;

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack_join(op::deconvolution::reflect(self.op, f),
                         pack(f(self.solution_id, "solution_id")));
    }

    std::string name() const { return "gpu::deconv"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument
    compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
    shape find(context& ctx, const shape& output_shape, std::vector<shape> inputs);
    void finalize(context& ctx, const shape& output_shape, std::vector<shape> inputs);

    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
    {
        return shapes.size() - 1;
    }
};

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif

#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>

struct quant_conv_1d : verify_program<quant_conv_1d>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape a_shape{migraphx::shape::int8_type, {2, 3, 4}};
        auto pa = mm->add_parameter("a", a_shape);
        migraphx::shape c_shape{migraphx::shape::int8_type, {2, 3, 3}};
        auto pc = mm->add_parameter("c", c_shape);
        mm->add_instruction(
            migraphx::make_op("quant_convolution",
                              {{"padding", {0}}, {"stride", {1}}, {"dilation", {1}}}),
            pa,
            pc);
        return p;
    }
};
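As a sanity check on the new verify test: with padding 0, stride 1, and dilation 1, a width-3 kernel over a width-4 input gives an output width of 4 - 3 + 1 = 2, so the program above should yield a {2, 2, 2} int32 result (quant_convolution accumulates int8 inputs into int32). A rough NumPy reference for that computation, offered only as a sketch and not as how MIGraphX implements it:

import numpy as np


def quant_conv1d_ref(a, c):
    # a: [batch, in_channels, width] int8; c: [out_channels, in_channels, kernel] int8
    n, in_ch, w = a.shape
    out_ch, _, k = c.shape
    out_w = w - k + 1  # padding 0, stride 1, dilation 1
    y = np.zeros((n, out_ch, out_w), dtype=np.int32)
    for b in range(n):
        for o in range(out_ch):
            for i in range(out_w):
                y[b, o, i] = np.sum(a[b, :, i:i + k].astype(np.int32) *
                                    c[o].astype(np.int32))
    return y


a = np.random.randint(-4, 4, size=(2, 3, 4), dtype=np.int8)
c = np.random.randint(-4, 4, size=(2, 3, 3), dtype=np.int8)
print(quant_conv1d_ref(a, c).shape)  # (2, 2, 2)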