Commit 5a14c0bf authored by umangyadav

Merge branch 'develop' into workspace_size

parents cb01e280 5fa42993
@@ -37,10 +37,7 @@ struct quant_conv_default_mode : verify_program<quant_conv_default_mode>
auto pa = mm->add_parameter("a", a_shape);
migraphx::shape c_shape{migraphx::shape::int8_type, {2, 3, 3, 3}};
auto pc = mm->add_parameter("c", c_shape);
mm->add_instruction(
migraphx::op::quant_convolution{{{0, 0}}, {{1, 1}}, {{1, 1}}, migraphx::op::same},
pa,
pc);
mm->add_instruction(migraphx::op::quant_convolution{{{0, 0}}, {{1, 1}}, {{1, 1}}}, pa, pc);
return p;
}
};
@@ -37,10 +37,7 @@ struct quant_conv_int8x4_default : verify_program<quant_conv_int8x4_default>
auto pa = mm->add_parameter("a", a_shape);
migraphx::shape c_shape{migraphx::shape::int8_type, {16, 16, 3, 3}};
auto pc = mm->add_parameter("c", c_shape);
mm->add_instruction(
migraphx::op::quant_convolution{{{0, 0}}, {{1, 1}}, {{1, 1}}, migraphx::op::same},
pa,
pc);
mm->add_instruction(migraphx::op::quant_convolution{{{0, 0}}, {{1, 1}}, {{1, 1}}}, pa, pc);
return p;
}
};
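Both tests now rely on the default padding mode; the positional braces map to padding, stride, and dilation. For comparison, a sketch of the equivalent make_op construction, assuming quant_convolution registers the same attribute names as the regular convolution op:

mm->add_instruction(
    migraphx::make_op("quant_convolution",
                      {{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
    pa,
    pc);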
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/batch_norm_inference.hpp>
struct test_batchnorm_1d_per_actv : verify_program<test_batchnorm_1d_per_actv>
{
const size_t d1 = 5;
const size_t channels = 2;
const size_t batches = 3;
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {batches, channels, d1}};
migraphx::shape vars{migraphx::shape::float_type, {channels, d1}};
auto x = mm->add_parameter("x", s);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op(
"batch_norm_inference",
{{"epsilon", 1.0e-5},
{"momentum", 0.96f},
{"bn_mode",
migraphx::to_value(migraphx::op::batch_norm_inference::per_activation)}}),
x,
scale,
bias,
mean,
variance);
return p;
}
};
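For reference, assuming the standard definition, batch_norm_inference in per_activation mode normalizes with statistics indexed per channel and per position, which is why vars has shape {channels, d1} rather than {channels}:

    y_{n,c,d} = \gamma_{c,d} \cdot \frac{x_{n,c,d} - \mu_{c,d}}{\sqrt{\sigma^2_{c,d} + \epsilon}} + \beta_{c,d},
    \quad \gamma = \text{scale},\ \beta = \text{bias},\ \mu = \text{mean},\ \sigma^2 = \text{variance}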
@@ -27,27 +27,19 @@
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_batchnorm_inference : verify_program<test_batchnorm_inference>
struct test_concat_axis_2 : verify_program<test_concat_axis_2>
{
const size_t width = 3;
const size_t height = 3;
const size_t channels = 3;
const size_t batches = 4;
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {batches, channels, height, width}};
migraphx::shape vars{migraphx::shape::float_type, {channels}};
auto x = mm->add_parameter("x", s);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
migraphx::shape s0{migraphx::shape::int32_type, {3, 2, 1}};
migraphx::shape s1{migraphx::shape::int32_type, {3, 2, 1}};
migraphx::shape s2{migraphx::shape::int32_type, {3, 2, 1}};
auto l0 = mm->add_parameter("x", s0);
auto l1 = mm->add_parameter("y", s1);
auto l2 = mm->add_parameter("z", s2);
mm->add_instruction(migraphx::make_op("concat", {{"axis", 2}}), l0, l1, l2);
return p;
}
};
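As a quick sanity sketch (plain C++, separate from the test) of the expected output lens, assuming standard concat semantics where the lengths on the concat axis add and all other dims must match:

#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
    // Lens of the three int32 inputs above.
    std::vector<std::size_t> x{3, 2, 1}, y{3, 2, 1}, z{3, 2, 1};
    const std::size_t axis = 2;
    // Concat sums the lengths along the concat axis; other dims must match.
    std::vector<std::size_t> out = x;
    out[axis] = x[axis] + y[axis] + z[axis];
    assert((out == std::vector<std::size_t>{3, 2, 3}));
    return 0;
}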
@@ -26,6 +26,8 @@
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/common.hpp>
struct test_conv_bn : verify_program<test_conv_bn>
{
@@ -37,19 +37,38 @@ struct test_conv_bn : verify_program<test_conv_bn>
migraphx::shape xs{migraphx::shape::float_type, {1, 3, 224, 224}};
migraphx::shape ws{migraphx::shape::float_type, {64, 3, 7, 7}};
migraphx::shape vars{migraphx::shape::float_type, {64}};
auto x = mm->add_parameter("x", xs);
auto w = mm->add_parameter("w", ws);
auto x = mm->add_parameter("x", xs);
auto w = mm->add_parameter("w", ws);
// non-symmetrical tiling
auto conv = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {3, 3}}, {"stride", {2, 2}}, {"dilation", {1, 1}}}),
x,
w);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), conv, scale, bias, mean, variance);
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
auto usq_scale =
mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
auto usq_bias =
mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
auto usq_mean =
mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
auto usq_var =
mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), variance);
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {conv, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
return p;
}
};
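The unsqueeze/sub/add/pow/div/mul/add chain replaces the removed batch_norm_inference op with the inference-time batch-norm formula written out elementwise (pow with exponent 0.5 standing in for the square root); the {64} per-channel stats are unsqueezed to {64, 1, 1} so they broadcast over the NCHW conv output:

    y = \gamma \cdot \frac{x - \mu}{\sqrt{\sigma^2 + \epsilon}} + \beta,
    \quad \gamma = \text{scale},\ \beta = \text{bias},\ \mu = \text{mean},\ \sigma^2 = \text{variance},\ \epsilon = 10^{-5}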
......@@ -26,21 +26,38 @@
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/common.hpp>
struct test_conv_bn_add : verify_program<test_conv_bn_add>
{
static migraphx::instruction_ref add_bn(migraphx::module& m,
migraphx::instruction_ref x,
std::size_t channels,
std::size_t seed = 1)
static migraphx::instruction_ref add_bn(migraphx::module& m, migraphx::instruction_ref x)
{
migraphx::shape vars{migraphx::shape::float_type, {channels}};
auto scale = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 1 + seed)));
auto bias = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 2 + seed)));
auto mean = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 3 + seed)));
auto variance = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 4 + seed)));
return m.add_instruction(
migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
auto bn_lens = x->get_shape().lens();
auto c_len = bn_lens.at(1);
migraphx::shape vars{migraphx::shape::float_type, {c_len}};
auto scale = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 1 + c_len)));
auto bias = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 2 + c_len)));
auto mean = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 3 + c_len)));
auto variance = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 4 + c_len)));
auto rt = m.add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = m.add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
auto usq_scale =
m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
auto usq_bias = m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
auto usq_mean = m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
auto usq_var =
m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), variance);
auto numer = add_common_op(m, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(m, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(m, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(m, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(m, migraphx::make_op("mul"), {div0, usq_scale});
return add_common_op(m, migraphx::make_op("add"), {r0, usq_bias});
}
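As a minimal standalone sketch (separate from this diff) of the add_common_op pattern these rewrites rely on: unsqueeze the per-channel stats to {C, 1, 1}, then let add_common_op insert the broadcasts an elementwise op needs.

#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/common.hpp>

int main()
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape xs{migraphx::shape::float_type, {2, 3, 4, 4}};
    migraphx::shape vs{migraphx::shape::float_type, {3}};
    auto x = mm->add_parameter("x", xs);
    auto v = mm->add_literal(migraphx::generate_literal(vs, 1));
    // {3} -> {3, 1, 1} so it lines up with the channel axis of NCHW.
    auto uv = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), v);
    // add_common_op multibroadcasts uv against x's {2, 3, 4, 4} before the sub.
    migraphx::add_common_op(*mm, migraphx::make_op("sub"), {x, uv});
    return 0;
}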
migraphx::program create_program() const
@@ -57,10 +74,10 @@ struct test_conv_bn_add : verify_program<test_conv_bn_add>
{migraphx::shape::float_type, {ochannels, ichannels, 1, 1}}, 2));
auto relu1 = mm->add_instruction(migraphx::make_op("relu"), x);
auto conv1 = mm->add_instruction(migraphx::make_op("convolution"), relu1, w);
auto bn1 = add_bn(*mm, conv1, ochannels, 1);
auto bn1 = add_bn(*mm, conv1);
auto relu2 = mm->add_instruction(migraphx::make_op("relu"), y);
auto conv2 = mm->add_instruction(migraphx::make_op("convolution"), relu2, v);
auto bn2 = add_bn(*mm, conv2, ochannels, 1);
auto bn2 = add_bn(*mm, conv2);
auto sum = mm->add_instruction(migraphx::make_op("add"), bn1, bn2);
mm->add_instruction(migraphx::make_op("relu"), sum);
return p;
@@ -27,6 +27,8 @@
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/common.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/common.hpp>
struct test_conv_bn_relu_pooling : verify_program<test_conv_bn_relu_pooling>
{
@@ -49,8 +51,26 @@ struct test_conv_bn_relu_pooling : verify_program<test_conv_bn_relu_pooling>
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
auto bn = mm->add_instruction(
migraphx::make_op("batch_norm_inference"), conv, scale, bias, mean, variance);
auto rt = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
auto usq_scale =
mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
auto usq_bias =
mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
auto usq_mean =
mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
auto usq_var =
mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), variance);
auto numer = add_common_op(*mm, migraphx::make_op("sub"), {conv, usq_mean});
auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
auto bn = add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
auto relu = mm->add_instruction(migraphx::make_op("relu"), bn);
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
@@ -27,22 +27,40 @@
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/common.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/common.hpp>
struct test_conv_bn_relu_pooling2 : verify_program<test_conv_bn_relu_pooling2>
{
static migraphx::instruction_ref
add_bn(migraphx::program& p, migraphx::instruction_ref x, std::size_t channels)
static migraphx::instruction_ref add_bn(migraphx::module& m, migraphx::instruction_ref x)
{
auto* mm = p.get_main_module();
migraphx::shape vars{migraphx::shape::float_type, {channels}};
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1 + channels)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2 + channels)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3 + channels)));
auto variance =
mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4 + channels)));
return mm->add_instruction(
migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
auto bn_lens = x->get_shape().lens();
auto c_len = bn_lens.at(1);
migraphx::shape vars{migraphx::shape::float_type, {c_len}};
auto scale = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 1 + c_len)));
auto bias = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 2 + c_len)));
auto mean = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 3 + c_len)));
auto variance = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 4 + c_len)));
auto rt = m.add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto eps = m.add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
auto usq_scale =
m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
auto usq_bias = m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
auto usq_mean = m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
auto usq_var =
m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), variance);
auto numer = add_common_op(m, migraphx::make_op("sub"), {x, usq_mean});
auto var_eps = add_common_op(m, migraphx::make_op("add"), {usq_var, eps});
auto denom = add_common_op(m, migraphx::make_op("pow"), {var_eps, rt});
auto div0 = add_common_op(m, migraphx::make_op("div"), {numer, denom});
auto r0 = add_common_op(m, migraphx::make_op("mul"), {div0, usq_scale});
return add_common_op(m, migraphx::make_op("add"), {r0, usq_bias});
}
migraphx::program create_program() const
{
migraphx::program p;
@@ -59,7 +77,7 @@ struct test_conv_bn_relu_pooling2 : verify_program<test_conv_bn_relu_pooling2>
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
x1,
w1);
auto bn1 = add_bn(p, conv1, 2048);
auto bn1 = add_bn(*mm, conv1);
auto x2 = mm->add_parameter("x2", xs2);
auto w2 = mm->add_parameter("w2", ws2);
auto conv2 = mm->add_instruction(
@@ -67,7 +85,7 @@ struct test_conv_bn_relu_pooling2 : verify_program<test_conv_bn_relu_pooling2>
{{"padding", {0, 0}}, {"stride", {2, 2}}, {"dilation", {1, 1}}}),
x2,
w2);
auto bn2 = add_bn(p, conv2, 2048);
auto bn2 = add_bn(*mm, conv2);
auto add = mm->add_instruction(migraphx::make_op("add"), bn1, bn2);
auto relu = mm->add_instruction(migraphx::make_op("relu"), add);
mm->add_instruction(migraphx::make_op("pooling",
@@ -27,26 +27,20 @@
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_batchnorm_1d : verify_program<test_batchnorm_1d>
struct test_conv_group_add : verify_program<test_conv_group_add>
{
const size_t size = 3;
const size_t channels = 3;
const size_t batches = 4;
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {batches, channels, size}};
migraphx::shape vars{migraphx::shape::float_type, {channels}};
auto x = mm->add_parameter("x", s);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
migraphx::shape s{migraphx::shape::float_type, {1, 68, 28, 28}};
auto x = mm->add_parameter("x", s);
auto w = mm->add_parameter("w", {migraphx::shape::float_type, {68, 17, 1, 1}});
auto b = mm->add_parameter("b", {migraphx::shape::float_type, {68}});
auto conv = mm->add_instruction(migraphx::make_op("convolution", {{"group", 4}}), x, w);
auto bb = mm->add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {1, 68, 28, 28}}}), b);
mm->add_instruction(migraphx::make_op("add"), conv, bb);
return p;
}
};
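A small standalone check (not part of the test) of the shape arithmetic behind the group attribute:

#include <cstddef>

int main()
{
    // Shapes from the test above: x {1, 68, 28, 28}, w {68, 17, 1, 1}, group = 4.
    constexpr std::size_t in_channels = 68;
    constexpr std::size_t groups      = 4;
    // Each group convolves in_channels / groups input channels, which is why
    // dim 1 of the weights is 17 rather than 68.
    static_assert(in_channels % groups == 0, "channels must divide evenly");
    static_assert(in_channels / groups == 17, "matches w lens {68, 17, 1, 1}");
    // A 1x1 convolution with default stride/padding keeps the 28x28 spatial
    // dims, so the bias broadcast to {1, 68, 28, 28} lines up with the output.
    return 0;
}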
@@ -25,43 +25,49 @@
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/common.hpp>
#include <migraphx/op/batch_norm_inference.hpp>
/*
Checking for y == 0 ? eps : y
struct test_batchnorm_2d_per_actv : verify_program<test_batchnorm_2d_per_actv>
Adding this because HIP fmod's sign changes when y = 0, resulting in nan and -nan not being
consistent between the ref and gpu implementations.
*/
migraphx::instruction_ref add_epsilon(migraphx::module& m, migraphx::instruction_ref y)
{
const size_t d1 = 2;
const size_t d2 = 4;
const size_t channels = 2;
const size_t batches = 3;
auto zero = m.add_literal(0.0f);
auto eps = m.add_literal(1e-3f);
auto op_y = add_common_op(m, migraphx::make_op("equal"), {y, zero});
return add_common_op(m, migraphx::make_op("where"), {op_y, eps, y});
}
struct test_fmod : verify_program<test_fmod>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {64}};
auto x = mm->add_parameter("x", s);
auto y = mm->add_parameter("y", s);
auto op_where = add_epsilon(*mm, y);
mm->add_instruction(migraphx::make_op("fmod"), x, op_where);
return p;
}
};
migraphx::shape s{migraphx::shape::float_type, {batches, channels, d1, d2}};
migraphx::shape vars{migraphx::shape::float_type, {channels, d1, d2}};
struct test_mod : verify_program<test_mod>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {64}};
auto x = mm->add_parameter("x", s);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op(
"batch_norm_inference",
{{"epsilon", 1.0e-6},
{"momentum", 0.9f},
{"bn_mode",
migraphx::to_value(migraphx::op::batch_norm_inference::per_activation)}}),
x,
scale,
bias,
mean,
variance);
auto y = mm->add_parameter("y", s);
auto op_where = add_epsilon(*mm, y);
mm->add_instruction(migraphx::make_op("mod"), x, op_where);
return p;
}
};
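For context, a standalone illustration with std::fmod (plain C++, not the MIGraphX ops) of the behavior the epsilon guard works around:

#include <cassert>
#include <cmath>

int main()
{
    // fmod keeps the sign of the dividend: -5 = 3 * (-1) + (-2).
    assert(std::fmod(-5.0, 3.0) == -2.0);
    // fmod(x, 0) yields NaN; whether that prints as nan or -nan is an
    // implementation detail, which is what made ref and gpu disagree.
    assert(std::isnan(std::fmod(5.0, 0.0)));
    return 0;
}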
@@ -29,14 +29,16 @@
#include <migraphx/op/reduce_mean.hpp>
migraphx::instruction_ref
add_layernorm(migraphx::module& m, migraphx::instruction_ref x, std::vector<size_t> dims)
migraphx::instruction_ref add_layernorm(migraphx::module& m,
migraphx::instruction_ref x,
std::vector<size_t> dims,
float eps = 1e-12f)
{
auto scale =
m.add_parameter("scale", migraphx::shape{migraphx::shape::float_type, {dims.back()}});
auto bias =
m.add_parameter("bias", migraphx::shape{migraphx::shape::float_type, {dims.back()}});
auto epsilon = m.add_literal(1e-12f);
auto epsilon = m.add_literal(eps);
auto exponent = m.add_literal(2.0f);
auto mean = m.add_instruction(migraphx::op::reduce_mean({2}), x);
@@ -88,6 +90,19 @@ struct test_layernorm2 : verify_program<test_layernorm2>
}
};
struct test_layernorm_eps : verify_program<test_layernorm_eps>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<size_t> dims = {1, 2, 5};
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, dims});
add_layernorm(*mm, x, dims, 1e-5f);
return p;
}
};
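test_layernorm_eps exercises the new eps argument (1e-5 in place of the 1e-12 default). Assuming the usual layer-norm definition that the reduce_mean-based helper implements, with statistics reduced over the trailing axis:

    y_{b,t,i} = \gamma_i \cdot \frac{x_{b,t,i} - \mu_{b,t}}{\sqrt{\sigma^2_{b,t} + \epsilon}} + \beta_i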
struct test_layernorm_triadd : verify_program<test_layernorm_triadd>
{
migraphx::program create_program() const
@@ -21,23 +21,23 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_SOFTMAX_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_SOFTMAX_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/common.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void softmax(hipStream_t stream, const argument& result, const argument& arg, int64_t axis);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
struct test_softmax_large3 : verify_program<test_softmax_large3>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {2, 4}});
auto large = mm->add_literal({migraphx::shape{migraphx::shape::float_type}, {100}});
auto add = migraphx::add_common_op(*mm, migraphx::make_op("mul"), {x, large});
mm->add_instruction(migraphx::make_op("softmax", {{"axis", -1}}), add);
return p;
}
};
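Scaling x by 100 pushes the softmax inputs to magnitudes where a naive exp overflows in float32, so this test presumably stresses the numerically stable, max-subtracted formulation that softmax implementations conventionally use:

    \mathrm{softmax}(z)_i = \frac{e^{z_i - \max_j z_j}}{\sum_k e^{z_k - \max_j z_j}}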
@@ -26,29 +26,33 @@
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_batchnorm_3d : verify_program<test_batchnorm_3d>
#include <migraphx/apply_alpha_beta.hpp>
struct test_unbatched_gemm_1 : verify_program<test_unbatched_gemm_1>
{
const size_t d1 = 2;
const size_t d2 = 2;
const size_t d3 = 2;
const size_t channels = 2;
const size_t batches = 2;
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape m1_shape{migraphx::shape::float_type, {2, 32, 64}};
migraphx::shape m2_shape{migraphx::shape::float_type, {64, 64}};
migraphx::shape m3_shape{migraphx::shape::float_type, {2, 32, 192}};
auto l1 = mm->add_parameter("1", m1_shape);
auto l2 = mm->add_literal(migraphx::generate_literal(m2_shape));
l2 = mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {2, 64, 64}}}),
l2);
auto l3 = mm->add_literal(migraphx::generate_literal(m2_shape));
l3 = mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {2, 64, 64}}}),
l3);
auto l4 = mm->add_literal(migraphx::generate_literal(m2_shape));
l4 = mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {2, 64, 64}}}),
l4);
auto concat = mm->add_instruction(migraphx::make_op("concat", {{"axis", 2}}), l2, l3, l4);
migraphx::shape s{migraphx::shape::float_type, {batches, channels, d1, d2, d3}};
migraphx::shape vars{migraphx::shape::float_type, {channels}};
auto x = mm->add_parameter("x", s);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
auto l5 = mm->add_parameter("3", m3_shape);
float alpha = 1.0f;
float beta = 1.0f;
migraphx::add_apply_alpha_beta(
*mm, {l1, concat, l5}, migraphx::make_op("dot"), alpha, beta);
return p;
}
};
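add_apply_alpha_beta wraps dot in the standard GEMM epilogue; with alpha = beta = 1 this amounts to, per batch (lens {2, 32, 64} times {2, 64, 192}, plus {2, 32, 192}):

    D = \alpha \, (A B) + \beta \, C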
@@ -26,28 +26,21 @@
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_batchnorm_inference_2 : verify_program<test_batchnorm_inference_2>
#include <migraphx/apply_alpha_beta.hpp>
struct test_unbatched_gemm_2 : verify_program<test_unbatched_gemm_2>
{
const size_t width = 14;
const size_t height = 14;
const size_t channels = 256;
const size_t batches = 1;
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape m1_shape{migraphx::shape::float_type, {4, 32, 64}};
migraphx::shape m2_shape{migraphx::shape::float_type, {64, 64}};
auto l1 = mm->add_parameter("1", m1_shape);
auto l2 = mm->add_literal(migraphx::generate_literal(m2_shape));
l2 = mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {4, 64, 64}}}),
l2);
migraphx::shape s{migraphx::shape::float_type, {batches, channels, height, width}};
migraphx::shape vars{migraphx::shape::float_type, {channels}};
auto x = mm->add_parameter("x", s);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
mm->add_instruction(migraphx::make_op("dot"), l1, l2);
return p;
}
};
@@ -116,6 +116,9 @@ def main():
model = migraphx.parse_onnx(model_name, default_dim_value=batch)
if args.verbose:
print(model)
model.compile(migraphx.get_target('gpu'), offload_copy=False)
params = {}
@@ -21,7 +21,10 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
import string, sys, re, runpy
import string
import sys
import re
import runpy
from functools import wraps
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
@@ -308,18 +311,39 @@ class Parameter:
return self.substitute('${type} ${name}', prefix=prefix)
def virtual_output_args(self, prefix: Optional[str] = None) -> List[str]:
return [
'&{prefix}{n}'.format(prefix=prefix or '', n=n)
for t, n in self.cparams
]
container_type = self.type.remove_generic().basic().str()
decl_list: List[str] = []
container = (container_type == "std::vector"
or container_type == "vector")
for t, n in self.cparams:
if not decl_list and container:
decl_list.append('{prefix}{n}.data()'.format(prefix=prefix
or '',
n=n))
else:
decl_list.append('&{prefix}{n}'.format(prefix=prefix or '',
n=n))
return decl_list
def virtual_output_declarations(self,
prefix: Optional[str] = None) -> List[str]:
return [
'std::remove_pointer_t<{type}> {prefix}{n};'.format(
type=Type(t).str(), prefix=prefix or '', n=n)
for t, n in self.cparams
]
container_type = self.type.remove_generic().basic().str()
container = (container_type == "std::vector"
or container_type == "vector")
decl_list: List[str] = []
for t, n in self.cparams:
if not decl_list and container:
inner_t = self.type.inner_type()
if inner_t:
decl_list.append(
'std::array<{inner_t}, 1024> {prefix}{n};'.format(
inner_t=inner_t.str(), prefix=prefix or '', n=n))
else:
decl_list.append(
'std::remove_pointer_t<{type}> {prefix}{n}'.format(
type=Type(t).str(), prefix=prefix or '', n=n))
decl_list[-1] += '=1024;' if container else ';'
return decl_list
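Reading the two methods together, a vector-valued virtual output such as std::vector<const char*> would come out roughly as follows (a sketch inferred from the format strings above; out_names is an illustrative name and size_t* an assumed size-parameter type):

// generated declarations (virtual_output_declarations):
std::array<const char*, 1024> out_names;
std::remove_pointer_t<size_t*> out_names_size = 1024;
// generated call arguments (virtual_output_args):
//     out_names.data(), &out_names_size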
def virtual_output(self, prefix: Optional[str] = None) -> str:
write = self.virtual_write
@@ -694,9 +718,14 @@ def generate_cpp_header() -> str:
[c.generate() for c in cpp_classes])
def cwrap(name: str) -> Callable:
c_type_map: Dict[str, Type] = {}
def cwrap(name: str, c_type: Optional[str] = None) -> Callable:
def with_cwrap(f):
type_map[name] = f
if c_type:
c_type_map[name] = Type(c_type)
@wraps(f)
def decorated(*args, **kwargs):
@@ -917,6 +946,9 @@ def vector_c_wrap(p: Parameter) -> None:
# Not a generic type
if not inner:
return
if inner.str() in c_type_map:
inner = c_type_map[inner.str()]
t = inner.add_pointer()
if p.type.is_reference():
if p.type.is_const():
@@ -927,6 +959,12 @@ def vector_c_wrap(p: Parameter) -> None:
p.add_size_param()
p.bad_param('${name} == nullptr or ${size} == nullptr',
'Null pointer')
elif p.virtual:
p.add_param(t)
p.add_size_param()
p.bad_param('${name} == nullptr or ${size} == nullptr',
'Null pointer')
p.virtual_write = '{${name}.begin(), ${name}.begin()+${size}}; // cppcheck-suppress returnDanglingLifetime'
else:
p.add_param(t)
p.bad_param('${name} == nullptr', 'Null pointer')
@@ -946,7 +984,7 @@ def vector_c_wrap(p: Parameter) -> None:
p.write = ['std::copy(${result}.begin(), ${result}.end(), ${name})']
@cwrap('std::string')
@cwrap('std::string', 'char*')
def string_c_wrap(p: Parameter) -> None:
t = Type('char*')
if p.returns:
@@ -1061,9 +1099,9 @@ struct ${ctype} {
c_api_virtual_impl = Template('''
${return_type} ${name}(${params}) const
{
${output_decls}
if (${fname} == nullptr)
throw std::runtime_error("${name} function is missing.");
${output_decls}
std::array<char, 256> exception_msg;
exception_msg.front() = '\\0';
auto api_error_result = ${fname}(${args});
@@ -21,6 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/execution_environment.hpp>
#include <migraphx/migraphx.h>
#include <migraphx/rank.hpp>
#include <migraphx/shape.hpp>
@@ -166,6 +167,13 @@ void set_output_names(tf_options& options, std::vector<const char*> names)
options.output_node_names = std::vector<std::string>(names.begin(), names.end());
}
std::vector<argument>
run_async(program& p, const parameter_map& params, void* s, std::string_view name)
{
execution_environment exec_env{any_ptr(s, name), true};
return p.eval(params, exec_env);
}
template <class Value>
std::vector<const char*> get_names(const std::unordered_map<std::string, Value>& m)
{
@@ -265,11 +273,18 @@ struct experimental_custom_op
template <class CustomOp>
struct custom_operation
{
template <class Self, class F>
static auto reflect(Self&, F)
{
return pack();
}
value attributes() const
{
return {{"custom_op", true}, {"target", op.runs_on_offload_target() ? "gpu" : "cpu"}};
}
CustomOp op;
std::string name() const { return op.xobject.name; }
@@ -284,6 +299,23 @@
{
return op.compute(std::move(ctx), std::move(output_shape), std::move(inputs));
}
std::ptrdiff_t output_alias(std::vector<shape> inputs) const
{
auto alias_vec = op.output_alias(std::move(inputs));
// TODO: For now, only support one output alias
if(alias_vec.empty())
{
return -1;
}
if(alias_vec.size() > 1)
{
MIGRAPHX_THROW("Currently, CustomOps in MIGraphX only support one output_alias");
}
return alias_vec.front();
}
bool runs_on_offload_target() const { return op.runs_on_offload_target(); }
};
template <class CustomOp>
@@ -26,7 +26,6 @@
#include <stdlib.h>
#include <stdbool.h>
// Add new types here
// clang-format off
#define MIGRAPHX_SHAPE_VISIT_TYPES(m) \
@@ -66,12 +66,21 @@ any_ptr get_queue_context(T&)
{
return {};
}
template <class T>
void wait_for_context(T&, any_ptr)
{
}
template <class T>
void finish_on_context(T&, any_ptr){}
<%
interface('context',
virtual('to_value', returns = 'value', const = True, default = 'to_value_context'),
virtual('from_value', v = 'const value&', default = 'from_value_context'),
virtual('get_queue', returns = 'any_ptr', default = 'get_queue_context'),
virtual('wait_for', queue = 'any_ptr', returns = 'void', default = 'wait_for_context'),
virtual('finish_on', queue = 'any_ptr', returns = 'void', default = 'finish_on_context'),
virtual('finish', returns = 'void', const = True)) %>
inline void migraphx_to_value(value& v, const context& ctx)