"include/ck/utility/functional.hpp" did not exist on "5e5c27a63b1637556a17e17546147da6cb6d732e"
Commit c4b1102e authored by charlie

Merge branch 'dyn_model_test' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into dyn_model_test

parents 5fc48e77 31065c7d
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/gpu/leaky_relu.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/miopen.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
shape miopen_leaky_relu::compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{inputs, *this}.has(2).not_broadcasted();
return inputs.at(1);
}
argument miopen_leaky_relu::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
float alpha = 1;
float beta = 0;
auto x_desc = make_tensor(args[0].get_shape());
auto y_desc = make_tensor(output_shape);
miopenActivationForward(ctx.get_stream().get_miopen(),
ad.get(),
&alpha,
x_desc.get(),
args[0].implicit(),
&beta,
y_desc.get(),
args[1].implicit());
return args[1];
}
void miopen_leaky_relu::finalize(context&, const shape&, const std::vector<shape>&)
{
ad = make_leaky_relu(op.alpha);
}
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
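Note: `ad` is the MIOpen activation descriptor that finalize() builds via make_leaky_relu. Below is a minimal sketch of what such a helper can look like, assuming MIOpen's activation-descriptor API; the name make_leaky_relu_sketch and the raw-handle return are illustrative only, not the real helper.

inline miopenActivationDescriptor_t make_leaky_relu_sketch(double alpha)
{
    miopenActivationDescriptor_t desc = nullptr;
    miopenCreateActivationDescriptor(&desc);
    // leaky relu: f(x) = x for x >= 0, alpha * x otherwise; beta and gamma are unused
    miopenSetActivationDescriptor(desc, miopenActivationLEAKYRELU, alpha, 0, 0);
    return desc; // the real helper returns an RAII-managed descriptor instead
}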
@@ -26,6 +26,8 @@
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction_ref.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/op/deconvolution.hpp>
@@ -35,15 +37,12 @@
#include <migraphx/op/quant_convolution.hpp>
#include <migraphx/op/quant_dot.hpp>
#include <migraphx/gpu/batch_norm_inference.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/convolution.hpp>
#include <migraphx/gpu/deconvolution.hpp>
#include <migraphx/gpu/device_name.hpp>
#include <migraphx/gpu/gemm.hpp>
#include <migraphx/gpu/int8_conv_pack.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/quant_convolution.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/compiler.hpp>
#include <migraphx/iterator_for.hpp>
@@ -96,14 +95,11 @@ struct miopen_apply
add_extend_op("argmax");
add_extend_op("argmin");
// add_extend_op("elu");
add_extend_op("gather");
// add_extend_op("leaky_relu");
add_extend_op("logsoftmax");
add_extend_op("lrn");
add_extend_op("multinomial");
add_extend_op("nonzero");
add_extend_op("pad");
add_extend_op("pooling");
add_extend_op("prefix_scan_sum");
add_extend_op("reverse");
@@ -113,16 +109,15 @@ struct miopen_apply
add_extend_op("scatter_none");
add_extend_op("topk");
add_batch_norm_inference_op();
add_convolution_op();
add_deconvolution_op();
add_convolution_op<op::convolution>("convolution");
add_convolution_op<op::deconvolution>("deconvolution");
add_convolution_op<op::quant_convolution>("quant_convolution");
add_gemm_op<op::dot>("dot");
add_gemm_op<op::quant_dot>("quant_dot");
add_if_op();
add_loop_op();
add_neg_op();
add_nms_op();
add_quant_convolution_op();
}
void copy_params() const
@@ -171,7 +166,8 @@ struct miopen_apply
init();
for(auto it = mod->begin(); it != mod->end(); it++)
{
auto s = it->get_shape();
auto s = it->get_shape();
auto attrs = it->get_operator().attributes();
if(apply_map.count(it->name()) > 0)
{
check_shape(s, apply_map.at(it->name())(it));
@@ -180,11 +176,37 @@ struct miopen_apply
{
check_shape(s, insert_precompile_op(it));
}
else if(attrs.contains("target"))
{
check_shape(s, insert_custom_op(it, attrs));
}
}
copy_params();
}
instruction_ref insert_custom_op(instruction_ref ins, const value& attrs) const
{
const auto& custom_op = ins->get_operator();
if(attrs.at("target") == "cpu")
{
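// The inputs live on the GPU at this point: copy each one to the host, sync
// the stream so the copies are visible, run the op on the host, then copy the
// result back into the preallocated GPU output buffer (the last input).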
auto s = ins->get_shape();
std::vector<instruction_ref> cpu_inputs;
auto inputs = ins->inputs();
auto output = inputs.back();
std::transform(
inputs.begin(), inputs.end(), std::back_inserter(cpu_inputs), [&](auto in) {
return mod->insert_instruction(ins, make_op("hip::copy_from_gpu"), in);
});
cpu_inputs.front() =
mod->insert_instruction(ins, make_op("hip::sync_stream"), cpu_inputs);
auto cpu_out = mod->insert_instruction(ins, custom_op, cpu_inputs);
auto gpu_out =
mod->insert_instruction(ins, make_op("hip::copy_to_gpu"), cpu_out, output);
return mod->replace_instruction(ins, gpu_out);
}
return ins;
}
instruction_ref insert_precompile_op(instruction_ref ins) const
{
auto output = insert_allocation(ins, ins->get_shape());
@@ -203,38 +225,6 @@ struct miopen_apply
return mod->insert_instruction(ins, make_op("allocate", {{"shape", to_value(s)}}));
}
void add_convolution_op()
{
apply_map.emplace("convolution", [=](instruction_ref ins) {
auto&& op = any_cast<op::convolution>(ins->get_operator());
auto conv = miopen_convolution{op, make_conv(op)};
auto ws = conv.find(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
auto workspace = insert_allocation(ins, ws);
auto output = insert_allocation(ins, ins->get_shape());
return mod->replace_instruction(
ins, conv, ins->inputs().at(0), ins->inputs().at(1), workspace, output);
});
}
void add_deconvolution_op()
{
apply_map.emplace("deconvolution", [=](instruction_ref ins) {
auto&& op = any_cast<op::deconvolution>(ins->get_operator());
auto conv = miopen_deconvolution{op, make_deconv(op)};
auto ws = conv.find(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
auto workspace = insert_allocation(ins, ws);
auto output = insert_allocation(ins, ins->get_shape());
return mod->replace_instruction(
ins, conv, ins->inputs().at(0), ins->inputs().at(1), workspace, output);
});
}
template <typename Op>
void add_gemm_op(const std::string& name)
{
@@ -248,31 +238,33 @@ struct miopen_apply
});
}
void add_quant_convolution_op()
template <typename Op>
void add_convolution_op(const std::string& name)
{
apply_map.emplace("quant_convolution", [=](instruction_ref ins) {
auto&& op = any_cast<op::quant_convolution>(ins->get_operator());
shape ws;
miopen_quant_convolution conv;
auto compile_quant_conv_with_format = [&](bool format) {
conv = miopen_quant_convolution{op, format, make_conv(op)};
ws = conv.find(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
apply_map.emplace(name, [=](instruction_ref ins) {
operation conv =
miopen_convolution<Op>{any_cast<Op>(ins->get_operator()), int8_x4_format};
migraphx::context ctx = get_context();
size_t ws_bytes = 0;
auto compile_conv_with_format = [&](bool format) {
conv = miopen_convolution<Op>{any_cast<Op>(ins->get_operator()), format};
auto ws = conv.compile(ctx, ins->get_shape(), to_shapes(ins->inputs()));
ws_bytes = ws.get("workspace", 0);
};
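// compile() returns a value map; the "workspace" entry is the scratch size in
// bytes required by the chosen MIOpen solution, allocated below as an int8 buffer.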
try
{
compile_quant_conv_with_format(int8_x4_format);
{ // for regular convolution and deconvolution, this try always succeeds
compile_conv_with_format(int8_x4_format);
}
catch(migraphx::exception&)
{
// In case no solver supports the default format, retry using the other format.
compile_quant_conv_with_format(not int8_x4_format);
compile_conv_with_format(not int8_x4_format);
}
auto args = ins->inputs();
auto workspace = insert_allocation(ins, ws);
auto output = insert_allocation(ins, ins->get_shape());
auto workspace = insert_allocation(ins, shape{shape::int8_type, {ws_bytes}});
return mod->replace_instruction(ins, conv, args[0], args[1], workspace, output);
});
}
@@ -307,43 +299,6 @@ struct miopen_apply
});
}
void add_batch_norm_inference_op()
{
apply_map.emplace("batch_norm_inference", [=](instruction_ref ins) {
auto&& op = any_cast<op::batch_norm_inference>(ins->get_operator());
auto output = insert_allocation(ins, ins->get_shape());
shape old_shape = ins->inputs().at(1)->get_shape();
auto input = ins->inputs()[0];
auto input_lens = input->get_shape().lens();
std::vector<int64_t> rsp_lens(input_lens.size(), 1);
// for per_activation case, also need to reshape input
if(op.bn_mode == op::batch_norm_inference::per_activation)
{
std::copy(input_lens.begin() + 1, input_lens.end(), rsp_lens.begin() + 1);
}
else
{
rsp_lens[1] = static_cast<int64_t>(old_shape.elements());
}
auto reshape_op = op::reshape{rsp_lens};
std::vector<instruction_ref> reshapes;
std::transform(ins->inputs().begin() + 1,
ins->inputs().end(),
std::back_inserter(reshapes),
[&](auto i) { return mod->insert_instruction(ins, reshape_op, i); });
return mod->replace_instruction(ins,
miopen_batch_norm_inference{op},
input,
reshapes[0],
reshapes[1],
reshapes[2],
reshapes[3],
output);
});
}
// use 0 - input to represent neg
void add_neg_op()
{
......
@@ -21,6 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "migraphx/make_op.hpp"
#include <migraphx/gpu/mlir.hpp>
#ifdef MIGRAPHX_MLIR
@@ -43,8 +44,9 @@
#include <migraphx/gpu/code_object_op.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/device_name.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/perfdb.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/permutation.hpp>
#include <deque>
#include <variant>
@@ -370,7 +372,11 @@ struct mlir_program
mlir_operation_state& add_results(const std::vector<shape>& outputs)
{
auto x = prog->make_tensors(outputs);
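// rebuild each output shape from its type and lens only, dropping any
// non-standard strides before creating the MLIR tensor types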
std::vector<shape> reshaped(outputs.size());
std::transform(outputs.begin(), outputs.end(), reshaped.begin(), [](const shape& r) {
return shape{r.type(), r.lens()};
});
auto x = prog->make_tensors(reshaped);
mlirOperationStateAddResults(&op_state, x.size(), x.data());
return *this;
}
@@ -502,11 +508,12 @@ struct mlir_program
{
pp =
problem_params{ins->get_operator(), to_shapes(ins->inputs()), ins->get_shape()};
std::string tuned = get_tune_params();
// check if HW supports xdlops
bool xdlops = contains(get_xdlops_archs(), target_name);
std::string tuned = get_tune_params(xdlops);
if(not tuned.empty())
ops.add_attributes({{"perf_config", tuned}});
// check if HW supports xdlops
if(contains(get_xdlops_archs(), target_name))
if(xdlops)
ops.add_attributes({{"xdlopsV2", true}});
}
@@ -571,7 +578,7 @@ struct mlir_program
MIGRAPHX_THROW("Failed to compile mlir program");
}
std::string get_tune_params() { return get_mlir_perf_for_conv(pp); }
std::string get_tune_params(bool xdlops) { return get_mlir_perf_for_conv(pp, xdlops); }
mlir_context ctx;
MlirLocation location;
@@ -589,8 +596,54 @@ std::string dump_mlir(const module& m)
return mlir_print(&mlirOperationPrint, mod_op);
}
code_object_op compile_mlir(const context&, const module& m)
void adjust_param_shapes(module& m, const std::vector<instruction_ref>& inputs)
{
auto names = m.get_parameter_names();
std::sort(names.begin(), names.end());
for(auto i : range(names.size()))
{
const auto& name = names[i];
const auto& input = inputs[i]->get_shape();
auto param = m.get_parameter(name);
if(input.standard())
continue;
auto lens = input.lens();
auto strides = input.strides();
std::vector<operation> ops;
if(input.transposed())
{
auto perm = find_permutation(input);
auto iperm = invert_permutation(perm);
lens = reorder_dims(lens, iperm);
strides = reorder_dims(strides, iperm);
ops.push_back(make_op("transpose", {{"permutation", perm}}));
}
if(input.broadcasted())
{
std::transform(lens.begin(),
lens.end(),
strides.begin(),
lens.begin(),
[](auto len, auto stride) -> std::size_t {
if(stride == 0)
return 1;
return len;
});
ops.push_back(make_op("multibroadcast", {{"out_lens", input.lens()}}));
}
auto new_param =
std::accumulate(ops.begin(),
ops.end(),
m.add_parameter(name + ".0", shape{input.type(), lens}),
[&](auto x, auto op) { return m.insert_instruction(param, op, x); });
m.replace_instruction(param, new_param);
m.remove_instruction(param);
}
}
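// For example, an input with lens {2, 3} and strides {0, 1} (a broadcast along
// dim 0) is re-added as a standard {1, 3} parameter followed by a
// multibroadcast back to {2, 3}, so the MLIR module body only ever sees
// standard shapes.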
code_object_op compile_mlir(const context&, module m, const std::vector<instruction_ref>& inputs)
{
adjust_param_shapes(m, inputs);
const bool trace = enabled(MIGRAPHX_TRACE_MLIR{});
if(trace)
std::cout << m << std::endl;
@@ -662,13 +715,19 @@ instruction_ref insert_mlir(module& m,
std::string dump_mlir(const module&) { return {}; }
code_object_op compile_mlir(const context&, const module&) { return {}; }
template <class T>
void use(T&)
{
}
// Disabling the clang-tidy warning; these value parameters are intentionally unused.
// NOLINTBEGIN(performance-unnecessary-value-param)
code_object_op compile_mlir(const context&, module, const std::vector<instruction_ref>&)
{
return {};
}
// NOLINTEND(performance-unnecessary-value-param)
instruction_ref
// cppcheck-suppress funcArgNamesDifferent
insert_mlir(module& m, instruction_ref, code_object_op co, const std::vector<instruction_ref>&)
......
@@ -108,16 +108,17 @@ auto query_miopen_db(const std::string& query)
} // namespace
std::string get_mlir_perf_for_conv(const problem_params& pp)
std::string get_mlir_perf_for_conv(const problem_params& pp, bool xdlops)
{
std::string query = "select P.* \
std::string solver = xdlops ? "ConvMlirIgemmFwdXdlops" : "ConvMlirIgemmFwd";
std::string query = "select P.* \
from perf_db P, config C \
where P.config = C.id AND \
P.solver = 'ConvMlirIgemmFwdXdlops' AND \
P.solver = '${solver}' AND \
${config}";
auto results =
query_miopen_db(interpolate_string(query, {{"config", generate_miopen_config(pp)}}));
auto results = query_miopen_db(
interpolate_string(query, {{"config", generate_miopen_config(pp)}, {"solver", solver}}));
if(results.empty())
return "";
return results.front().at("params");
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/gpu/quant_convolution.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/generate.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
shape miopen_quant_convolution::compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{inputs, *this}.has(4).standard();
return op.normalize_compute_shape({inputs.at(0), inputs.at(1)});
}
argument miopen_quant_convolution::compute(context& ctx,
const shape& output_shape,
const std::vector<argument>& args) const
{
auto x_desc = make_tensor(args[0].get_shape(), int8_x4_format);
auto w_desc = make_tensor(args[1].get_shape(), int8_x4_format);
auto y_desc = make_tensor(output_shape);
float alpha = 1;
float beta = 0;
auto status = miopenConvolutionForward(ctx.get_stream().get_miopen(),
&alpha,
x_desc.get(),
args[0].implicit(),
w_desc.get(),
args[1].implicit(),
cd.get(),
algo,
&beta,
y_desc.get(),
args[3].implicit(),
args[2].implicit(),
args[2].get_shape().bytes());
if(status != miopenStatusSuccess)
{
MIGRAPHX_THROW("QUANT_CONVOLUTION: run convolution forward failed");
}
return args[3];
}
shape miopen_quant_convolution::find(context& ctx,
const shape& output_shape,
std::vector<shape> inputs)
{
shape workspace_shape{};
auto x_desc = make_tensor(inputs[0], int8_x4_format);
auto w_desc = make_tensor(inputs[1], int8_x4_format);
auto y_desc = make_tensor(output_shape);
std::size_t workspace_size = 0;
miopenConvolutionForwardGetWorkSpaceSize(ctx.get_stream().get_miopen(),
w_desc.get(),
x_desc.get(),
cd.get(),
y_desc.get(),
&workspace_size);
workspace_shape = shape{shape::int8_type, {workspace_size}};
auto x_shape = inputs[0];
auto w_shape = inputs[1];
if(int8_x4_format)
{
x_shape = pack_int8_shape(x_shape);
w_shape = pack_int8_shape(w_shape);
}
auto x = to_gpu(generate_argument(x_shape));
auto w = to_gpu(generate_argument(w_shape));
auto y = allocate_gpu(output_shape);
auto workspace = allocate_gpu(workspace_shape);
int algo_count = 1;
miopenConvAlgoPerf_t perf;
auto status = miopenFindConvolutionForwardAlgorithm(ctx.get_stream().get_miopen(),
x_desc.get(),
x.implicit(),
w_desc.get(),
w.implicit(),
cd.get(),
y_desc.get(),
y.implicit(),
1,
&algo_count,
&perf,
workspace.implicit(),
workspace_size,
false);
if(status != miopenStatusSuccess)
MIGRAPHX_THROW("MIOpen Quant Convolution: find convolution failed");
algo = perf.fwd_algo;
size_t solution_count;
status = miopenConvolutionForwardGetSolutionCount(ctx.get_stream().get_miopen(),
w_desc.get(),
x_desc.get(),
cd.get(),
y_desc.get(),
&solution_count);
if(status != miopenStatusSuccess)
MIGRAPHX_THROW("MIOpen Quant Convolution: get solution count failed");
std::vector<miopenConvSolution_t> solutions(solution_count);
status = miopenConvolutionForwardGetSolution(ctx.get_stream().get_miopen(),
w_desc.get(),
x_desc.get(),
cd.get(),
y_desc.get(),
solution_count,
&solution_count,
solutions.data());
if(status != miopenStatusSuccess)
MIGRAPHX_THROW("MIOpen Quant Convolution: get solution failed");
solution_id = solutions.front().solution_id;
return shape{shape::int8_type, {perf.memory}};
}
void miopen_quant_convolution::finalize(context& ctx,
const shape& output_shape,
std::vector<shape> inputs)
{
if(cd == nullptr)
cd = make_conv(op);
if(solution_id == 0)
{
// Check that workspace hasn't changed
auto size = inputs.at(2).bytes();
auto ws = find(ctx, output_shape, inputs);
if(ws.bytes() > size)
MIGRAPHX_THROW("MIOpen Quant Convolution: workspace has changed during finalization.");
}
auto x_desc = make_tensor(inputs[0], int8_x4_format);
auto w_desc = make_tensor(inputs[1], int8_x4_format);
auto y_desc = make_tensor(output_shape);
auto status = miopenConvolutionForwardCompileSolution(ctx.get_stream().get_miopen(),
w_desc.get(),
x_desc.get(),
cd.get(),
y_desc.get(),
solution_id);
if(status != miopenStatusSuccess)
MIGRAPHX_THROW("MIOpen Quant Convolution: compile solution failed");
}
shape miopen_quant_convolution::pack_int8_shape(const shape& s) const
{
if(s.type() != shape::int8_type)
{
MIGRAPHX_THROW("PACK_INT8_SHAPE: only process int8_type");
}
auto lens = s.lens();
auto strides = s.strides();
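// MIOpen's int8x4 vectorized format requires the channel dim to be padded to a
// multiple of 4: e.g. lens {1, 3, 8, 8} become {1, 4, 8, 8}, and the batch
// stride is recomputed to cover the padded channels.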
lens[1] = (lens[1] + 3) / 4 * 4;
strides[0] = strides[1] * lens[1];
return {s.type(), lens, strides};
}
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
@@ -41,7 +41,6 @@
#include <migraphx/propagate_constant.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/replace_allocate.hpp>
#include <migraphx/rewrite_batchnorm.hpp>
#include <migraphx/rewrite_gelu.hpp>
#include <migraphx/rewrite_pooling.hpp>
#include <migraphx/rewrite_quantization.hpp>
@@ -110,8 +109,6 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
dead_code_elimination{},
insert_pad{},
dead_code_elimination{},
rewrite_batchnorm{},
dead_code_elimination{},
rewrite_rnn{},
dead_code_elimination{},
inline_module{},
@@ -141,12 +138,12 @@
dead_code_elimination{},
pack_int8_args{},
dead_code_elimination{},
adjust_allocation{gpu_allocation_model{}},
dead_code_elimination{},
fuse_ops{&ctx, options.fast_math},
dead_code_elimination{},
replace_allocate{gpu_allocation_model{}, options.offload_copy},
dead_code_elimination{},
adjust_allocation{gpu_allocation_model{}},
dead_code_elimination{},
compile_ops{&ctx},
dead_code_elimination{},
write_literals{&ctx},
......
@@ -26,7 +26,6 @@
#include <migraphx/instruction.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/op/identity.hpp>
#include <migraphx/op/batch_norm_inference.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/op/deconvolution.hpp>
#include <migraphx/op/quant_convolution.hpp>
@@ -73,84 +72,6 @@ typename std::conditional_t<std::is_integral<T>{}, std::make_signed<T>, std::ena
return x;
}
//
// ref implementation of batch norm for inference
//
// inputs are:
// args[0] -> input data buffer
// args[1] -> gamma
// args[2] -> bias
// args[3] -> mini batch mean
// args[4] -> mini batch variance
//
// The equation to compute batch norm for inference is:
//
// output[i] = gamma * (input[i] - mean) / sqrt(variance + epsilon) + bias
//
// the input data format should be nchw
//
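// For example, with gamma = 0.5, bias = 0.1, mean = 1, variance = 3,
// epsilon = 1e-5 and input[i] = 2:
//   output[i] = 0.5 * (2 - 1) / sqrt(3 + 1e-5) + 0.1 ≈ 0.389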
struct ref_batch_norm_inference
{
op::batch_norm_inference op;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::reflect(self.op, f);
}
std::string name() const { return "ref::batch_norm_inference"; }
shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
argument compute(context&, const shape& output_shape, std::vector<argument> args) const
{
argument output{output_shape};
double epsilon = op.epsilon;
auto input = args[0];
auto arg_gamma = args[1];
auto arg_bias = args[2];
auto mini_batch_mean = args[3];
auto mini_batch_variance = args[4];
if(op.bn_mode == op::batch_norm_inference::spatial)
{
visit_all(output, input, mini_batch_mean, mini_batch_variance, arg_gamma, arg_bias)(
[&](auto result, auto buffer, auto mean, auto variance, auto gamma, auto bias) {
par_for(output_shape.elements(), [&](auto i) {
auto idx = output_shape.multi(i);
auto c = idx[1];
assert((variance[c] + epsilon) > 0);
result[i] =
gamma[c] * (buffer[i] - mean[c]) / std::sqrt(variance[c] + epsilon) +
bias[c];
});
});
}
if(op.bn_mode == op::batch_norm_inference::per_activation)
{
visit_all(output, input, mini_batch_mean, mini_batch_variance, arg_gamma, arg_bias)(
[&](auto result, auto buffer, auto mean, auto variance, auto gamma, auto bias) {
par_for(output_shape.elements(), [&](auto i) {
auto idx = output_shape.multi(i);
idx[0] = 0;
auto index = output_shape.index(idx);
assert((variance[index] + epsilon) > 0);
result[i] = gamma[index] * (buffer[i] - mean[index]) /
std::sqrt(variance[index] + epsilon) +
bias[index];
});
});
}
return output;
}
};
MIGRAPHX_REGISTER_OP(ref_batch_norm_inference)
struct ref_lrn
{
op::lrn op;
@@ -643,8 +564,6 @@ struct ref_apply
void init()
{
apply_map["batch_norm_inference"] =
extend_op<ref_batch_norm_inference, op::batch_norm_inference>();
apply_map["convolution"] = extend_op<ref_convolution<op::convolution>, op::convolution>();
apply_map["dot"] = extend_op<ref_gemm, op::dot>();
apply_map["quant_dot"] = extend_op<ref_quant_gemm, op::quant_dot>();
......
@@ -23,6 +23,7 @@
*/
#include <migraphx/tf/op_parser.hpp>
#include <migraphx/tf/tf_parser.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/make_op.hpp>
@@ -38,16 +39,37 @@ struct parse_batchnorm : op_parser<parse_batchnorm>
instruction_ref parse(const op_desc& /*opd*/,
const tf_parser& /*parser*/,
tf_parser::node_info info,
const std::vector<instruction_ref>& args) const
std::vector<instruction_ref> args) const
{
float epsilon = 1e-5f;
float momentum = 0.9f;
// different default epsilon than in ONNX
float epsilon = 1e-4f;
if(contains(info.attributes, "epsilon"))
{
epsilon = info.attributes.at("epsilon").f();
}
auto op = make_op("batch_norm_inference", {{"epsilon", epsilon}, {"momentum", momentum}});
return info.add_instruction(op, args);
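// expand batch norm into its elementwise definition:
// y = scale * (x - mean) / sqrt(variance + epsilon) + bias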
auto x_lens = args[0]->get_shape().lens();
auto x_type = args[0]->get_shape().type();
// unsqueeze tensors of shape (C) to broadcast correctly
auto rt = info.add_literal(migraphx::literal{migraphx::shape{x_type}, {0.5}});
auto eps = info.add_literal(migraphx::literal{migraphx::shape{x_type}, {epsilon}});
auto scale_unsqueeze =
info.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), args[1]);
auto bias_unsqueeze =
info.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), args[2]);
auto mean_unsqueeze =
info.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), args[3]);
auto var_unsqueeze =
info.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), args[4]);
auto numer = info.add_broadcastable_binary_op("sub", args[0], mean_unsqueeze);
auto var_eps = info.add_broadcastable_binary_op("add", var_unsqueeze, eps);
auto denom = info.add_broadcastable_binary_op("pow", var_eps, rt);
auto div0 = info.add_broadcastable_binary_op("div", numer, denom);
auto r0 = info.add_broadcastable_binary_op("mul", div0, scale_unsqueeze);
return info.add_broadcastable_binary_op("add", r0, bias_unsqueeze);
}
};
......
@@ -43,6 +43,8 @@ struct sigmoid_custom_op final : migraphx::experimental_custom_op_base
return inputs[1];
}
virtual bool runs_on_offload_target() const override { return true; }
virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
{
if(inputs.size() != 2)
@@ -111,4 +113,45 @@ TEST_CASE(run_sigmoid_with_incorrect_shape)
"Error in compute_shape of: sigmoid_custom_op: op must have two inputs"));
}
struct identity_custom_op final : migraphx::experimental_custom_op_base
{
virtual std::string name() const override { return "identity_custom_op"; }
virtual migraphx::argument
compute(migraphx::context, migraphx::shape, migraphx::arguments inputs) const override
{
return inputs[0];
}
virtual bool runs_on_offload_target() const override { return true; }
virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
{
if(inputs.size() != 1)
{
throw std::runtime_error("Identity op must have only one input");
}
return inputs.back();
}
virtual std::vector<size_t> output_alias(migraphx::shapes) const override { return {0, 1}; }
};
TEST_CASE(run_custom_op_with_invalid_output_alias)
{
identity_custom_op i_op;
migraphx::register_experimental_custom_op(i_op);
auto op = migraphx::operation("identity_custom_op");
EXPECT(op.name() == "identity_custom_op");
migraphx::program p;
migraphx::shape s{migraphx_shape_float_type, {12}};
migraphx::module m = p.get_main_module();
auto x = m.add_parameter("x", s);
auto i_ins = m.add_instruction(migraphx::operation("identity_custom_op"), {x});
migraphx_test_private_disable_exception_catch(true);
EXPECT(test::throws<std::exception>(
[&] { p.compile(migraphx::target("ref")); },
"Currently, CustomOps in MIGraphX only supports one output_alias"));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -24,40 +24,91 @@
#include <hip/hip_runtime_api.h>
#include <migraphx/migraphx.h>
#include <migraphx/migraphx.hpp>
#include <numeric>
#include <stdexcept>
#include "test.hpp"
#define MIGRAPHX_HIP_ASSERT(x) (EXPECT(x == hipSuccess))
struct simple_custom_op final : migraphx::experimental_custom_op_base
struct half_copy_host final : migraphx::experimental_custom_op_base
{
virtual std::string name() const override { return "simple_custom_op"; }
virtual std::string name() const override { return "half_copy_host"; }
virtual bool runs_on_offload_target() const override { return false; }
virtual migraphx::argument
compute(migraphx::context ctx, migraphx::shape, migraphx::arguments inputs) const override
{
// sets first half size_bytes of the input 0, and rest of the half bytes are copied.
int* h_output = nullptr;
auto* d_output = reinterpret_cast<int*>(inputs[0].data());
auto input_bytes = inputs[0].get_shape().bytes();
auto* output_ptr = inputs[1].data();
auto copy_bytes = input_bytes / 2;
// This custom op sets the first half of the input's bytes to 0 and copies the
// rest. It does its computation on the host, so `runs_on_offload_target()`
// returns false. Based on that flag, MIGraphX injects the necessary buffer
// copies between the GPU and the host for the input and output buffers.
auto* input_buffer_ptr = inputs[0].data();
auto* output_buffer_ptr = inputs[1].data();
auto input_bytes = inputs[0].get_shape().bytes();
auto copy_bytes = input_bytes / 2;
MIGRAPHX_HIP_ASSERT(hipSetDevice(0));
MIGRAPHX_HIP_ASSERT(hipHostMalloc(&h_output, input_bytes));
MIGRAPHX_HIP_ASSERT(hipMemcpyAsync(
h_output, d_output, input_bytes, hipMemcpyDeviceToHost, ctx.get_queue<hipStream_t>()));
MIGRAPHX_HIP_ASSERT(hipMemcpyAsync(output_buffer_ptr,
input_buffer_ptr,
input_bytes,
hipMemcpyHostToHost,
ctx.get_queue<hipStream_t>()));
MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
MIGRAPHX_HIP_ASSERT(hipMemset(h_output, 0, copy_bytes));
MIGRAPHX_HIP_ASSERT(
hipMemsetAsync(output_buffer_ptr, 0, copy_bytes, ctx.get_queue<hipStream_t>()));
MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
MIGRAPHX_HIP_ASSERT(hipMemcpy(output_ptr, h_output, input_bytes, hipMemcpyHostToDevice));
return inputs[1];
}
virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
{
if(not inputs[0].standard() or not inputs[1].standard())
{
throw std::runtime_error("Input args must be standard shaped");
}
if(inputs.size() != 2)
{
throw std::runtime_error("number of inputs must be 2");
}
return inputs.back();
}
};
struct half_copy_device final : migraphx::experimental_custom_op_base
{
virtual std::string name() const override { return "half_copy_device"; }
virtual bool runs_on_offload_target() const override { return true; }
virtual migraphx::argument
compute(migraphx::context ctx, migraphx::shape, migraphx::arguments inputs) const override
{
// This custom op sets the first half of the input's bytes to 0 and copies the
// rest. It does its computation on the GPU, so `runs_on_offload_target()`
// returns true.
auto* input_buffer_ptr = inputs[0].data();
auto* output_buffer_ptr = inputs[1].data();
auto input_bytes = inputs[0].get_shape().bytes();
auto copy_bytes = input_bytes / 2;
MIGRAPHX_HIP_ASSERT(hipSetDevice(0));
MIGRAPHX_HIP_ASSERT(hipMemcpyAsync(output_buffer_ptr,
input_buffer_ptr,
input_bytes,
hipMemcpyDeviceToDevice,
ctx.get_queue<hipStream_t>()));
MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
MIGRAPHX_HIP_ASSERT(
hipMemsetAsync(output_buffer_ptr, 0, copy_bytes, ctx.get_queue<hipStream_t>()));
MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
MIGRAPHX_HIP_ASSERT(hipHostFree(h_output));
return inputs[1];
}
virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
{
if(not inputs[0].standard())
if(not inputs[0].standard() or not inputs[1].standard())
{
throw std::runtime_error("first arg must be standard shaped");
throw std::runtime_error("Input args must be standard shaped");
}
if(inputs.size() != 2)
{
@@ -67,36 +118,209 @@ struct simple_custom_op final : migraphx::experimental_custom_op_base
}
};
TEST_CASE(run_simple_custom_op)
// overwrites input buffer
struct half_copy_device_same_buffer final : migraphx::experimental_custom_op_base
{
simple_custom_op simple_op;
migraphx::register_experimental_custom_op(simple_op);
virtual std::string name() const override { return "half_copy_device_same_buffer"; }
virtual bool runs_on_offload_target() const override { return true; }
virtual migraphx::argument
compute(migraphx::context ctx, migraphx::shape, migraphx::arguments inputs) const override
{
// This custom op sets the first half of the input's bytes to 0 in place,
// leaving the rest untouched. It does its computation on the device, so
// `runs_on_offload_target()` returns true.
auto* buffer_ptr = inputs[0].data();
auto input_bytes = inputs[0].get_shape().bytes();
auto copy_bytes = input_bytes / 2;
MIGRAPHX_HIP_ASSERT(hipSetDevice(0));
MIGRAPHX_HIP_ASSERT(
hipMemsetAsync(buffer_ptr, 0, copy_bytes, ctx.get_queue<hipStream_t>()));
MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
return inputs[0];
}
virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
{
if(not inputs[0].standard())
{
throw std::runtime_error("Input arg must be standard shaped");
}
return inputs.front();
}
};
TEST_CASE(register_half_copy_op)
{
half_copy_host hch;
migraphx::register_experimental_custom_op(hch);
auto op = migraphx::operation("half_copy_host");
EXPECT(op.name() == "half_copy_host");
half_copy_device hcd;
migraphx::register_experimental_custom_op(hcd);
op = migraphx::operation("half_copy_device");
EXPECT(op.name() == "half_copy_device");
half_copy_device_same_buffer hcdsb;
migraphx::register_experimental_custom_op(hcdsb);
op = migraphx::operation("half_copy_device_same_buffer");
EXPECT(op.name() == "half_copy_device_same_buffer");
}
TEST_CASE(half_copy_custom_op_test)
{
auto run_test_prog = [](const std::string& op_name, bool buffer_alloc) {
migraphx::program p;
migraphx::module m = p.get_main_module();
migraphx::shape s{migraphx_shape_float_type, {4, 3}};
auto x = m.add_parameter("x", s);
migraphx::instructions inputs = {x};
if(buffer_alloc)
{
auto alloc = m.add_allocation(s);
inputs = {x, alloc};
}
auto half_copy_ins = m.add_instruction(migraphx::operation(op_name.c_str()), inputs);
m.add_return({half_copy_ins});
migraphx::compile_options options;
options.set_offload_copy();
p.compile(migraphx::target("gpu"), options);
migraphx::program_parameters pp;
std::vector<float> x_data(12);
std::iota(x_data.begin(), x_data.end(), 0);
pp.add("x", migraphx::argument(s, x_data.data()));
auto results = p.eval(pp);
auto result = results[0];
auto result_vec = result.as_vector<float>();
std::vector<float> expected_result(12, 0);
std::iota(expected_result.begin() + 6, expected_result.end(), 6);
EXPECT(bool{result == migraphx::argument(s, expected_result.data())});
};
// register all the ops
half_copy_host hch;
migraphx::register_experimental_custom_op(hch);
half_copy_device hcd;
migraphx::register_experimental_custom_op(hcd);
half_copy_device_same_buffer hcdsb;
migraphx::register_experimental_custom_op(hcdsb);
std::vector<std::pair<std::string, bool>> tests_config = {
{"half_copy_host", true},
{"half_copy_device", true},
{"half_copy_device_same_buffer", false}};
for(const auto& i : tests_config)
{
run_test_prog(i.first, i.second);
}
}
struct stride_two final : migraphx::experimental_custom_op_base
{
virtual std::string name() const override { return "stride_two"; }
virtual migraphx::argument
compute(migraphx::context, migraphx::shape out_shape, migraphx::arguments inputs) const override
{
return {out_shape, inputs[0].data()};
}
virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
{
if(inputs.size() != 1)
{
throw std::runtime_error("stride_two op must have only one input argument");
};
if(not inputs[0].standard())
{
throw std::runtime_error("stride_two op only works on the standard input shapes");
}
migraphx::shape input_s = inputs[0];
std::vector<size_t> dims = input_s.lengths();
std::vector<size_t> new_dims;
std::vector<size_t> strides = input_s.strides();
std::vector<size_t> new_strides;
std::for_each(dims.begin(), dims.end(), [&](auto i) { new_dims.push_back(i / 2); });
std::for_each(
strides.begin(), strides.end(), [&](auto i) { new_strides.push_back(i * 2); });
migraphx::shape output_shape{input_s.type(), new_dims, new_strides};
return output_shape;
}
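// e.g. a standard {4, 4, 4} input (strides {16, 4, 1}) yields a {2, 2, 2} view
// with strides {32, 8, 2}, i.e. every other element along each axis, as
// stride_two_custom_op_test below verifies.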
virtual bool runs_on_offload_target() const override { return true; }
virtual std::vector<size_t> output_alias(migraphx::shapes) const override { return {0}; };
};
TEST_CASE(stride_two_custom_op_test)
{
stride_two st;
migraphx::register_experimental_custom_op(st);
migraphx::program p;
migraphx::module m = p.get_main_module();
migraphx::shape s{migraphx_shape_float_type, {4, 4, 4}};
auto x = m.add_parameter("x", s);
auto stride_two_ins = m.add_instruction(migraphx::operation("stride_two"), {x});
m.add_return({stride_two_ins});
migraphx::compile_options options;
options.set_offload_copy();
p.compile(migraphx::target("gpu"), options);
migraphx::program_parameters pp;
std::vector<float> x_data(64);
std::iota(x_data.begin(), x_data.end(), 0);
pp.add("x", migraphx::argument(s, x_data.data()));
auto results = p.eval(pp);
auto result = results[0];
auto result_vec = result.as_vector<float>();
std::vector<float> expected_result = {0, 2, 8, 10, 32, 34, 40, 42};
EXPECT(result_vec == expected_result);
}
TEST_CASE(custom_op_with_pre_and_post_subgraph_test)
{
half_copy_host hco;
migraphx::register_experimental_custom_op(hco);
stride_two st;
migraphx::register_experimental_custom_op(st);
migraphx::program p;
migraphx::shape s{migraphx_shape_int32_type, {4, 3}};
migraphx::shape trans_shape{migraphx_shape_int32_type, {3, 4}};
migraphx::shape s{migraphx_shape_float_type, {4, 6}};
migraphx::module m = p.get_main_module();
auto x = m.add_parameter("x", s);
auto neg = m.add_instruction(migraphx::operation("neg"), x);
auto alloc = m.add_allocation(trans_shape);
auto neg_trans =
m.add_instruction(migraphx::operation("transpose", "{permutation: [1, 0]}"), {neg});
auto neg_cont = m.add_instruction(migraphx::operation("contiguous"), {neg_trans});
auto custom_kernel =
m.add_instruction(migraphx::operation("simple_custom_op"), {neg_cont, alloc});
auto relu = m.add_instruction(migraphx::operation("relu"), custom_kernel);
m.add_return({relu});
// pre-subgraph
auto neg_ins = m.add_instruction(migraphx::operation("neg"), x);
auto trans_ins =
m.add_instruction(migraphx::operation("transpose", "{permutation: [1, 0]}"), {neg_ins});
auto cont_ins = m.add_instruction(migraphx::operation("contiguous"), {trans_ins});
// custom_op
migraphx::shape trans_shape{migraphx_shape_float_type, {6, 4}};
auto alloc = m.add_allocation(trans_shape);
auto half_copy_ins =
m.add_instruction(migraphx::operation("half_copy_host"), {cont_ins, alloc});
// post-subgraph
auto abs_ins = m.add_instruction(migraphx::operation("abs"), {half_copy_ins});
// another custom_op
auto stride_two_ins = m.add_instruction(migraphx::operation("stride_two"), {abs_ins});
// post-subgraph
auto relu_ins = m.add_instruction(migraphx::operation("relu"), {stride_two_ins});
m.add_return({relu_ins});
migraphx::compile_options options;
options.set_offload_copy();
p.compile(migraphx::target("gpu"), options);
migraphx::program_parameters pp;
std::vector<int> x_data(12, -3);
std::vector<float> x_data(s.elements());
std::iota(x_data.begin(), x_data.end(), 0);
pp.add("x", migraphx::argument(s, x_data.data()));
auto results = p.eval(pp);
auto result = results[0];
auto result_vec = result.as_vector<int>();
std::vector<int> expected_result(12, 0);
std::fill(expected_result.begin() + 6, expected_result.end(), 3);
EXPECT(bool{result == migraphx::argument(trans_shape, expected_result.data())});
auto results = p.eval(pp);
auto result = results[0];
auto result_vec = result.as_vector<float>();
std::vector<float> expected_result = {0, 0, 0, 0, 4, 16};
EXPECT(bool{result == migraphx::argument(migraphx::shape{migraphx_shape_float_type, {3, 2}},
expected_result.data())});
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -25,6 +25,8 @@
#include <hip/hip_runtime_api.h>
#include <migraphx/migraphx.h>
#include <migraphx/migraphx.hpp>
#include <migraphx/manage_ptr.hpp>
#include "test.hpp"
TEST_CASE(load_and_run)
@@ -44,11 +46,67 @@ TEST_CASE(load_and_run)
{
pp.add(name, migraphx::argument::generate(param_shapes[name]));
}
auto outputs = p.eval(pp);
CHECK(shapes_before.size() == outputs.size());
CHECK(bool{shapes_before.front() == outputs.front().get_shape()});
}
using hip_ptr = MIGRAPHX_MANAGE_PTR(void, hipFree);
using stream_ptr = MIGRAPHX_MANAGE_PTR(hipStream_t, hipStreamDestroy);
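// MIGRAPHX_MANAGE_PTR wraps a raw handle in a std::unique_ptr with the given
// deleter, so the stream and device buffers are released automatically.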
stream_ptr get_stream()
{
hipStream_t stream;
auto err = hipStreamCreateWithFlags(&stream, 0);
EXPECT(err == hipSuccess);
return stream_ptr{stream};
}
hip_ptr get_hip_buffer(size_t size)
{
void* ptr;
auto err = hipMalloc(&ptr, size);
EXPECT(err == hipSuccess);
return hip_ptr{ptr};
}
TEST_CASE(load_and_run_async)
{
auto p = migraphx::parse_onnx("conv_relu_maxpool_test.onnx");
auto shapes_before = p.get_output_shapes();
migraphx::compile_options options;
options.set_offload_copy(false);
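// with offload copy disabled, parameters must already live in device memory;
// each generated host argument is copied into a hipMalloc'd buffer below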
p.compile(migraphx::target("gpu"), options);
auto shapes_after = p.get_output_shapes();
CHECK(shapes_before.size() == 1);
CHECK(shapes_before.size() == shapes_after.size());
CHECK(bool{shapes_before.front() == shapes_after.front()});
migraphx::program_parameters pp;
auto param_shapes = p.get_parameter_shapes();
stream_ptr stream = get_stream();
std::vector<hip_ptr> buffs;
std::vector<migraphx::argument> args;
for(auto&& name : param_shapes.names())
{
args.push_back(migraphx::argument::generate(param_shapes[name]));
buffs.push_back(get_hip_buffer(args.rbegin()->get_shape().bytes()));
auto err = hipMemcpy(buffs.rbegin()->get(),
args.rbegin()->data(),
args.rbegin()->get_shape().bytes(),
hipMemcpyHostToDevice);
EXPECT(err == hipSuccess);
pp.add(name, migraphx::argument(args.rbegin()->get_shape(), buffs.rbegin()->get()));
}
auto outputs = p.run_async(pp, stream.get());
CHECK(shapes_before.size() == outputs.size());
CHECK(bool{shapes_before.front() == outputs.front().get_shape()});
}
TEST_CASE(load_and_run_ctx)
{
auto p = migraphx::parse_onnx("conv_relu_maxpool_test.onnx");
@@ -82,10 +140,10 @@ TEST_CASE(if_pl_test)
migraphx::program_parameters pp;
auto param_shapes = p.get_parameter_shapes();
auto xs = param_shapes["x"];
std::vector<float> xd(xs.bytes() / sizeof(float), 1.0);
std::vector<float> xd(xs.elements(), 1.0);
pp.add("x", migraphx::argument(xs, xd.data()));
auto ys = param_shapes["y"];
std::vector<float> yd(ys.bytes() / sizeof(float), 2.0);
std::vector<float> yd(ys.elements(), 2.0);
pp.add("y", migraphx::argument(ys, yd.data()));
char ccond = cond;
pp.add("cond", migraphx::argument(param_shapes["cond"], &ccond));
......
@@ -48,4 +48,4 @@ void gpu_literal_test()
}
}
int main() { gpu_literal_test(); }
int main() { gpu_literal_test(); } // NOLINT (bugprone-exception-escape)
@@ -84,7 +84,7 @@ migraphx::program create_program_from_mlir(const migraphx::module& mmlir)
inputs.push_back(mm->add_parameter("output", mmlir.get_output_shapes().front()));
migraphx::gpu::context ctx;
migraphx::gpu::insert_mlir(*mm, mm->end(), compile_mlir(ctx, mmlir), inputs);
migraphx::gpu::insert_mlir(*mm, mm->end(), compile_mlir(ctx, mmlir, inputs), inputs);
return p;
}
......
@@ -30,7 +30,6 @@
#include <migraphx/ref/target.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/verify.hpp>
#include <migraphx/quantization.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/propagate_constant.hpp>
#include <migraphx/pass_manager.hpp>
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <iostream>
#include <vector>
#include <migraphx/gpu/context.hpp>
#include <migraphx/context.hpp>
#include <migraphx/gpu/compile_hip.hpp>
#include <migraphx/gpu/kernel.hpp>
#include <migraphx/gpu/device_name.hpp>
#include <migraphx/par_for.hpp>
#include <migraphx/program.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/module.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/gpu/target.hpp>
#include "test.hpp"
using hip_stream_ptr = MIGRAPHX_MANAGE_PTR(hipStream_t, hipStreamDestroy);
constexpr uint32_t stream_sync_test_val = 1337;
// NOLINTNEXTLINE
const std::string compare_numbers = R"__migraphx__(
#include <hip/hip_runtime.h>
extern "C" {
__global__ void compare(float* data)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (data[i] != 1337)
{
abort();
}
}
}
int main() {}
)__migraphx__";
migraphx::src_file make_src_file(const std::string& name, const std::string& content)
{
return {name, std::make_pair(content.data(), content.data() + content.size())};
}
hip_stream_ptr get_stream()
{
hipStream_t stream;
auto status = hipStreamCreate(&stream);
if(status != hipSuccess)
{
MIGRAPHX_THROW("Failed to get stream");
}
return hip_stream_ptr{stream};
}
TEST_CASE(test_stream_sync_compare_kernel)
{
auto binaries = migraphx::gpu::compile_hip_src(
{make_src_file("check_stuff.cpp", compare_numbers)}, "", migraphx::gpu::get_device_name());
EXPECT(binaries.size() == 1);
migraphx::gpu::kernel k1{binaries.front(), "compare"};
auto input =
migraphx::fill_argument({migraphx::shape::float_type, {128}}, stream_sync_test_val);
auto ginput = migraphx::gpu::to_gpu(input);
hip_stream_ptr pstream = get_stream();
k1.launch(pstream.get(), input.get_shape().elements(), 1024)(ginput.cast<float>());
auto output = migraphx::gpu::from_gpu(ginput);
EXPECT(output == input);
}
TEST_CASE(test_stream_sync)
{
auto binaries = migraphx::gpu::compile_hip_src(
{make_src_file("check_stuff.cpp", compare_numbers)}, "", migraphx::gpu::get_device_name());
EXPECT(binaries.size() == 1);
migraphx::gpu::kernel k1{binaries.front(), "compare"};
const unsigned int m = 128;
const unsigned int k = 8192;
// Setup empty GPU memory buffer
migraphx::shape input_shape{migraphx::shape::float_type, {m, k}};
migraphx::shape output_shape{migraphx::shape::float_type, {m, m}};
auto input = migraphx::fill_argument(input_shape, 0);
auto ginput = migraphx::gpu::to_gpu(input);
auto output = migraphx::fill_argument(output_shape, 0);
auto goutput = migraphx::gpu::to_gpu(output);
hip_stream_ptr pstream = get_stream();
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {m, k}});
auto y = mm->add_literal(
migraphx::generate_literal(migraphx::shape{migraphx::shape::float_type, {k, m}}));
std::vector<float> data(m * m, stream_sync_test_val);
auto test_val = mm->add_literal(output_shape, data);
auto mult_out = mm->add_instruction(migraphx::make_op("dot"), x, y);
mm->add_instruction(migraphx::make_op("add"), mult_out, test_val);
p.compile(migraphx::gpu::target{});
// Run network and then verify with kernel
auto args = p.eval({{"x", ginput}, {"output", goutput}}, {pstream.get(), true});
k1.launch(pstream.get(), m * m, 1024)(goutput.cast<float>());
output = migraphx::gpu::from_gpu(goutput);
EXPECT(output != input);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
(binary ONNX protobuf data: batch_norm_invalid_rank_test.onnx removed, batch_norm_rank_2_test.onnx added with an epsilon attribute)
@@ -331,6 +331,24 @@ def batch_norm_flat_test():
return ([node], [x, scale, bias, mean, var], [out])
@onnx_test
def batch_norm_rank_2_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 5])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [5])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [5])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [5])
var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [5])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 5])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 'scale', 'bias', 'mean', 'variance'],
outputs=['y'],
epsilon=1e-6)
return ([node], [x, scale, bias, mean, var], [out])
@onnx_test
def batch_norm_1d_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [2, 3, 4])
@@ -385,23 +403,6 @@ def batch_norm_3d_test():
return ([node], [x, scale, bias, mean, var], [out])
@onnx_test
def batch_norm_invalid_rank_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [8, 8])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [8])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [8])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [8])
var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [8])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [8, 8])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 'scale', 'bias', 'mean', 'variance'],
outputs=['y'])
return ([node], [x, scale, bias, mean, var], [out])
@onnx_test
def batch_norm_invalid_bias_rank_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3, 4, 4])
@@ -419,6 +420,57 @@ def batch_norm_invalid_bias_rank_test():
return ([node], [x, scale, bias, mean, var], [out])
@onnx_test
def binary_dyn_brcst_prelu_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[None, 3, 4, 5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 5])
arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
[None, 3, 4, 5])
node = onnx.helper.make_node(
'PRelu',
inputs=['0', '1'],
outputs=['out'],
)
return ([node], [arg0, arg1], [arg_out])
@onnx_test
def binary_dyn_brcst_add_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [4, 5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT,
[None, 3, 4, 5])
arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
[None, 3, 4, 5])
node = onnx.helper.make_node(
'Add',
inputs=['0', '1'],
outputs=['out'],
)
return ([node], [arg0, arg1], [arg_out])
@onnx_test
def binary_dyn_brcst_mul_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[None, 3, 4, 5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 1])
arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
[None, 3, 4, 5])
node = onnx.helper.make_node(
'Mul',
inputs=['0', '1'],
outputs=['out'],
)
return ([node], [arg0, arg1], [arg_out])
@onnx_test
def cast_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [10])
......