Commit 4dc2f1a1 authored by Paul

Merge

parents f774299b 6ac586a9
@@ -164,6 +164,10 @@ struct module
instruction_ref replace_return(std::vector<instruction_ref> args);
+instruction_ref insert_literal(instruction_ref ins, literal l);
+instruction_ref insert_parameter(instruction_ref ins, std::string name, shape s);
std::vector<std::string> get_parameter_names() const;
shape get_parameter_shape(std::string name) const;
......
@@ -439,11 +439,7 @@ module::insert_instructions(instruction_ref ins,
return insert_generic_instructions(*this, ins, iterator_for(r), std::move(map_ins));
}
-instruction_ref module::add_literal(literal l)
-{
-impl->emplace_front(std::move(l));
-return impl->instructions.begin();
-}
+instruction_ref module::add_literal(literal l) { return insert_literal(begin(), std::move(l)); }
instruction_ref module::add_outline(const shape& s)
{
@@ -453,10 +449,7 @@ instruction_ref module::add_outline(const shape& s)
instruction_ref module::add_parameter(std::string name, shape s)
{
-assert(get_parameter_shape(name) == shape{});
-impl->push_front({builtin::param{std::move(name), impl->nparams}, std::move(s), {}});
-impl->nparams++;
-return impl->instructions.begin();
+return insert_parameter(begin(), std::move(name), std::move(s));
}
instruction_ref module::add_return(std::vector<instruction_ref> args)
@@ -469,6 +462,20 @@ instruction_ref module::add_return(std::vector<instruction_ref> args)
return result;
}
+instruction_ref module::insert_literal(instruction_ref ins, literal l)
+{
+impl->emplace(ins, std::move(l));
+return std::prev(ins);
+}
+instruction_ref module::insert_parameter(instruction_ref ins, std::string name, shape s)
+{
+assert(get_parameter_shape(name) == shape{});
+impl->insert(ins, {builtin::param{std::move(name), impl->nparams}, std::move(s), {}});
+impl->nparams++;
+return std::prev(ins);
+}
instruction_ref module::replace_return(std::vector<instruction_ref> args)
{
auto last = std::prev(this->end());
......
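Note: with insert_literal and insert_parameter in place, add_literal and add_parameter become thin wrappers that insert at begin(), while callers such as the deserializer in the next hunk insert at mod->end() to preserve program order. A minimal usage sketch of the two entry points (the module contents here are illustrative only, not from the commit):

    // Sketch: exercise both the prepend-style add_* and the positional insert_* API.
    #include <migraphx/module.hpp>
    #include <migraphx/literal.hpp>
    #include <migraphx/shape.hpp>

    migraphx::module build_example()
    {
        migraphx::module m{"example"};
        migraphx::shape s{migraphx::shape::float_type, {4}};

        // add_parameter inserts at the front of the instruction list...
        auto x = m.add_parameter("x", s);
        // ...while insert_* places the instruction before a chosen position;
        // inserting before m.end() appends in program order.
        auto one = m.insert_literal(m.end(), migraphx::literal{s, {1.0f, 1.0f, 1.0f, 1.0f}});
        (void)x;
        (void)one;
        return m;
    }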
@@ -504,12 +504,14 @@ static void mod_from_val(module_ref mod,
if(name == "@param")
{
-output = mod->add_parameter(fields["parameter"].to<std::string>(),
+output = mod->insert_parameter(mod->end(),
+fields["parameter"].to<std::string>(),
migraphx::from_value<shape>(node.at("shape")));
}
else if(name == "@literal")
{
-output = mod->add_literal(migraphx::from_value<literal>(node.at("literal")));
+output =
+mod->insert_literal(mod->end(), migraphx::from_value<literal>(node.at("literal")));
}
else
{
@@ -544,11 +546,11 @@ static void mod_from_val(module_ref mod,
}
else if(module_inputs.empty())
{
-output = mod->add_instruction(op, inputs);
+output = mod->insert_instruction(mod->end(), op, inputs);
}
else
{
-output = mod->add_instruction(op, inputs, module_inputs);
+output = mod->insert_instruction(mod->end(), op, inputs, module_inputs);
}
}
output->set_normalized(normalized);
......
@@ -36,7 +36,7 @@ void raw_data_to_value(value& v, const RawData& rd)
result["shape"] = migraphx::to_value(rd.get_shape());
if(rd.get_shape().type() == shape::tuple_type)
result["sub"] = migraphx::to_value(rd.get_sub_objects());
-else
+else if(not rd.empty())
result["data"] = migraphx::value::binary(rd.data(), rd.get_shape().bytes());
v = result;
}
@@ -56,7 +56,7 @@ void migraphx_from_value(const value& v, argument& a)
literal l = migraphx::from_value<literal>(v);
a = l.get_argument();
}
-else
+else if(v.contains("sub"))
{
a = migraphx::from_value<std::vector<argument>>(v.at("sub"));
}
......
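Note: the two bare else branches above did not handle an empty argument: serialization still tried to emit a binary "data" blob, and deserialization fell through to the "sub" branch even when it was absent. A small round-trip sketch of the intended behavior, assuming the usual to_value/from_value entry points (helper function name is hypothetical):

    // Sketch: serializing a default-constructed (empty) argument should now omit
    // "data" entirely, and loading it back should leave the argument empty.
    #include <migraphx/argument.hpp>
    #include <migraphx/serialize.hpp>
    #include <migraphx/value.hpp>

    void roundtrip_empty_argument()
    {
        migraphx::argument a{};                               // empty: no buffer, no sub-objects
        migraphx::value v = migraphx::to_value(a);            // writes "shape" but skips "data"
        auto b = migraphx::from_value<migraphx::argument>(v); // neither "data" nor "sub": stays empty
        (void)b;
    }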
@@ -25,6 +25,7 @@
#include <migraphx/module.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/iterator_for.hpp>
+#include <migraphx/register_op.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
@@ -52,6 +53,7 @@ struct cpu_literal
return os;
}
};
+MIGRAPHX_REGISTER_OP(cpu_literal);
void write_literals::apply(module& m) const
{
......
@@ -59,31 +59,30 @@ argument miopen_deconvolution::compute(context& ctx,
auto w_desc = make_tensor(reshape_if_1d(args[1].get_shape()));
auto y_desc = make_tensor(reshape_if_1d(output_shape));
-float alpha = 1;
-float beta = 0;
-auto status = miopenConvolutionForward(ctx.get_stream().get_miopen(),
-&alpha,
-x_desc.get(),
-args[0].implicit(),
+if(solution_id == 0)
+MIGRAPHX_THROW("MIOpen Deconvolution: invalid solution ID");
+auto status = miopenConvolutionForwardImmediate(ctx.get_stream().get_miopen(),
w_desc.get(),
args[1].implicit(),
+x_desc.get(),
+args[0].implicit(),
cd.get(),
-algo,
-&beta,
y_desc.get(),
args[3].implicit(),
args[2].implicit(),
-args[2].get_shape().bytes());
+args[2].get_shape().bytes(),
+solution_id);
if(status != miopenStatusSuccess)
-MIGRAPHX_THROW("Running deconvolution failed");
+MIGRAPHX_THROW("MIOpen Deconvolution: running convolution failed");
return args[3];
}
-shape miopen_deconvolution::compile(context& ctx,
-const shape& output_shape,
-std::vector<shape> inputs)
+shape miopen_deconvolution::find(context& ctx, const shape& output_shape, std::vector<shape> inputs)
{
shape workspace_shape{};
auto x_desc = make_tensor(reshape_if_1d(inputs[0]));
auto w_desc = make_tensor(reshape_if_1d(inputs[1]));
auto y_desc = make_tensor(reshape_if_1d(output_shape));
@@ -119,9 +118,35 @@ shape miopen_deconvolution::compile(context& ctx,
workspace_size,
false);
if(status != miopenStatusSuccess)
-MIGRAPHX_THROW("Find deconvolution failed");
-handle = ctx.get_stream().get_miopen();
+MIGRAPHX_THROW("MIOpen Deconvolution: find convolution failed");
algo = perf.fwd_algo;
+size_t solution_count;
+status = miopenConvolutionForwardGetSolutionCount(ctx.get_stream().get_miopen(),
+w_desc.get(),
+x_desc.get(),
+cd.get(),
+y_desc.get(),
+&solution_count);
+if(status != miopenStatusSuccess)
+MIGRAPHX_THROW("MIOpen Deconvolution: get solution count failed");
+std::vector<miopenConvSolution_t> solutions(solution_count);
+status = miopenConvolutionForwardGetSolution(ctx.get_stream().get_miopen(),
+w_desc.get(),
+x_desc.get(),
+cd.get(),
+y_desc.get(),
+solution_count,
+&solution_count,
+solutions.data());
+if(status != miopenStatusSuccess)
+MIGRAPHX_THROW("MIOpen Deconvolution: get solution failed");
+solution_id = solutions.front().solution_id;
return shape{shape::int8_type, {perf.memory}};
}
@@ -129,13 +154,29 @@ void miopen_deconvolution::finalize(context& ctx,
const shape& output_shape,
std::vector<shape> inputs)
{
-if(handle == ctx.get_stream().get_miopen())
-return;
+if(cd == nullptr)
+cd = make_deconv(op);
+if(solution_id == 0)
+{
// Check that workspace hasn't changed
auto size = inputs.at(2).bytes();
-auto ws = compile(ctx, output_shape, std::move(inputs));
+auto ws = find(ctx, output_shape, inputs);
if(ws.bytes() > size)
-MIGRAPHX_THROW("Workspace has changed during finalization.");
+MIGRAPHX_THROW("MIOpen Deconvolution: workspace has changed during finalization.");
+}
+auto x_desc = make_tensor(reshape_if_1d(inputs[0]));
+auto w_desc = make_tensor(reshape_if_1d(inputs[1]));
+auto y_desc = make_tensor(reshape_if_1d(output_shape));
+auto status = miopenConvolutionForwardCompileSolution(ctx.get_stream().get_miopen(),
+w_desc.get(),
+x_desc.get(),
+cd.get(),
+y_desc.get(),
+solution_id);
+if(status != miopenStatusSuccess)
+MIGRAPHX_THROW("MIOpen Deconvolution: compile solution failed");
}
} // namespace gpu
......
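Note: the deconvolution operator (and, below, quant convolution) switches from the find-then-run API (miopenConvolutionForward with a cached miopenHandle_t) to MIOpen's immediate mode: find enumerates solutions, finalize compiles the chosen one, and compute launches it by solution_id. Roughly, the sequence is as in this sketch (descriptors and status checks elided; an illustration of the MIOpen immediate-mode calls, not the exact MIGraphX code):

    // Sketch of the immediate-mode flow assumed by the new find/finalize/compute split.
    // The handle, descriptors, and buffers are presumed to already exist.
    #include <cstddef>
    #include <cstdint>
    #include <vector>
    #include <miopen/miopen.h>

    uint64_t pick_and_compile_solution(miopenHandle_t handle,
                                       miopenTensorDescriptor_t wDesc,
                                       miopenTensorDescriptor_t xDesc,
                                       miopenConvolutionDescriptor_t convDesc,
                                       miopenTensorDescriptor_t yDesc)
    {
        // 1. Ask how many applicable solutions MIOpen knows about (in find()).
        std::size_t count = 0;
        miopenConvolutionForwardGetSolutionCount(handle, wDesc, xDesc, convDesc, yDesc, &count);

        // 2. Retrieve them and keep the first (expected to be the preferred) solution id.
        std::vector<miopenConvSolution_t> solutions(count);
        miopenConvolutionForwardGetSolution(
            handle, wDesc, xDesc, convDesc, yDesc, count, &count, solutions.data());
        uint64_t id = solutions.front().solution_id;

        // 3. Pre-compile the kernel for that solution once (in finalize()).
        miopenConvolutionForwardCompileSolution(handle, wDesc, xDesc, convDesc, yDesc, id);
        return id;
    }

At run time, compute then calls miopenConvolutionForwardImmediate with the same solution_id, so no Find pass or handle-keyed algorithm cache is needed.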
@@ -39,20 +39,20 @@ struct miopen_deconvolution
op::deconvolution op;
shared<convolution_descriptor> cd;
miopenConvFwdAlgorithm_t algo{};
-miopenHandle_t handle = nullptr;
+uint64_t solution_id = 0;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
-// TODO: Add algo
-return op::convolution::reflect(self.op, f);
+return pack_join(op::deconvolution::reflect(self.op, f),
+pack(f(self.solution_id, "solution_id")));
}
std::string name() const { return "gpu::deconv"; }
shape compute_shape(const std::vector<shape>& inputs) const;
argument
compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
-shape compile(context& ctx, const shape& output_shape, std::vector<shape> inputs);
+shape find(context& ctx, const shape& output_shape, std::vector<shape> inputs);
void finalize(context& ctx, const shape& output_shape, std::vector<shape> inputs);
std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
{
......
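Note: reflecting solution_id (instead of keeping a non-serializable miopenHandle_t) is what lets the load/save round-trip check added further down work on compiled programs: the chosen solution travels with the operator's value representation. A minimal sketch of the reflection idiom, using a made-up struct and assumed header locations rather than the real operator:

    // Sketch of the reflect() pattern: every reflected field becomes part of the
    // operator's serialized value. Struct, field names, and includes are illustrative.
    #include <cstdint>
    #include <migraphx/functional.hpp> // pack / pack_join (location assumed)
    #include <migraphx/reflect.hpp>

    struct example_op
    {
        uint64_t solution_id = 0;
        int padding          = 0;

        template <class Self, class F>
        static auto reflect(Self& self, F f)
        {
            // pack_join merges several packs, mirroring how miopen_deconvolution joins
            // the wrapped op's fields with its own solution_id above.
            return migraphx::pack_join(migraphx::pack(f(self.padding, "padding")),
                                       migraphx::pack(f(self.solution_id, "solution_id")));
        }
    };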
@@ -41,7 +41,7 @@ struct miopen_quant_convolution
bool int8_x4_format = false;
shared<convolution_descriptor> cd;
miopenConvFwdAlgorithm_t algo{};
-miopenHandle_t handle = nullptr;
+uint64_t solution_id = 0;
template <class Self, class F>
static auto reflect(Self& self, F f)
@@ -55,7 +55,7 @@ struct miopen_quant_convolution
shape compute_shape(const std::vector<shape>& inputs) const;
argument
compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
-shape compile(context& ctx, const shape& output_shape, std::vector<shape> inputs);
+shape find(context& ctx, const shape& output_shape, std::vector<shape> inputs);
void finalize(context& ctx, const shape& output_shape, std::vector<shape> inputs);
std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
{
......
@@ -300,7 +300,7 @@ struct miopen_apply
auto&& op = any_cast<op::deconvolution>(ins->get_operator());
auto conv = miopen_deconvolution{op, make_deconv(op)};
-auto ws = conv.compile(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
+auto ws = conv.find(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
auto workspace = insert_allocation(ins, ws);
auto output = insert_allocation(ins, ins->get_shape());
@@ -331,7 +331,7 @@ struct miopen_apply
miopen_quant_convolution conv;
auto compile_quant_conv_with_format = [&](bool format) {
conv = miopen_quant_convolution{op, format, make_conv(op)};
-ws = conv.compile(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
+ws = conv.find(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
};
try
......
@@ -67,7 +67,7 @@ argument miopen_quant_convolution::compute(context& ctx,
return args[3];
}
-shape miopen_quant_convolution::compile(context& ctx,
+shape miopen_quant_convolution::find(context& ctx,
const shape& output_shape,
std::vector<shape> inputs)
{
@@ -92,8 +92,8 @@ shape miopen_quant_convolution::compile(context& ctx,
x_shape = pack_int8_shape(x_shape);
w_shape = pack_int8_shape(w_shape);
}
-auto arg_vec4_x = to_gpu(generate_argument(x_shape));
-auto arg_vec4_w = to_gpu(generate_argument(w_shape));
+auto x = to_gpu(generate_argument(x_shape));
+auto w = to_gpu(generate_argument(w_shape));
auto y = allocate_gpu(output_shape);
auto workspace = allocate_gpu(workspace_shape);
@@ -101,9 +101,9 @@ shape miopen_quant_convolution::compile(context& ctx,
miopenConvAlgoPerf_t perf;
auto status = miopenFindConvolutionForwardAlgorithm(ctx.get_stream().get_miopen(),
x_desc.get(),
-arg_vec4_x.implicit(),
+x.implicit(),
w_desc.get(),
-arg_vec4_w.implicit(),
+w.implicit(),
cd.get(),
y_desc.get(),
y.implicit(),
@@ -114,11 +114,35 @@ shape miopen_quant_convolution::compile(context& ctx,
workspace_size,
false);
if(status != miopenStatusSuccess)
-{
-MIGRAPHX_THROW("QUANT_CONVOLUTION: find convolution failed");
-}
-handle = ctx.get_stream().get_miopen();
+MIGRAPHX_THROW("MIOpen Quant Convolution: find convolution failed");
algo = perf.fwd_algo;
+size_t solution_count;
+status = miopenConvolutionForwardGetSolutionCount(ctx.get_stream().get_miopen(),
+w_desc.get(),
+x_desc.get(),
+cd.get(),
+y_desc.get(),
+&solution_count);
+if(status != miopenStatusSuccess)
+MIGRAPHX_THROW("MIOpen Quant Convolution: get solution count failed");
+std::vector<miopenConvSolution_t> solutions(solution_count);
+status = miopenConvolutionForwardGetSolution(ctx.get_stream().get_miopen(),
+w_desc.get(),
+x_desc.get(),
+cd.get(),
+y_desc.get(),
+solution_count,
+&solution_count,
+solutions.data());
+if(status != miopenStatusSuccess)
+MIGRAPHX_THROW("MIOpen Quant Convolution: get solution failed");
+solution_id = solutions.front().solution_id;
return shape{shape::int8_type, {perf.memory}};
}
@@ -126,13 +150,29 @@ void miopen_quant_convolution::finalize(context& ctx,
const shape& output_shape,
std::vector<shape> inputs)
{
-if(handle == ctx.get_stream().get_miopen())
-return;
+if(cd == nullptr)
+cd = make_conv(op);
+if(solution_id == 0)
+{
// Check that workspace hasn't changed
auto size = inputs.at(2).bytes();
-auto ws = compile(ctx, output_shape, std::move(inputs));
+auto ws = find(ctx, output_shape, inputs);
if(ws.bytes() > size)
-MIGRAPHX_THROW("Workspace has changed during finalization.");
+MIGRAPHX_THROW("MIOpen Quant Convolution: workspace has changed during finalization.");
+}
+auto x_desc = make_tensor(inputs[0], int8_x4_format);
+auto w_desc = make_tensor(inputs[1], int8_x4_format);
+auto y_desc = make_tensor(output_shape);
+auto status = miopenConvolutionForwardCompileSolution(ctx.get_stream().get_miopen(),
+w_desc.get(),
+x_desc.get(),
+cd.get(),
+y_desc.get(),
+solution_id);
+if(status != miopenStatusSuccess)
+MIGRAPHX_THROW("MIOpen Quant Convolution: compile solution failed");
}
shape miopen_quant_convolution::pack_int8_shape(const shape& s) const
......
@@ -1231,12 +1231,12 @@ TEST_CASE(transpose_slice_single_transpose)
auto sqrt1 = m1.add_instruction(migraphx::make_op("sqrt"), slice1);
auto slice2 = m1.add_instruction(
migraphx::make_op("slice", {{"axes", {2}}, {"starts", {12}}, {"ends", {24}}}), x);
-auto transpose2 = m1.add_instruction(
+auto transpose = m1.add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice2);
auto slice3 = m1.add_instruction(
migraphx::make_op("slice", {{"axes", {2}}, {"starts", {24}}, {"ends", {36}}}), x);
auto sqrt3 = m1.add_instruction(migraphx::make_op("sqrt"), slice3);
-m1.add_return({sqrt1, transpose2, sqrt3});
+m1.add_return({sqrt1, transpose, sqrt3});
}
migraphx::module m2 = m1;
run_pass(m1);
......
@@ -30,6 +30,7 @@
#include <migraphx/ranges.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/load_save.hpp>
+#include <migraphx/tmp_dir.hpp>
#include <migraphx/verify_args.hpp>
#include <set>
@@ -57,6 +58,15 @@ std::future<typename std::result_of<Function()>::type> detach_async(Function&& f
return std::async(std::launch::deferred, std::forward<Function>(f));
}
+inline void verify_load_save(const migraphx::program& p)
+{
+migraphx::tmp_dir td{"migraphx_test"};
+auto path = td.path / "test.mxr";
+migraphx::save(p, path.string());
+auto loaded = migraphx::load(path.string());
+EXPECT(p == loaded);
+}
inline void compile_check(migraphx::program& p, const migraphx::target& t, bool show_trace = false)
{
auto name = t.name();
@@ -82,6 +92,8 @@ inline void compile_check(migraphx::program& p, const migraphx::target& t, bool
throw std::runtime_error("Compiling program with " + name + " alters its shape");
}
}
+if(t.name() != "ref")
+verify_load_save(p);
}
target_info run_verify::get_target_info(const std::string& name) const
@@ -152,6 +164,7 @@ void run_verify::verify(const std::string& name, const migraphx::program& p) con
auto_print::set_terminate_handler(name);
if(migraphx::enabled(MIGRAPHX_DUMP_TEST{}))
migraphx::save(p, name + ".mxr");
+verify_load_save(p);
std::vector<std::string> target_names;
for(const auto& tname : migraphx::get_targets())
{
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>
struct test_conv_add_relu : verify_program<test_conv_add_relu>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input =
mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
auto weights =
mm->add_parameter("w", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
auto bias_literal = migraphx::literal{migraphx::shape{migraphx::shape::float_type, {4}},
{2.0f, 2.0f, 2.0f, 2.0f}};
auto bias = mm->add_literal(bias_literal);
auto conv = mm->add_instruction(migraphx::make_op("convolution"), input, weights);
auto bcast_bias = mm->add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", conv->get_shape().lens()}}),
bias);
auto bias_add = mm->add_instruction(migraphx::make_op("add"), conv, bcast_bias);
mm->add_instruction(migraphx::make_op("relu"), bias_add);
return p;
}
};