Unverified Commit 63c5582a authored by Paul Fultz II, committed by GitHub

Add load/save function for program (#623)



* Add save/load functions

* Formatting

* Add loading and saving to the driver

* Formatting

* Add return

* Serialize the context with the program

* Formatting

* Add python API

* Formatting

* Add c/c++ apis

* Formatting

* Add tests

* Formatting

* Fix tidy error

* Fix python doc

* Restore python code

* Add function name to errors

* Formatting

* Use lvalue for writing

* Serialize context

* Fix convolution and pooling operator for miopen

* Formatting

* Add const ref

* Set target name to gpu

* Add target tests

* Formatting

* Move register target to cpp file

* Fix target test

* Use make_target in driver

* Formatting

* Use make_target for the API

* Formatting

* Add cpu include

* Increase timeout

* Add more tests

* Formatting
Co-authored-by: Shucai Xiao <shucai.xiao@amd.com>
Co-authored-by: mvermeulen <5479696+mvermeulen@users.noreply.github.com>
parent e67aa78c
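In short: a program can now be saved to disk (msgpack by default, JSON optionally) and loaded back, with its target name and compiled context serialized alongside the graph. A minimal C++ sketch of the new API (not part of this diff; it mirrors the test_save_load.cpp added below, with a hypothetical model filename):

    #include <migraphx/migraphx.hpp>

    int main()
    {
        // Parse a model and save it; msgpack is the default on-disk format.
        migraphx::program p1 = migraphx::parse_onnx("model.onnx"); // hypothetical file
        migraphx::save(p1, "model.migraphx");

        // Load it back later; the target and its context travel with the program.
        migraphx::program p2 = migraphx::load("model.migraphx");
        return 0;
    }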
@@ -145,11 +145,15 @@ std::vector<char> to_msgpack(const value& v)
     msgpack::pack(vs, v);
     return vs.buffer;
 }
-value from_msgpack(const std::vector<char>& buffer)
+value from_msgpack(const char* buffer, std::size_t size)
 {
-    msgpack::object_handle oh = msgpack::unpack(buffer.data(), buffer.size());
+    msgpack::object_handle oh = msgpack::unpack(buffer, size);
     return oh.get().as<value>();
 }
+value from_msgpack(const std::vector<char>& buffer)
+{
+    return from_msgpack(buffer.data(), buffer.size());
+}
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
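The pointer/size overload lets callers decode without first copying into a std::vector; the vector overload now simply forwards to it. A round-trip sketch (not part of this diff; the header location of to_msgpack/from_msgpack is assumed):

    #include <migraphx/value.hpp>
    #include <migraphx/msgpack.hpp> // assumed header for to_msgpack/from_msgpack
    #include <cassert>
    #include <vector>

    void roundtrip()
    {
        migraphx::value v;
        v["version"] = 1;
        std::vector<char> buf = migraphx::to_msgpack(v);
        // New overload: decode straight from a raw buffer.
        migraphx::value w = migraphx::from_msgpack(buf.data(), buf.size());
        assert(w.at("version").to<int>() == 1);
    }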
@@ -8,6 +8,8 @@
 #include <migraphx/time.hpp>
 #include <migraphx/iterator_for.hpp>
 #include <migraphx/pass_manager.hpp>
+#include <migraphx/make_op.hpp>
+#include <migraphx/register_target.hpp>
 #include <iostream>
 #include <sstream>
 #include <algorithm>
@@ -24,6 +26,7 @@ struct program_impl
     std::list<instruction> instructions;
     std::vector<std::string> input_names;
     context ctx;
+    std::string target_name;
 };
 const operation& get_operation(instruction_ref ins) { return ins->get_operator(); }
@@ -416,10 +419,14 @@ instruction_ref program::validate() const
         [&](const instruction& i) { return !i.valid(impl->instructions.begin()); });
 }
+bool program::is_compiled() const { return not this->impl->target_name.empty(); }
+
 void program::compile(const target& t, compile_options options)
 {
     assert(this->validate() == impl->instructions.end());
-    this->impl->ctx = t.get_context();
+    assert(not this->is_compiled());
+    this->impl->target_name = t.name();
+    this->impl->ctx         = t.get_context();
     if(enabled(MIGRAPHX_TRACE_COMPILE{}))
         options.trace = tracer{std::cout};
     options.trace(*this);
@@ -546,6 +553,81 @@ std::vector<argument> program::eval(parameter_map params) const
     }
 }
+const int program_file_version = 1;
+
+value program::to_value() const
+{
+    value result;
+    result["version"] = program_file_version;
+    result["target"]  = this->impl->target_name;
+    if(not this->impl->target_name.empty())
+        result["context"] = this->impl->ctx.to_value();
+    value nodes;
+    print_program(*this, [&](auto ins, const auto& names) {
+        value node;
+        node["output"] = names.at(ins);
+        node["name"]   = ins->name();
+        node["shape"]  = migraphx::to_value(ins->get_shape());
+        if(ins->name() == "@literal")
+            node["literal"] = migraphx::to_value(ins->get_literal());
+        node["operator"] = ins->get_operator().to_value();
+        std::vector<std::string> inputs;
+        std::transform(ins->inputs().begin(),
+                       ins->inputs().end(),
+                       std::back_inserter(inputs),
+                       [&](auto i) { return names.at(i); });
+        node["inputs"] = inputs;
+        nodes.push_back(node);
+    });
+    result["nodes"] = nodes;
+    return result;
+}
+
+void program::from_value(const value& v)
+{
+    auto version = v.at("version").to<int>();
+    if(version != program_file_version)
+        std::cout << "Warning: Version mismatch" << std::endl;
+    this->impl->target_name = v.at("target").to<std::string>();
+    if(not this->impl->target_name.empty())
+    {
+        target t        = make_target(this->impl->target_name);
+        this->impl->ctx = t.get_context();
+        this->impl->ctx.from_value(v.at("context"));
+    }
+    std::unordered_map<std::string, instruction_ref> instructions;
+    for(const value& node : v.at("nodes"))
+    {
+        instruction_ref output;
+        auto name   = node.at("name").to<std::string>();
+        auto fields = node.at("operator");
+        if(name == "@param")
+        {
+            output = this->add_parameter(fields["parameter"].to<std::string>(),
+                                         migraphx::from_value<shape>(node.at("shape")));
+        }
+        else if(name == "@literal")
+        {
+            output = this->add_literal(migraphx::from_value<literal>(node.at("literal")));
+        }
+        else
+        {
+            auto op = make_op(name, fields);
+            std::vector<instruction_ref> inputs;
+            std::transform(node.at("inputs").begin(),
                           node.at("inputs").end(),
+                           std::back_inserter(inputs),
+                           [&](const value& i) { return instructions[i.to<std::string>()]; });
+            if(name == "@return")
+                output = this->add_return(inputs);
+            else
+                output = this->add_instruction(op, inputs);
+        }
+        instructions[node.at("output").to<std::string>()] = output;
+    }
+    this->finalize();
+}
+
 double common_average(const std::vector<double>& v)
 {
     std::size_t n = v.size() / 4;
...
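to_value walks the program in printed order, so every node's inputs are already defined by the time from_value replays them; @param, @literal, and @return get special handling, and everything else is rebuilt through make_op. A round-trip sketch (not part of this diff; uses only the members added above):

    #include <migraphx/program.hpp>

    // Rebuild a program from its value representation (a sketch).
    migraphx::program roundtrip(const migraphx::program& p1)
    {
        migraphx::value v = p1.to_value(); // version, target name, context, node list
        migraphx::program p2;
        p2.from_value(v); // replays each node via make_op, then finalizes
        return p2;
    }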
@@ -10,9 +10,10 @@
 #include <migraphx/tf.hpp>
 #include <migraphx/onnx.hpp>
 #include <migraphx/type_name.hpp>
+#include <migraphx/load_save.hpp>
+#include <migraphx/register_target.hpp>
 #ifdef HAVE_GPU
-#include <migraphx/gpu/target.hpp>
 #include <migraphx/gpu/hip.hpp>
 #endif
@@ -175,6 +176,7 @@ PYBIND11_MODULE(migraphx, m)
             py::arg("t"),
             py::arg("offload_copy") = true)
        .def("run", &migraphx::program::eval)
+        .def("sort", &migraphx::program::sort)
        .def("__eq__", std::equal_to<migraphx::program>{})
        .def("__ne__", std::not_equal_to<migraphx::program>{})
        .def("__repr__", [](const migraphx::program& p) { return migraphx::to_string(p); });
@@ -228,16 +230,28 @@ PYBIND11_MODULE(migraphx, m)
           py::arg("skip_unknown_operators") = false,
           py::arg("print_program_on_error") = false);
-    m.def("get_target", [](const std::string& name) -> migraphx::target {
-        if(name == "cpu")
-            return migraphx::cpu::target{};
-#ifdef HAVE_GPU
-        if(name == "gpu")
-            return migraphx::gpu::target{};
-#endif
-        throw std::runtime_error("Target not found: " + name);
-    });
+    m.def("load",
+          [](const std::string& name, const std::string& format) {
+              migraphx::file_options options;
+              options.format = format;
+              return migraphx::load(name, options);
+          },
+          "Load MIGraphX program",
+          py::arg("filename"),
+          py::arg("format") = "msgpack");
+    m.def("save",
+          [](const migraphx::program& p, const std::string& name, const std::string& format) {
+              migraphx::file_options options;
+              options.format = format;
+              return migraphx::save(p, name, options);
+          },
+          "Save MIGraphX program",
+          py::arg("p"),
+          py::arg("filename"),
+          py::arg("format") = "msgpack");
+    m.def("get_target", &migraphx::make_target);
     m.def("generate_argument", &migraphx::generate_argument, py::arg("s"), py::arg("seed") = 0);
     m.def("quantize_fp16",
           &migraphx::quantize_fp16,
...
#include <migraphx/register_target.hpp>
#include <algorithm>
#include <iterator>
#include <unordered_map>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {

std::unordered_map<std::string, target>& target_map()
{
    static std::unordered_map<std::string, target> m;
    return m;
}

void register_target(const target& t) { target_map()[t.name()] = t; }

target make_target(const std::string& name) { return target_map().at(name); }

std::vector<std::string> get_targets()
{
    std::vector<std::string> result;
    std::transform(target_map().begin(),
                   target_map().end(),
                   std::back_inserter(result),
                   [&](auto&& p) { return p.first; });
    std::sort(result.begin(), result.end());
    return result;
}

} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
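Targets self-register into this map at static-initialization time via MIGRAPHX_REGISTER_TARGET (see the cpu and gpu target files below), which is what lets from_value recover a context from a serialized target name. A lookup sketch (not part of this diff):

    #include <migraphx/register_target.hpp>
    #include <iostream>
    #include <string>

    void list_and_pick()
    {
        // Names of all targets linked into this binary, sorted.
        for(const std::string& name : migraphx::get_targets())
            std::cout << name << "\n";

        // make_target uses map::at, so an unregistered name throws std::out_of_range.
        migraphx::target t = migraphx::make_target("cpu"); // assumes the cpu target is linked in
        std::cout << t.name() << "\n";
    }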
@@ -11,18 +11,15 @@ void raw_data_to_value(value& v, const RawData& rd)
 {
     value result;
     result["shape"] = migraphx::to_value(rd.get_shape());
-    rd.visit([&](auto x) { result["data"] = std::vector<value>(x.begin(), x.end()); });
+    result["data"]  = std::string(rd.data(), rd.data() + rd.get_shape().bytes());
     v = result;
 }
 void migraphx_to_value(value& v, const literal& l) { raw_data_to_value(v, l); }
 void migraphx_from_value(const value& v, literal& l)
 {
     auto s = migraphx::from_value<shape>(v.at("shape"));
-    s.visit_type([&](auto as) {
-        using type = typename decltype(as)::type;
-        l          = literal{s, v.at("data").to_vector<type>()};
-    });
+    l      = literal(s, v.at("data").get_string().data());
 }
 void migraphx_to_value(value& v, const argument& a) { raw_data_to_value(v, a); }
...
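Literal data is now serialized as a raw byte string of exactly get_shape().bytes() rather than a per-element value array, which keeps large weight tensors compact on disk. A small check (not part of this diff; assumes migraphx::to_value comes from serialize.hpp and the usual literal initializer-list constructor):

    #include <migraphx/literal.hpp>
    #include <migraphx/serialize.hpp> // assumed header for migraphx::to_value
    #include <cassert>

    void check_literal_bytes()
    {
        migraphx::shape s{migraphx::shape::float_type, {2}};
        migraphx::literal l{s, {1.0f, 2.0f}};
        migraphx::value v = migraphx::to_value(l);
        // "data" holds raw bytes, so its length equals the shape's byte size (8 here).
        assert(v.at("data").get_string().size() == s.bytes());
    }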
@@ -2,6 +2,7 @@
 #define MIGRAPHX_GUARD_MIGRAPHLIB_CPU_TARGET_HPP
 #include <migraphx/program.hpp>
+#include <migraphx/register_target.hpp>
 #include <migraphx/compile_options.hpp>
 #include <migraphx/cpu/context.hpp>
 #include <migraphx/config.hpp>
@@ -22,6 +23,8 @@ struct target
     argument allocate(const shape& s) const;
 };
+MIGRAPHX_REGISTER_TARGET(target);
+
 } // namespace cpu
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
...
@@ -2,6 +2,7 @@
 #include <migraphx/cpu/lowering.hpp>
 #include <migraphx/instruction.hpp>
 #include <migraphx/dfor.hpp>
+#include <migraphx/op/identity.hpp>
 #include <migraphx/op/batch_norm_inference.hpp>
 #include <migraphx/op/convolution.hpp>
 #include <migraphx/op/deconvolution.hpp>
@@ -25,6 +26,7 @@
 #include <migraphx/clamp.hpp>
 #include <migraphx/cpu/gemm.hpp>
 #include <migraphx/register_op.hpp>
+#include <migraphx/make_op.hpp>
 #include <unordered_map>
 #include <utility>
 #include <iostream>
@@ -520,7 +522,7 @@ struct cpu_pooling : auto_register_op<cpu_pooling<Op>>
 struct cpu_op
 {
-    operation op;
+    operation op = op::identity{};
     template <class Self, class F>
     static auto reflect(Self& self, F f)
     {
@@ -532,6 +534,17 @@ struct cpu_op
     {
         return op.compute(output_shape, args);
     }
+    value to_value() const
+    {
+        value v;
+        v["name"]     = op.name();
+        v["operator"] = op.to_value();
+        return v;
+    }
+    void from_value(const value& v)
+    {
+        op = make_op(v.at("name").to<std::string>(), v.at("operator"));
+    }
     friend std::ostream& operator<<(std::ostream& os, const cpu_op& x)
     {
         os << "cpu::" << x.op;
...
 #include <migraphx/cpu/target.hpp>
 #include <migraphx/cpu/lowering.hpp>
+#include <migraphx/register_target.hpp>
 #include <migraphx/pass.hpp>
 #include <migraphx/auto_contiguous.hpp>
 #include <migraphx/rewrite_rnn.hpp>
@@ -25,6 +26,8 @@ std::vector<pass> target::get_passes(migraphx::context&, const compile_options&)
 argument target::allocate(const shape& s) const { return fill_argument(s, 0); }
+MIGRAPHX_REGISTER_TARGET(target);
+
 } // namespace cpu
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
@@ -96,19 +96,22 @@ shape miopen_convolution::find(context& ctx, const shape& output_shape, std::vec
                                    false);
     if(status != miopenStatusSuccess)
         MIGRAPHX_THROW("MIOpen Convolution: find convolution failed");
-    handle = ctx.get_stream().get_miopen();
-    algo   = perf.fwd_algo;
+    algo = perf.fwd_algo;
     size_t solution_count;
-    status = miopenConvolutionForwardGetSolutionCount(
-        handle, w_desc.get(), x_desc.get(), cd.get(), y_desc.get(), &solution_count);
+    status = miopenConvolutionForwardGetSolutionCount(ctx.get_stream().get_miopen(),
+                                                      w_desc.get(),
+                                                      x_desc.get(),
+                                                      cd.get(),
+                                                      y_desc.get(),
+                                                      &solution_count);
     if(status != miopenStatusSuccess)
         MIGRAPHX_THROW("MIOpen Convolution: get solution count failed");
     std::vector<miopenConvSolution_t> solutions(solution_count);
-    status = miopenConvolutionForwardGetSolution(handle,
+    status = miopenConvolutionForwardGetSolution(ctx.get_stream().get_miopen(),
                                                  w_desc.get(),
                                                  x_desc.get(),
                                                  cd.get(),
@@ -128,9 +131,8 @@ void miopen_convolution::finalize(context& ctx,
                                   const shape& output_shape,
                                   std::vector<shape> inputs)
 {
-    if(handle == ctx.get_stream().get_miopen())
-        return;
+    if(cd == nullptr)
+        cd = make_conv(op);
     if(solution_id == 0)
     {
         // Check that workspace hasn't changed
@@ -144,8 +146,12 @@ void miopen_convolution::finalize(context& ctx,
     auto w_desc = make_tensor(reshape_if_1d(inputs[1]));
     auto y_desc = make_tensor(reshape_if_1d(output_shape));
-    auto status = miopenConvolutionForwardCompileSolution(
-        handle, w_desc.get(), x_desc.get(), cd.get(), y_desc.get(), solution_id);
+    auto status = miopenConvolutionForwardCompileSolution(ctx.get_stream().get_miopen(),
+                                                          w_desc.get(),
+                                                          x_desc.get(),
+                                                          cd.get(),
+                                                          y_desc.get(),
+                                                          solution_id);
     if(status != miopenStatusSuccess)
         MIGRAPHX_THROW("MIOpen Convolution: compile solution failed");
 }
...
@@ -14,10 +14,9 @@ struct context;
 struct miopen_convolution
 {
     op::convolution op;
-    shared<convolution_descriptor> cd;
+    shared<convolution_descriptor> cd = nullptr;
     miopenConvFwdAlgorithm_t algo{};
-    miopenHandle_t handle = nullptr;
-    uint64_t solution_id  = 0;
+    uint64_t solution_id = 0;
     template <class Self, class F>
     static auto reflect(Self& self, F f)
...
@@ -25,6 +25,7 @@ struct miopen_pooling
     std::string name() const { return "gpu::pooling"; }
     shape compute_shape(const std::vector<shape>& inputs) const;
+    void finalize(context&, const shape&, const std::vector<shape>&);
     argument
     compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
     std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
...
@@ -29,7 +29,6 @@
 #include <migraphx/gpu/leaky_relu.hpp>
 #include <migraphx/gpu/lrn.hpp>
 #include <migraphx/gpu/miopen.hpp>
-#include <migraphx/gpu/pooling.hpp>
 #include <migraphx/gpu/quant_convolution.hpp>
 #include <migraphx/gpu/rocblas.hpp>
 #include <migraphx/iterator_for.hpp>
@@ -139,6 +138,7 @@ struct miopen_apply
         add_extend_op("logsoftmax");
         add_extend_op("lrn");
         add_extend_op("pad");
+        add_extend_op("pooling");
         add_extend_op("reduce_max");
         add_extend_op("reduce_mean");
         add_extend_op("reduce_min");
@@ -154,7 +154,6 @@ struct miopen_apply
         add_convolution_op();
         add_deconvolution_op();
         add_quant_convolution_op();
-        add_pooling_op();
         add_batch_norm_inference_op();
         add_neg_op();
     }
@@ -313,18 +312,6 @@ struct miopen_apply
         });
     }
-
-    void add_pooling_op()
-    {
-        apply_map.emplace("pooling", [=](instruction_ref ins) {
-            auto&& op   = any_cast<op::pooling>(ins->get_operator());
-            auto pd     = make_pooling(op);
-            auto output = insert_allocation(ins, ins->get_shape());
-            return prog->replace_instruction(
-                ins, miopen_pooling{op, std::move(pd)}, ins->inputs().at(0), output);
-        });
-    }
     void add_generic_op(const std::string& name) { add_generic_op(name, "gpu::" + name); }
     void add_generic_op(const std::string& op_name, const std::string& gpu_name)
...
@@ -56,6 +56,12 @@ argument miopen_pooling::compute(context& ctx,
     return args[1];
 }
+void miopen_pooling::finalize(context&, const shape&, const std::vector<shape>&)
+{
+    if(pd == nullptr)
+        pd = make_pooling(op);
+}
+
 } // namespace gpu
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
 #include <migraphx/gpu/target.hpp>
+#include <migraphx/register_target.hpp>
 #include <migraphx/gpu/lowering.hpp>
 #include <migraphx/memory_coloring.hpp>
 #include <migraphx/gpu/write_literals.hpp>
@@ -91,7 +92,7 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
     // clang-format on
 }
-std::string target::name() const { return "miopen"; }
+std::string target::name() const { return "gpu"; }
 migraphx::context target::get_context() const { return context{}; }
@@ -101,6 +102,8 @@ argument target::copy_from(const argument& arg) const { return gpu::from_gpu(arg
 argument target::allocate(const shape& s) const { return gpu::allocate_gpu(s); }
+MIGRAPHX_REGISTER_TARGET(target);
+
 } // namespace gpu
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
@@ -7,7 +7,7 @@ find_package(Threads REQUIRED)
 include(ProcessorCount)
 ProcessorCount(N)
 set(CTEST_PARALLEL_LEVEL ${N} CACHE STRING "CTest parallel level")
-add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND} --output-on-failure -j ${CTEST_PARALLEL_LEVEL} -C ${CMAKE_CFG_INTDIR} --timeout 1500)
+add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND} --output-on-failure -j ${CTEST_PARALLEL_LEVEL} -C ${CMAKE_CFG_INTDIR} --timeout 2000)
 add_custom_target(tests)
 find_program(MIGRAPHX_GDB gdb)
...
@@ -12,6 +12,7 @@ endfunction()
 add_api_test(cpu test_cpu.cpp)
+add_api_test(save_load test_save_load.cpp)
 if(MIGRAPHX_ENABLE_GPU)
     add_api_test(gpu test_gpu.cpp)
     # GPU-based tests
...
#include <migraphx/migraphx.h>
#include <migraphx/migraphx.hpp>

#include "test.hpp"

TEST_CASE(load_save_default)
{
    std::string filename = "migraphx_api_load_save.dat";
    auto p1              = migraphx::parse_onnx("conv_relu_maxpool_test.onnx");
    auto s1              = p1.get_output_shapes();
    migraphx::save(p1, filename.c_str());
    auto p2 = migraphx::load(filename.c_str());
    auto s2 = p2.get_output_shapes();
    EXPECT(s1.size() == s2.size());
    EXPECT(bool{s1.front() == s2.front()});
    EXPECT(bool{p1.sort() == p2.sort()});
    std::remove(filename.c_str());
}

TEST_CASE(load_save_json)
{
    std::string filename = "migraphx_api_load_save.json";
    auto p1              = migraphx::parse_onnx("conv_relu_maxpool_test.onnx");
    auto s1              = p1.get_output_shapes();
    migraphx_file_options options;
    options.format = "json";
    migraphx::save(p1, filename.c_str(), options);
    auto p2 = migraphx::load(filename.c_str(), options);
    auto s2 = p2.get_output_shapes();
    EXPECT(s1.size() == s2.size());
    EXPECT(bool{s1.front() == s2.front()});
    EXPECT(bool{p1.sort() == p2.sort()});
    std::remove(filename.c_str());
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -18,6 +18,7 @@ add_dependencies(tests migraphx_py)
 add_dependencies(check migraphx_py)
 add_py_test(cpu test_cpu.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
+add_py_test(save_load test_save_load.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
 if(MIGRAPHX_ENABLE_GPU)
     add_py_test(gpu_offload test_gpu_offload.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
     add_py_test(gpu test_gpu.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
...
 import sys
-if sys.version_info < (3, 0):
-    sys.exit()
 import migraphx
-import numpy as np
+try:
+    import numpy as np
+except:
+    sys.exit()
 def test_conv_relu():
...
import migraphx, tempfile


def test_conv_relu(format):
    p1 = migraphx.parse_onnx("conv_relu_maxpool_test.onnx")
    print(p1)
    s1 = p1.get_output_shapes()[-1]
    with tempfile.NamedTemporaryFile() as t:
        migraphx.save(p1, t.name, format=format)
        p2 = migraphx.load(t.name, format=format)
        print(p2)
        s2 = p2.get_output_shapes()[-1]
        assert s1 == s2
        assert p1.sort() == p2.sort()


test_conv_relu('msgpack')
test_conv_relu('json')