Commit f9437603 authored by Khalique Ahmed's avatar Khalique Ahmed
Browse files

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into mi100_opts

parents 781ce146 658cdab0
#include <migraphx/gpu/compile_hip.hpp>
#include <migraphx/file_buffer.hpp>
#include <migraphx/tmp_dir.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/errors.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/compile_src.hpp>
#include <migraphx/process.hpp>
#include <cassert>
namespace migraphx {
......@@ -24,13 +24,11 @@ bool is_hip_clang_compiler()
std::vector<std::vector<char>>
compile_hip_src(const std::vector<src_file>& srcs, std::string params, const std::string& arch)
{
std::vector<std::vector<char>> hsacos;
assert(not srcs.empty());
if(not is_hcc_compiler() and not is_hip_clang_compiler())
MIGRAPHX_THROW("Unknown hip compiler: " +
std::string(MIGRAPHX_STRINGIZE(MIGRAPHX_HIP_COMPILER)));
assert(not srcs.empty());
tmp_dir td{};
params += " -Wno-cuda-compat";
if(params.find("-std=") == std::string::npos)
params += " --std=c++17";
params += " -fno-gpu-rdc";
......@@ -46,55 +44,31 @@ compile_hip_src(const std::vector<src_file>& srcs, std::string params, const std
params += " -O3 ";
}
params += " -Wno-unused-command-line-argument -I. ";
params += " -Wno-unused-command-line-argument -Wno-cuda-compat ";
params += MIGRAPHX_STRINGIZE(MIGRAPHX_HIP_COMPILER_FLAGS);
std::string output_flags{};
for(const auto& src : srcs)
{
fs::path full_path = td.path / src.path;
fs::path parent_path = full_path.parent_path();
fs::create_directories(parent_path);
write_buffer(full_path.string(), src.content.first, src.len());
if(src.path.extension().string() == ".cpp")
{
params += " " + src.path.filename().string();
output_flags = " -o " + src.path.stem().string() + ".o";
}
}
params += output_flags;
td.execute(MIGRAPHX_STRINGIZE(MIGRAPHX_HIP_COMPILER), params);
src_compiler compiler;
compiler.flags = params;
compiler.compiler = MIGRAPHX_STRINGIZE(MIGRAPHX_HIP_COMPILER);
for(const auto& entry : fs::directory_iterator{td.path})
{
const auto& obj_path = entry.path();
if(not fs::is_regular_file(obj_path))
continue;
if(obj_path.extension() != ".o")
continue;
if(is_hcc_compiler())
compiler.process = [&](const fs::path& obj_path) -> fs::path {
process{MIGRAPHX_STRINGIZE(MIGRAPHX_EXTRACT_KERNEL) + std::string{" -i "} +
obj_path.string()}
.cwd(obj_path.parent_path());
for(const auto& entry : fs::directory_iterator{obj_path.parent_path()})
{
// call extract kernel
td.execute(MIGRAPHX_STRINGIZE(MIGRAPHX_EXTRACT_KERNEL), " -i " + obj_path.string());
}
}
const std::string ext = is_hcc_compiler() ? ".hsaco" : ".o";
for(const auto& entry : fs::directory_iterator{td.path})
{
const auto& obj_path = entry.path();
if(not fs::is_regular_file(obj_path))
const auto& hsaco_path = entry.path();
if(not fs::is_regular_file(hsaco_path))
continue;
if(obj_path.extension() != ext)
if(hsaco_path.extension() != ".hsaco")
continue;
hsacos.push_back(read_buffer(obj_path.string()));
return hsaco_path;
}
MIGRAPHX_THROW("Missing hsaco");
};
return hsacos;
return {compiler.compile(srcs)};
}
} // namespace gpu
......
......@@ -3,6 +3,7 @@
#include <migraphx/config.hpp>
#include <migraphx/filesystem.hpp>
#include <migraphx/compile_src.hpp>
#include <string>
#include <utility>
#include <vector>
......@@ -11,13 +12,6 @@ namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
// A single source file handed to the compiler: `path` is the file name
// relative to the compilation directory and `content` is a [first, last)
// pointer pair into memory owned by the caller (src_file does not copy it).
struct src_file
{
    fs::path path;
    std::pair<const char*, const char*> content;
    // Size of the content in bytes, derived from the pointer pair.
    std::size_t len() const { return content.second - content.first; }
};

// Compile the given HIP sources with `params` as extra compiler flags for the
// target architecture string `arch`; returns one binary blob per code object.
std::vector<std::vector<char>>
compile_hip_src(const std::vector<src_file>& srcs, std::string params, const std::string& arch);
......
#include <migraphx/tmp_dir.hpp>
#include <migraphx/env.hpp>
#include <migraphx/errors.hpp>
#include <migraphx/process.hpp>
#include <algorithm>
#include <random>
#include <thread>
......@@ -14,7 +15,6 @@ namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_DEBUG_SAVE_TEMP_DIR)
MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_TRACE_CMD_EXECUTE)
std::string random_string(std::string::size_type length)
{
......@@ -35,34 +35,22 @@ std::string unique_string(const std::string& prefix)
{
auto pid = getpid();
auto tid = std::this_thread::get_id();
auto clk = std::chrono::steady_clock::now().time_since_epoch().count();
std::stringstream ss;
ss << prefix << "-" << pid << "-" << tid << "-" << random_string(64);
ss << std::hex << prefix << "-" << pid << "-" << tid << "-" << clk << "-" << random_string(16);
return ss.str();
}
tmp_dir::tmp_dir() : path(fs::temp_directory_path() / unique_string("migraphx"))
tmp_dir::tmp_dir(const std::string& prefix)
: path(fs::temp_directory_path() /
unique_string(prefix.empty() ? "migraphx" : "migraphx-" + prefix))
{
fs::create_directories(this->path);
}
// Run `cmd` via std::system and throw when it exits with a non-zero status.
// Under MIGRAPHX_USE_CLANG_TIDY the body is compiled out -- presumably to
// silence the linter's ban on std::system; confirm against the build setup.
void system_cmd(const std::string& cmd)
{
// We shouldn't call system commands
#ifdef MIGRAPHX_USE_CLANG_TIDY
    (void)cmd;
#else
    if(std::system(cmd.c_str()) != 0)
        MIGRAPHX_THROW("Can't execute " + cmd);
#endif
}
void tmp_dir::execute(const std::string& exe, const std::string& args) const
{
std::string cd = "cd " + this->path.string() + "; ";
std::string cmd = cd + exe + " " + args; // + " > /dev/null";
if(enabled(MIGRAPHX_TRACE_CMD_EXECUTE{}))
std::cout << cmd << std::endl;
system_cmd(cmd);
process{exe + " " + args}.cwd(this->path).exec();
}
tmp_dir::~tmp_dir()
......
......@@ -204,7 +204,7 @@ const std::string& value::get_key() const { return key; }
std::vector<value>* if_array_impl(const std::shared_ptr<value_base_impl>& x)
{
if(!x)
if(x == nullptr)
return nullptr;
return x->if_array();
}
......@@ -286,10 +286,26 @@ const value* value::begin() const
value* value::end() { return begin() + size(); }
const value* value::end() const { return begin() + size(); }
value& value::front() { return *begin(); }
const value& value::front() const { return *begin(); }
value& value::back() { return *std::prev(end()); }
const value& value::back() const { return *std::prev(end()); }
// First element of the value's array form; asserts the array is non-empty
// (calling front() on an empty value is a precondition violation).
value& value::front()
{
    assert(this->size() > 0);
    return *begin();
}
const value& value::front() const
{
    assert(this->size() > 0);
    return *begin();
}
// Last element of the value's array form; same non-empty precondition.
value& value::back()
{
    assert(this->size() > 0);
    return *std::prev(end());
}
const value& value::back() const
{
    assert(this->size() > 0);
    return *std::prev(end());
}
value& value::at(std::size_t i)
{
auto* a = if_array_impl(x);
......@@ -322,8 +338,16 @@ const value& value::at(const std::string& pkey) const
MIGRAPHX_THROW("Key not found: " + pkey);
return *r;
}
value& value::operator[](std::size_t i) { return *(begin() + i); }
const value& value::operator[](std::size_t i) const { return *(begin() + i); }
// Indexed access without bounds checking at runtime (unlike at(), which
// throws); the assert documents the in-range precondition in debug builds.
value& value::operator[](std::size_t i)
{
    assert(i < this->size());
    return *(begin() + i);
}
const value& value::operator[](std::size_t i) const
{
    assert(i < this->size());
    return *(begin() + i);
}
value& value::operator[](const std::string& pkey) { return *emplace(pkey, nullptr).first; }
void value::clear() { get_array_throw(x).clear(); }
......@@ -409,8 +433,8 @@ template <class F>
bool compare(const value& x, const value& y, F f)
{
bool result = false;
x.visit([&](auto&& a) {
y.visit([&](auto&& b) {
x.visit_value([&](auto&& a) {
y.visit_value([&](auto&& b) {
result = compare_common_impl(rank<1>{}, f, x.get_key(), a, y.get_key(), b);
});
});
......@@ -436,6 +460,8 @@ bool operator<=(const value& x, const value& y) { return x == y or x < y; }
bool operator>(const value& x, const value& y) { return y < x; }
bool operator>=(const value& x, const value& y) { return x == y or x > y; }
void print_value(std::ostream& os, std::nullptr_t) { os << "null"; }
template <class T>
void print_value(std::ostream& os, const T& x)
{
......@@ -450,7 +476,6 @@ void print_value(std::ostream& os, const std::pair<T, U>& x)
print_value(os, x.second);
}
void print_value(std::ostream& os, const std::nullptr_t&) { os << "null"; }
void print_value(std::ostream& os, const std::vector<value>& x)
{
os << "{";
......
// clang-format off
#define MIGRAPHX_VERSION_MAJOR @PROJECT_VERSION_MAJOR@
#define MIGRAPHX_VERSION_MINOR @PROJECT_VERSION_MINOR@
// clang-format on
......@@ -69,7 +69,7 @@ int main() {}
)__migraphx__";
migraphx::gpu::src_file make_src_file(const std::string& name, const std::string& content)
migraphx::src_file make_src_file(const std::string& name, const std::string& content)
{
return {name, std::make_pair(content.data(), content.data() + content.size())};
}
......@@ -122,13 +122,13 @@ TEST_CASE(code_object_hip)
auto input_literal = migraphx::generate_literal(input);
auto output_literal = migraphx::transform(input_literal, [](auto x) { return x + 2; });
auto x = mm->add_literal(input_literal);
auto y = mm->add_instruction(
migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(input)}}));
auto y = mm->add_parameter("output", input);
mm->add_instruction(co, x, y);
migraphx::compile_options options;
p.compile(migraphx::gpu::target{}, options);
auto result = migraphx::gpu::from_gpu(p.eval({}).front());
auto result =
migraphx::gpu::from_gpu(p.eval({{"output", migraphx::gpu::allocate_gpu(input)}}).front());
EXPECT(result == output_literal.get_argument());
}
......@@ -149,12 +149,12 @@ TEST_CASE(compile_code_object_hip)
auto input_literal = migraphx::generate_literal(input);
auto output_literal = migraphx::transform(input_literal, [](auto x) { return x + 1; });
auto x = mm->add_literal(input_literal);
auto y = mm->add_instruction(
migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(input)}}));
auto y = mm->add_parameter("output", input);
mm->add_instruction(co, x, y);
p.compile(migraphx::gpu::target{}, migraphx::compile_options{});
auto result = migraphx::gpu::from_gpu(p.eval({}).front());
auto result =
migraphx::gpu::from_gpu(p.eval({{"output", migraphx::gpu::allocate_gpu(input)}}).front());
EXPECT(result == output_literal.get_argument());
}
......
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <sstream>
#include <unordered_map>
#include <vector>
......@@ -67,6 +69,27 @@ struct nop
}
};
// Null "operator" tag used by the expression machinery: it renders as the
// empty string and simply invokes the wrapped callable, preserving the
// reference-ness of the callable's result via decltype(auto).
struct function
{
    static std::string as_string() { return ""; }

    template <class Callable>
    static decltype(auto) call(Callable&& f)
    {
        return f();
    }
};
// Write the elements of [start, last) to `s` separated by ", ".
// An empty range writes nothing. Returns the stream for chaining.
template <class Iterator>
inline std::ostream& stream_range(std::ostream& s, Iterator start, Iterator last)
{
    if(start == last)
        return s;
    s << *start;
    for(auto it = std::next(start); it != last; ++it)
        s << ", " << *it;
    return s;
}
inline std::ostream& operator<<(std::ostream& s, std::nullptr_t)
{
s << "nullptr";
......@@ -77,10 +100,7 @@ template <class T>
inline std::ostream& operator<<(std::ostream& s, const std::vector<T>& v)
{
s << "{ ";
for(auto&& x : v)
{
s << x << ", ";
}
stream_range(s, v.begin(), v.end());
s << "}";
return s;
}
......@@ -88,10 +108,7 @@ inline std::ostream& operator<<(std::ostream& s, const std::vector<T>& v)
inline std::ostream& operator<<(std::ostream& s, const std::vector<bool>& v)
{
s << "{ ";
for(auto x : v)
{
s << x << ", ";
}
stream_range(s, v.begin(), v.end());
s << "}";
return s;
}
......@@ -142,7 +159,10 @@ struct lhs_expression
friend std::ostream& operator<<(std::ostream& s, const lhs_expression& self)
{
s << Operator::as_string() << " " << self.lhs;
std::string op = Operator::as_string();
if(not op.empty())
s << Operator::as_string() << " ";
s << self.lhs;
return s;
}
......@@ -180,6 +200,55 @@ struct lhs_expression
TEST_LHS_REOPERATOR (^)
};
// Wraps a callable `f` together with a human-readable message so that a
// failing EXPECT can print the description instead of an opaque closure.
template <class F>
struct predicate
{
    std::string msg;
    F f;

    // Print the stored description.
    friend std::ostream& operator<<(std::ostream& s, const predicate& self)
    {
        s << self.msg;
        return s;
    }

    // Invoke the wrapped callable.
    decltype(auto) operator()() const { return f(); }

    // Implicit conversion to the callable's result, so the predicate can be
    // evaluated directly where its value is expected.
    operator decltype(auto)() const { return f(); }
};
// Build an lhs_expression around a named predicate: `msg` is what gets
// printed on failure, `f` lazily produces the boolean-ish value. The
// `function{}` tag means no operator text is printed before the message.
template <class F>
auto make_predicate(const std::string& msg, F f)
{
    return make_lhs_expression(predicate<F>{msg, f}, function{});
}
// Render any streamable value as a std::string via its operator<<.
template <class T>
std::string as_string(const T& x)
{
    std::ostringstream buffer;
    buffer << x;
    return buffer.str();
}
// Render the range [start, last) as a comma-separated string, e.g. "1, 2, 3",
// by delegating to stream_range.
template <class Iterator>
std::string as_string(Iterator start, Iterator last)
{
    std::stringstream ss;
    stream_range(ss, start, last);
    return ss.str();
}
// Wrap callable `f` as a named function for test expressions: the returned
// lambda defers evaluation of f(xs...) inside a predicate whose message
// renders the call as "name(arg1, arg2, ...)" in failure output.
template <class F>
auto make_function(const std::string& name, F f)
{
    return [=](auto&&... xs) {
        // Stringify every argument eagerly so the message reflects the
        // values at call time, before f runs.
        std::vector<std::string> args = {as_string(xs)...};
        return make_predicate(name + "(" + as_string(args.begin(), args.end()) + ")",
                              [=] { return f(xs...); });
    };
}
struct capture
{
template <class T>
......@@ -236,6 +305,13 @@ bool throws(F f, const std::string& msg = "")
}
}
// Approximate-equality predicate for EXPECT: true when |px - py| < ptol
// (strict inequality). Prints as "near(px, py, ptol)" on failure.
template <class T, class U>
auto near(T px, U py, double ptol = 1e-6f)
{
    return make_function("near", [](auto x, auto y, auto tol) { return std::abs(x - y) < tol; })(
        px, py, ptol);
}
using string_map = std::unordered_map<std::string, std::vector<std::string>>;
template <class Keyword>
......
#include <migraphx/compile_src.hpp>
#include <migraphx/dynamic_loader.hpp>
#include <migraphx/cpp_generator.hpp>
#include <migraphx/module.hpp>
#include <migraphx/make_op.hpp>
#include <test.hpp>
// NOLINTNEXTLINE
const std::string add_42_src = R"migraphx(
extern "C" int add(int x)
{
return x+42;
}
)migraphx";
// NOLINTNEXTLINE
const std::string preamble = R"migraphx(
#include <cmath>
)migraphx";
// Compile `src` into a shared library using src_compiler with the given extra
// compiler `flags`, then load the exported symbol `fname` as std::function<F>.
// Fix: insert a space before "-std=c++14" so a non-empty `flags` value
// (e.g. "-O2") is not fused into a single bogus flag like "-O2-std=c++14".
template <class F>
std::function<F>
compile_function(const std::string& src, const std::string& flags, const std::string& fname)
{
    migraphx::src_compiler compiler;
    compiler.flags  = flags + " -std=c++14 -fPIC -shared";
    compiler.output = "libsimple.so";
    migraphx::src_file f;
    f.path = "main.cpp";
    // content is a non-owning [first, last) pointer pair into `src`, which
    // stays alive for the duration of the compile call.
    f.content  = std::make_pair(src.data(), src.data() + src.size());
    auto image = compiler.compile({f});
    // NOTE(review): assumes get_function keeps the loaded image alive for the
    // lifetime of the returned std::function -- confirm in dynamic_loader.
    return migraphx::dynamic_loader{image}.get_function<F>(fname);
}
// Generate C++ source for module `m` with cpp_generator, compile it, and load
// the function named after the module. fmap prefixes math builtins with
// "std::"; the extern "C" attribute keeps the symbol name unmangled so it can
// be looked up by m.name().
template <class F>
std::function<F> compile_module(const migraphx::module& m, const std::string& flags = "")
{
    migraphx::cpp_generator g;
    g.fmap([](auto&& name) { return "std::" + name; });
    g.create_function(g.generate_module(m).set_attributes({"extern \"C\""}));
    return compile_function<F>(preamble + g.str(), flags, m.name());
}
// Compile add_42_src directly and check the loaded "add" symbol adds 42.
TEST_CASE(simple_run)
{
    auto f = compile_function<int(int)>(add_42_src, "", "add");
    EXPECT(f(8) == 50);
    EXPECT(f(10) == 52);
}
// Build sqrt(x + y) as a migraphx module, generate C++ for it, compile, load
// and evaluate it against expected values.
TEST_CASE(generate_module)
{
    migraphx::module m("foo");
    auto x   = m.add_parameter("x", migraphx::shape::float_type);
    auto y   = m.add_parameter("y", migraphx::shape::float_type);
    auto sum = m.add_instruction(migraphx::make_op("add"), x, y);
    m.add_instruction(migraphx::make_op("sqrt"), sum);
    auto f = compile_module<float(float, float)>(m);
    EXPECT(test::near(f(2, 2), 2));
    EXPECT(test::near(f(10, 6), 4));
    EXPECT(test::near(f(1, 2), std::sqrt(3)));
}
// Same as generate_module but with a literal constant in the graph:
// computes sqrt((x + 1) + y).
TEST_CASE(generate_module_with_literals)
{
    migraphx::module m("foo");
    auto x    = m.add_parameter("x", migraphx::shape::float_type);
    auto y    = m.add_parameter("y", migraphx::shape::float_type);
    auto z    = m.add_literal(1.f);
    auto sum1 = m.add_instruction(migraphx::make_op("add"), x, z);
    auto sum2 = m.add_instruction(migraphx::make_op("add"), sum1, y);
    m.add_instruction(migraphx::make_op("sqrt"), sum2);
    auto f = compile_module<float(float, float)>(m);
    EXPECT(test::near(f(1, 2), 2));
    EXPECT(test::near(f(9, 6), 4));
    EXPECT(test::near(f(0, 2), std::sqrt(3)));
}
// Entry point: run all registered TEST_CASEs.
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -1595,9 +1595,12 @@ def if_literal_test():
onnx.TensorProto.FLOAT, [5])
else_out = onnx.helper.make_tensor_value_info('else_out',
onnx.TensorProto.FLOAT, [5])
empty_out = onnx.helper.make_tensor_value_info('empty_out',
onnx.TensorProto.FLOAT, [])
x = np.array([1, 2, 3, 4, 5]).astype(np.float32)
y = np.array([5, 4, 3, 2, 1]).astype(np.float32)
z = np.array([]).astype(np.float32)
then_const_node = onnx.helper.make_node(
'Constant',
......@@ -1611,11 +1614,17 @@ def if_literal_test():
outputs=['else_out'],
value=onnx.numpy_helper.from_array(y))
then_body = onnx.helper.make_graph([then_const_node], 'then_body', [],
[then_out])
empty_const_node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['empty_out'],
value=onnx.numpy_helper.from_array(z))
else_body = onnx.helper.make_graph([else_const_node], 'else_body', [],
[else_out])
then_body = onnx.helper.make_graph([then_const_node, empty_const_node],
'then_body', [], [then_out])
else_body = onnx.helper.make_graph([else_const_node, empty_const_node],
'else_body', [], [else_out])
cond_input = onnx.helper.make_tensor_value_info('cond',
onnx.TensorProto.BOOL, [])
......@@ -2940,6 +2949,44 @@ def reducesum_test():
return ([node], [x], [y])
@onnx_test
def reducesum_empty_axes_test():
    """ReduceSum with an empty `axes` input and noop_with_empty_axes=False:
    per ONNX opset 13 this reduces over all axes of the input."""
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
    # NOTE(review): [3, 4, 1, 6] looks inconsistent with a keepdims=0
    # all-axes reduction (scalar result) -- confirm the parser ignores the
    # declared output shape.
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 6])

    # Empty axes tensor supplied as an initializer (opset 13 moves axes
    # from an attribute to an input).
    axes = np.array([], dtype=np.int64)
    axes_tensor = helper.make_tensor(name="axes",
                                     data_type=TensorProto.INT64,
                                     dims=axes.shape,
                                     vals=axes.astype(np.int64))

    node = onnx.helper.make_node('ReduceSum',
                                 inputs=['x', 'axes'],
                                 outputs=['y'],
                                 keepdims=0,
                                 noop_with_empty_axes=False)

    return ([node], [x], [y], [axes_tensor])
@onnx_test
def reducesum_noop_test():
    """ReduceSum with an empty `axes` input and noop_with_empty_axes=True:
    per ONNX opset 13 the op is an identity (input passed through)."""
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
    # NOTE(review): for a no-op the output shape should equal the input
    # [3, 4, 5, 6]; [3, 4, 1, 6] looks copy-pasted -- confirm the declared
    # value_info shape is not checked by the parser.
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 6])

    # Empty axes tensor supplied as an initializer.
    axes = np.array([], dtype=np.int64)
    axes_tensor = helper.make_tensor(name="axes",
                                     data_type=TensorProto.INT64,
                                     dims=axes.shape,
                                     vals=axes.astype(np.int64))

    node = onnx.helper.make_node('ReduceSum',
                                 inputs=['x', 'axes'],
                                 outputs=['y'],
                                 keepdims=0,
                                 noop_with_empty_axes=True)

    return ([node], [x], [y], [axes_tensor])
@onnx_test
def reducesum_keepdims_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
......@@ -3470,6 +3517,40 @@ def sqrt_test():
return ([node], [x], [y])
@onnx_test
def squeeze_axes_input_test():
    """Squeeze with axes supplied as an initializer input (opset 13 form):
    removes size-1 dims 1 and 3 so [3, 1, 5, 1] -> [3, 5]."""
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 1, 5, 1])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 5])

    axes = np.array([1, 3], dtype=np.int64)
    axes_tensor = helper.make_tensor(name="axes",
                                     data_type=TensorProto.INT64,
                                     dims=axes.shape,
                                     vals=axes.astype(np.int64))

    node = onnx.helper.make_node('Squeeze',
                                 inputs=['x', 'axes'],
                                 outputs=['y'])

    return ([node], [x], [y], [axes_tensor])
@onnx_test
def squeeze_empty_axes_test():
    """Squeeze with an empty axes initializer: per ONNX semantics all size-1
    dims are removed, so [3, 1, 5, 1] -> [3, 5]."""
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 1, 5, 1])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 5])

    axes = np.array([], dtype=np.int64)
    axes_tensor = helper.make_tensor(name="axes",
                                     data_type=TensorProto.INT64,
                                     dims=axes.shape,
                                     vals=axes.astype(np.int64))

    node = onnx.helper.make_node('Squeeze',
                                 inputs=['x', 'axes'],
                                 outputs=['y'])

    return ([node], [x], [y], [axes_tensor])
@onnx_test
def squeeze_unsqueeze_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
......
......@@ -1420,11 +1420,13 @@ TEST_CASE(if_literal_test)
auto* then_mod = p.create_module("If_1_if");
std::vector<float> data1 = {1, 2, 3, 4, 5};
auto l1 = then_mod->add_literal(migraphx::literal(s, data1));
then_mod->add_literal({});
then_mod->add_return({l1});
auto* else_mod = p.create_module("If_1_else");
std::vector<float> data2 = {5, 4, 3, 2, 1};
auto l2 = else_mod->add_literal(migraphx::literal(s, data2));
else_mod->add_literal({});
else_mod->add_return({l2});
auto ret = mm->add_instruction(migraphx::make_op("if"), {cond}, {then_mod, else_mod});
......@@ -1749,7 +1751,9 @@ TEST_CASE(lessorequal_test)
auto input1 = mm->add_parameter("x1", migraphx::shape{migraphx::shape::float_type, {3}});
auto input2 = mm->add_parameter("x2", migraphx::shape{migraphx::shape::float_type, {3}});
auto temp = mm->add_instruction(migraphx::make_op("greater"), input1, input2);
auto le = mm->add_instruction(migraphx::make_op("not"), temp);
auto bt = mm->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::bool_type}}), temp);
auto le = mm->add_instruction(migraphx::make_op("not"), bt);
mm->add_return({le});
......@@ -1833,7 +1837,7 @@ TEST_CASE(logsoftmax_nonstd_input_test)
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {6, 9}});
auto l1 = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0, 1}}, {"starts", {1, 0}}, {"ends", {4, 4}}}), l0);
auto l2 = mm->add_instruction(migraphx::make_op("logsoftmax", {{"axis", 1}}), l1);
auto l2 = mm->add_instruction(migraphx::make_op("logsoftmax", {{"axis", -1}}), l1);
mm->add_return({l2});
auto prog = migraphx::parse_onnx("logsoftmax_nonstd_input_test.onnx");
......@@ -2317,6 +2321,10 @@ TEST_CASE(quantizelinear_test)
{{"target_type", migraphx::to_value(migraphx::shape::int32_type)}}),
round);
auto add = mm->add_instruction(migraphx::make_op("add"), round, l2_mbcast);
min_val =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"output_lens", {5}}}), min_val);
max_val =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"output_lens", {5}}}), max_val);
auto clip = mm->add_instruction(migraphx::make_op("clip"), add, min_val, max_val);
mm->add_instruction(
migraphx::make_op("convert",
......@@ -2356,6 +2364,10 @@ migraphx::program make_quantizelinear_axis_prog()
{{"target_type", migraphx::to_value(migraphx::shape::int32_type)}}),
round);
auto add = mm->add_instruction(migraphx::make_op("add"), round, l2_bcast);
min_val = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"output_lens", {1, 1, 5, 1}}}), min_val);
max_val = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"output_lens", {1, 1, 5, 1}}}), max_val);
auto clip = mm->add_instruction(migraphx::make_op("clip"), add, min_val, max_val);
mm->add_instruction(
migraphx::make_op("convert",
......@@ -2541,6 +2553,33 @@ TEST_CASE(reducesum_test)
EXPECT(p == prog);
}
// Empty axes + noop_with_empty_axes=false: the parser reduces over all axes
// and then squeezes them (keepdims=0). The leading empty literal mirrors the
// empty axes initializer in the onnx file.
TEST_CASE(reducesum_empty_axes_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    mm->add_literal({});
    auto x  = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
    auto l1 = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {0, 1, 2, 3}}}), x);
    auto r  = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {0, 1, 2, 3}}}), l1);
    mm->add_return({r});

    auto prog = migraphx::parse_onnx("reducesum_empty_axes_test.onnx");
    EXPECT(p == prog);
}
// Empty axes + noop_with_empty_axes=true: ReduceSum parses to a pure
// pass-through of the input parameter.
TEST_CASE(reducesum_noop_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    mm->add_literal({});
    auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
    mm->add_return({x});

    auto prog = migraphx::parse_onnx("reducesum_noop_test.onnx");
    EXPECT(p == prog);
}
TEST_CASE(reducesum_multiaxis_test)
{
migraphx::program p;
......@@ -2961,7 +3000,7 @@ TEST_CASE(softmax_nonstd_input_test)
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {6, 8}});
auto l1 = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0, 1}}, {"starts", {1, 0}}, {"ends", {4, 4}}}), l0);
auto l2 = mm->add_instruction(migraphx::make_op("softmax", {{"axis", 1}}), l1);
auto l2 = mm->add_instruction(migraphx::make_op("softmax", {{"axis", -1}}), l1);
mm->add_return({l2});
auto prog = migraphx::parse_onnx("softmax_nonstd_input_test.onnx");
......@@ -3045,6 +3084,34 @@ TEST_CASE(squeeze_unsqueeze_test)
EXPECT(p == prog);
}
// Squeeze with axes given as an initializer input: parsed into a squeeze op
// with attribute axes {1, 3}. The literal mirrors the axes initializer.
TEST_CASE(squeeze_axes_input_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    mm->add_literal(migraphx::literal({migraphx::shape::int64_type, {2}}, {1, 3}));
    auto l0 = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 1, 5, 1}});
    auto l1 = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {1, 3}}}), l0);
    mm->add_return({l1});

    auto prog = migraphx::parse_onnx("squeeze_axes_input_test.onnx");
    EXPECT(p == prog);
}
// Squeeze with an empty axes initializer: parsed into an attribute-less
// squeeze, which removes all size-1 dimensions.
TEST_CASE(squeeze_empty_axes_test)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    mm->add_literal({});
    auto l0 = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 1, 5, 1}});
    auto l1 = mm->add_instruction(migraphx::make_op("squeeze"), l0);
    mm->add_return({l1});

    auto prog = migraphx::parse_onnx("squeeze_empty_axes_test.onnx");
    EXPECT(p == prog);
}
TEST_CASE(sub_bcast_test)
{
migraphx::program p;
......
squeeze_axes_input_test:r

x
axesy"Squeezesqueeze_axes_input_test*:BaxesZ
x




b
y


B
\ No newline at end of file
......@@ -46,6 +46,33 @@ struct simple_operation_no_print
}
};
// Minimal operation that implements the optional `compile` member; used to
// verify migraphx::operation's compile() dispatch.
struct compilable_op
{
    std::string name() const { return "compilable"; }

    // Pass-through compute: returns the first argument unchanged, or an
    // empty argument when none are given.
    migraphx::argument
    compute(migraphx::context&, const migraphx::shape&, std::vector<migraphx::argument> args) const
    {
        if(args.empty())
            return {};
        return args.front();
    }

    // Output shape equals the first input shape (empty shape for no inputs).
    migraphx::shape compute_shape(std::vector<migraphx::shape> inputs) const
    {
        if(inputs.empty())
            return {};
        return inputs.front();
    }

    // Output aliases input 0, matching the pass-through compute.
    int output_alias(const std::vector<migraphx::shape>&) const { return 0; }

    // Marker value proving compile() was routed to this op.
    migraphx::value
    compile(migraphx::context&, const migraphx::shape&, const std::vector<migraphx::shape>&)
    {
        return {{"compiled", true}};
    }
};
TEST_CASE(operation_copy_test)
{
simple_operation s{};
......@@ -204,4 +231,20 @@ TEST_CASE(check_from_value2)
EXPECT(op1 == op2);
}
// compile() on an op that defines a compile member returns that member's
// value.
TEST_CASE(compile)
{
    migraphx::operation op = compilable_op{};
    migraphx::context ctx{};
    auto v = op.compile(ctx, {}, {});
    EXPECT(v.at("compiled").to<bool>() == true);
}
// compile() on an op without a compile member yields an empty value rather
// than failing.
TEST_CASE(compile_non_compilable)
{
    migraphx::operation op = simple_operation{};
    migraphx::context ctx{};
    auto v = op.compile(ctx, {}, {});
    EXPECT(v.empty());
}
// Entry point: run all registered TEST_CASEs.
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -10,6 +10,7 @@ import onnx
import onnx.backend.test
import numpy as np
from onnx_migraphx.backend import MIGraphXBackend as c2
from packaging import version
pytest_plugins = 'onnx.backend.test.report',
......@@ -40,6 +41,36 @@ class MIGraphXBackendTest(onnx.backend.test.BackendTest):
err_msg=prog_string)
def disabled_tests_onnx_1_7_0(backend_test):
    """Exclude backend tests that only fail when running against
    onnx <= 1.7.0 (softmax/logsoftmax axis-semantics differences)."""
    backend_test.exclude(r'test_logsoftmax_axis_0_cpu')
    backend_test.exclude(r'test_logsoftmax_axis_1_cpu')
    backend_test.exclude(r'test_logsoftmax_default_axis_cpu')
    backend_test.exclude(r'test_softmax_axis_0_cpu')
    backend_test.exclude(r'test_softmax_axis_1_cpu')
    backend_test.exclude(r'test_softmax_default_axis_cpu')
def disabled_tests_onnx_1_8_1(backend_test):
    """Exclude backend tests that fail with onnx >= 1.8, mostly ops whose
    axes moved from attributes to dynamic inputs (ReduceSum, Unsqueeze)."""
    backend_test.exclude(r'test_if_seq_cpu')
    backend_test.exclude(r'test_reduce_sum_default_axes_keepdims_example_cpu')
    backend_test.exclude(r'test_reduce_sum_default_axes_keepdims_random_cpu')
    backend_test.exclude(r'test_reduce_sum_do_not_keepdims_example_cpu')
    backend_test.exclude(r'test_reduce_sum_do_not_keepdims_random_cpu')
    backend_test.exclude(r'test_reduce_sum_empty_axes_input_noop_example_cpu')
    backend_test.exclude(r'test_reduce_sum_empty_axes_input_noop_random_cpu')
    backend_test.exclude(r'test_reduce_sum_keepdims_example_cpu')
    backend_test.exclude(r'test_reduce_sum_keepdims_random_cpu')
    backend_test.exclude(r'test_reduce_sum_negative_axes_keepdims_example_cpu')
    backend_test.exclude(r'test_reduce_sum_negative_axes_keepdims_random_cpu')
    backend_test.exclude(r'test_unsqueeze_axis_0_cpu')
    backend_test.exclude(r'test_unsqueeze_axis_1_cpu')
    backend_test.exclude(r'test_unsqueeze_axis_2_cpu')
    backend_test.exclude(r'test_unsqueeze_negative_axes_cpu')
    backend_test.exclude(r'test_unsqueeze_three_axes_cpu')
    backend_test.exclude(r'test_unsqueeze_two_axes_cpu')
    backend_test.exclude(r'test_unsqueeze_unsorted_axes_cpu')
def create_backend_test(testname=None, target_device=None):
if target_device is not None:
c2.set_device(target_device)
......@@ -203,9 +234,6 @@ def create_backend_test(testname=None, target_device=None):
)
backend_test.exclude(
r'test_argmin_no_keepdims_example_select_last_index_cpu')
backend_test.exclude(r'test_logsoftmax_axis_0_cpu')
backend_test.exclude(r'test_logsoftmax_axis_1_cpu')
backend_test.exclude(r'test_logsoftmax_default_axis_cpu')
backend_test.exclude(r'test_lrn_cpu')
backend_test.exclude(r'test_lrn_default_cpu')
backend_test.exclude(r'test_maxpool_2d_dilations_cpu')
......@@ -213,9 +241,6 @@ def create_backend_test(testname=None, target_device=None):
r'test_maxpool_with_argmax_2d_precomputed_pads_cpu')
backend_test.exclude(
r'test_maxpool_with_argmax_2d_precomputed_strides_cpu')
backend_test.exclude(r'test_softmax_axis_0_cpu')
backend_test.exclude(r'test_softmax_axis_1_cpu')
backend_test.exclude(r'test_softmax_default_axis_cpu')
# error cases
backend_test.exclude(r'test_constant_pad_cpu')
......@@ -228,6 +253,10 @@ def create_backend_test(testname=None, target_device=None):
backend_test.exclude(r'test_depthtospace_example_cpu')
backend_test.exclude(r'test_expand_dim_changed_cpu')
backend_test.exclude(r'test_expand_dim_unchanged_cpu')
backend_test.exclude(r'test_expand_shape_model1_cpu')
backend_test.exclude(r'test_expand_shape_model2_cpu')
backend_test.exclude(r'test_expand_shape_model3_cpu')
backend_test.exclude(r'test_expand_shape_model4_cpu')
backend_test.exclude(r'test_gathernd_example_float32_cpu')
backend_test.exclude(r'test_gathernd_example_int32_batch_dim1_cpu')
backend_test.exclude(r'test_gathernd_example_int32_cpu')
......@@ -238,18 +267,13 @@ def create_backend_test(testname=None, target_device=None):
backend_test.exclude(r'test_hardsigmoid_cpu')
backend_test.exclude(r'test_hardsigmoid_default_cpu')
backend_test.exclude(r'test_hardsigmoid_example_cpu')
backend_test.exclude(r'test_less_equal_bcast_cpu')
backend_test.exclude(r'test_less_equal_bcast_expanded_cpu')
backend_test.exclude(r'test_less_equal_cpu')
backend_test.exclude(r'test_less_equal_expanded_cpu')
backend_test.exclude(r'test_maxpool_2d_uint8_cpu')
backend_test.exclude(r'test_mean_example_cpu')
backend_test.exclude(r'test_mean_one_input_cpu')
backend_test.exclude(r'test_mean_two_inputs_cpu')
backend_test.exclude(r'test_negative_log_likelihood_loss_*')
backend_test.exclude(r'test_not_2d_cpu')
backend_test.exclude(r'test_not_3d_cpu')
backend_test.exclude(r'test_not_4d_cpu')
# all reduce ops have dynamic axes inputs
backend_test.exclude(r'test_size_cpu')
backend_test.exclude(r'test_size_example_cpu')
backend_test.exclude(r'test_softmax_cross_entropy_*')
......@@ -262,18 +286,19 @@ def create_backend_test(testname=None, target_device=None):
backend_test.exclude(r'test_thresholdedrelu_example_cpu')
backend_test.exclude(r'test_Embedding_cpu')
backend_test.exclude(r'test_Softplus_cpu')
backend_test.exclude(r'test_operator_selu_cpu')
backend_test.exclude(r'test_expand_shape_model1_cpu')
backend_test.exclude(r'test_expand_shape_model2_cpu')
backend_test.exclude(r'test_expand_shape_model3_cpu')
backend_test.exclude(r'test_expand_shape_model4_cpu')
# These three tests failed because of bugs in fuse_ops related to conv
# to be investigated later
# real model tests
backend_test.exclude(r'test_inception_v1_cpu')
backend_test.exclude(r'test_resnet50_cpu')
backend_test.exclude(r'test_squeezenet_cpu')
# additional cases disabled for a specific onnx version
if version.parse(onnx.__version__) <= version.parse("1.7.0"):
disabled_tests_onnx_1_7_0(backend_test)
if version.parse(onnx.__version__) >= version.parse("1.8.0"):
disabled_tests_onnx_1_8_1(backend_test)
# import all test cases at global scope to make
# them visible to python.unittest.
......
......@@ -3401,7 +3401,7 @@ TEST_CASE(softmax_test)
migraphx::shape a_shape{migraphx::shape::float_type, {5, 3, 4, 2}};
auto al = mm->add_literal(migraphx::literal{a_shape, a});
mm->add_instruction(migraphx::make_op("softmax"), al);
mm->add_instruction(migraphx::make_op("softmax", {{"axis", 1}}), al);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector(120);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment