Commit b6a9b597 authored by Shucai Xiao

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into rnn_operator

parents 128b0b65 b5090737
......@@ -117,7 +117,9 @@ rocm_enable_cppcheck(
passedByValue
unusedStructMember
functionStatic
functionConst
functionConst:*program.*
shadowFunction
shadowVar
definePrefix:*test/include/test.hpp
FORCE
INCONCLUSIVE
......
......@@ -8,10 +8,16 @@ def rocmtestnode(variant, name, body) {
mkdir build
cd build
CXX=${compiler} CXXFLAGS='-Werror -Wno-fallback' cmake ${flags} ..
CTEST_PARALLEL_LEVEL=32 make -j32 generate all doc check
CTEST_PARALLEL_LEVEL=32 make -j32 generate all doc package check
"""
echo cmd
sh cmd
if (compiler == "hcc") {
// Only archive from master or develop
if (env.BRANCH_NAME == "develop" || env.BRANCH_NAME == "master") {
archiveArtifacts artifacts: "build/*.deb", allowEmptyArchive: true, fingerprint: true
}
}
}
node(name) {
stage("checkout ${variant}") {
......
ignore:
- "test/"
......@@ -38,7 +38,7 @@
<message>
<id>definePrefix</id>
<severity>style</severity>
<summary>Macros must be prefixed with MIGRAPH_</summary>
<summary>Macros must be prefixed with MIGRAPHX_</summary>
</message>
</rule>
<rule>
......
......@@ -17,9 +17,9 @@ struct iterator_for_range
struct iterator
{
base_iterator i;
base_iterator operator*() { return i; }
base_iterator operator*() const { return i; }
base_iterator operator++() { return ++i; }
bool operator!=(const iterator& rhs) { return i != rhs.i; }
bool operator!=(const iterator& rhs) const { return i != rhs.i; }
};
iterator begin()
......
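The two const qualifiers above let a const `iterator` be dereferenced and compared, which is what cppcheck's `functionConst` check enforces. A minimal standalone illustration of the same idea, using a plain wrapper type rather than `iterator_for_range` itself:

```cpp
// Hypothetical wrapper type for illustration only (not migraphx code).
struct it_wrapper
{
    int* i;
    // Neither operation mutates the wrapper, so both can be const:
    int* operator*() const { return i; }
    bool operator!=(const it_wrapper& rhs) const { return i != rhs.i; }
};
```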
......@@ -214,7 +214,6 @@ void find_matches(program& p, Ms&&... ms)
bool match = false;
each_args(
[&](auto&& m) {
// cppcheck-suppress knownConditionTrueFalse
if(match)
return;
auto r = match_instruction(p, ins, m.matcher());
......
......@@ -640,49 +640,58 @@ struct as_shape
struct gather
{
std::size_t axis = 0;
int axis = 0;
std::string name() const { return "gather"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs, *this}.has(2);
auto lens = inputs[0].lens();
if(axis >= lens.size())
int n_dim = static_cast<int>(lens.size());
if(axis >= n_dim || axis < -n_dim)
{
MIGRAPHX_THROW("Gather, axis is out of range.");
MIGRAPHX_THROW("Gather: axis is out of range.");
}
auto type = inputs[0].type();
lens[axis] = inputs[1].elements();
// negative axis means counting dimensions from back
int axis_index = (axis < 0) ? (n_dim + axis) : axis;
auto type = inputs[0].type();
lens[axis_index] = inputs[1].elements();
return {type, lens};
}
template <class T>
void compute_index(const T& out_idx,
const int axis_index,
const std::vector<std::size_t>& vec_indices,
const std::size_t max_dim,
T& in_idx) const
{
in_idx = out_idx;
std::size_t idx = vec_indices.at(out_idx[axis]);
std::size_t idx = vec_indices.at(out_idx[axis_index]);
if(idx >= max_dim)
{
MIGRAPHX_THROW("Gather: indices are out of range in input tensor");
}
in_idx[axis] = idx;
in_idx[axis_index] = idx;
}
argument compute(const shape& output_shape, std::vector<argument> args) const
{
argument result{output_shape};
// negative axis means counting dimensions from back
int axis_index = (axis < 0) ? (output_shape.lens().size() + axis) : axis;
// max dimension in axis
std::size_t max_dim = args[0].get_shape().lens()[axis];
std::size_t max_dim = args[0].get_shape().lens()[axis_index];
std::vector<std::size_t> vec_indices;
args[1].visit([&](auto indices) { vec_indices.assign(indices.begin(), indices.end()); });
visit_all(result, args[0])([&](auto output, auto input) {
std::vector<std::size_t> in_idx;
shape_for_each(output.get_shape(), [&](const auto& idx) {
this->compute_index(idx, vec_indices, max_dim, in_idx);
this->compute_index(idx, axis_index, vec_indices, max_dim, in_idx);
output(idx.begin(), idx.end()) = input(in_idx.begin(), in_idx.end());
});
});
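The axis handling above reduces to one normalization step: a valid axis satisfies `-n_dim <= axis < n_dim`, and a negative value is shifted by the rank. A minimal standalone sketch of that check, assuming plain std types rather than migraphx's `shape` and `MIGRAPHX_THROW`:

```cpp
#include <stdexcept>

// Map a possibly negative axis into [0, n_dim), mirroring the range check
// in gather::compute_shape above.
inline int normalize_axis(int axis, int n_dim)
{
    if(axis >= n_dim || axis < -n_dim)
        throw std::out_of_range("axis is out of range");
    return axis < 0 ? n_dim + axis : axis;
}
```

For example, `normalize_axis(-1, 3)` yields `2`, matching the `test_gather_neg_axis` case further down where `axis = -1` on a rank-2 tensor gathers along dimension 1.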
......@@ -960,8 +969,7 @@ struct scalar
shape compute_shape(std::vector<shape> inputs) const
{
assert(check_shapes{inputs}.has(1).only_dims(1).size() == 1);
auto t = inputs.at(0).type();
auto input = inputs.at(0);
auto t = inputs.at(0).type();
std::vector<std::size_t> strides(scalar_bcast.lens().size(), 0);
return {t, scalar_bcast.lens(), strides};
}
......
......@@ -54,6 +54,7 @@ void par_for_impl(std::size_t n, std::size_t threadsize, F f)
f(i);
}
});
// cppcheck-suppress unreadVariable
work += grainsize;
return result;
});
......
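The suppressed line above advances the starting offset for the next thread's chunk; cppcheck flags it because the final update is never read. A rough standalone sketch of that chunked dispatch, simplified from `par_for_impl` (names and structure here are illustrative, not the real implementation):

```cpp
#include <algorithm>
#include <cstddef>
#include <thread>
#include <vector>

template <class F>
void chunked_for(std::size_t n, std::size_t nthreads, F f)
{
    // ceil(n / nthreads) items per thread
    std::size_t grainsize = (n + nthreads - 1) / nthreads;
    std::vector<std::thread> threads;
    std::size_t work = 0;
    for(std::size_t t = 0; t < nthreads && work < n; t++)
    {
        std::size_t start = work;
        std::size_t stop  = std::min(n, start + grainsize);
        threads.emplace_back([=] {
            for(std::size_t i = start; i < stop; i++)
                f(i);
        });
        // The last iteration's update is unread, hence the cppcheck
        // unreadVariable suppression in the real code.
        work += grainsize;
    }
    for(auto& th : threads)
        th.join();
}
```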
......@@ -24,7 +24,8 @@ struct onnx_parser
{
using attribute_map = std::unordered_map<std::string, onnx::AttributeProto>;
using node_map = std::unordered_map<std::string, onnx::NodeProto>;
using op_func = std::function<instruction_ref(attribute_map, std::vector<instruction_ref>)>;
using op_func =
std::function<std::vector<instruction_ref>(attribute_map, std::vector<instruction_ref>)>;
node_map nodes;
std::unordered_map<std::string, instruction_ref> instructions;
program prog = program();
......@@ -102,6 +103,15 @@ struct onnx_parser
template <class F>
void add_op(std::string name, F f)
{
ops.emplace(name, [=](auto&&... xs) {
return std::vector<instruction_ref>{f(std::forward<decltype(xs)>(xs)...)};
});
}
// Multi output op
template <class F>
void add_multi_op(std::string name, F f)
{
ops.emplace(name, f);
}
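With `op_func` now returning a vector of instructions, `add_op` becomes an adapter that wraps single-output handlers in the multi-output signature, while `add_multi_op` stores vector-returning handlers directly. A compact sketch of the pattern, with `value_t`, `handle_t`, and `op_table` as stand-in names (not migraphx API):

```cpp
#include <functional>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

using value_t  = int; // stand-in for instruction_ref
using handle_t = std::function<std::vector<value_t>(std::vector<value_t>)>;

std::unordered_map<std::string, handle_t> op_table;

// Wrap a handler returning one value so it matches the multi-output type.
template <class F>
void add_single(const std::string& name, F f)
{
    op_table.emplace(name, [=](auto&&... xs) {
        return std::vector<value_t>{f(std::forward<decltype(xs)>(xs)...)};
    });
}
```

This is why `add_mem_op`, `add_binary_op`, `add_generic_op`, and `add_variadic_op` below all switch from `ops.emplace` to `add_op`: their handlers still return a single instruction, and the wrapper lifts them into the new vector-returning table.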
......@@ -109,7 +119,7 @@ struct onnx_parser
template <class F>
void add_mem_op(std::string name, F f)
{
ops.emplace(name, [=](auto&&... xs) {
add_op(name, [=](auto&&... xs) {
return std::mem_fn(f)(*this, name, std::forward<decltype(xs)>(xs)...);
});
}
......@@ -117,17 +127,15 @@ struct onnx_parser
template <class T>
void add_binary_op(std::string name, T x)
{
ops.emplace(name, [this, x](attribute_map attributes, std::vector<instruction_ref> args) {
add_op(name, [this, x](attribute_map attributes, std::vector<instruction_ref> args) {
if(args.size() != 2)
MIGRAPHX_THROW("binary operators should have 2 operands");
if(contains(attributes, "broadcast"))
if(contains(attributes, "broadcast") and contains(attributes, "axis"))
{
uint64_t broadcasted = parse_value(attributes.at("broadcast")).at<uint64_t>();
if(broadcasted != 0)
{
uint64_t axis = (contains(attributes, "axis"))
? parse_value(attributes.at("axis")).at<uint64_t>()
: 0;
uint64_t axis = parse_value(attributes.at("axis")).at<uint64_t>();
auto l =
prog.add_instruction(op::broadcast{axis, args[0]->get_shape()}, args[1]);
return prog.add_instruction(x, args[0], l);
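The `broadcast` attribute handled here follows the legacy ONNX (opset-6 style) convention: the smaller operand's dimensions are aligned with the larger shape starting at `axis`, and the remaining dimensions are broadcast. A standalone sketch of that alignment, for illustration only:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<std::size_t> broadcast_lens(const std::vector<std::size_t>& big,
                                        const std::vector<std::size_t>& small,
                                        std::uint64_t axis)
{
    // e.g. big = {2,3,4,5}, small = {3}, axis = 1 -> small acts as {1,3,1,1}
    std::vector<std::size_t> aligned(big.size(), 1);
    for(std::size_t d = 0; d < small.size(); d++)
        aligned[axis + d] = small[d];
    return aligned; // elementwise-compatible with big after broadcasting
}
```

The parser change also tightens the condition: the explicit-axis path is only taken when both `broadcast` and `axis` attributes are present, rather than defaulting a missing axis to 0.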
......@@ -188,7 +196,7 @@ struct onnx_parser
template <class T>
void add_generic_op(std::string name, T x)
{
ops.emplace(name, [this, x](attribute_map, std::vector<instruction_ref> args) {
add_op(name, [this, x](attribute_map, std::vector<instruction_ref> args) {
return prog.add_instruction(x, args);
});
}
......@@ -196,7 +204,7 @@ struct onnx_parser
template <class T>
void add_variadic_op(std::string name, T x)
{
ops.emplace(name, [this, x](attribute_map, std::vector<instruction_ref> args) {
add_op(name, [this, x](attribute_map, std::vector<instruction_ref> args) {
return std::accumulate(std::next(args.begin()),
args.end(),
args.front(),
......@@ -376,7 +384,7 @@ struct onnx_parser
instruction_ref
parse_gather(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
{
std::size_t axis = 0;
int axis = 0;
if(contains(attributes, "axis"))
{
axis = parse_value(attributes.at("axis")).at<int>();
......@@ -734,7 +742,7 @@ struct onnx_parser
}
else
{
throw std::runtime_error("Failed reading");
MIGRAPHX_THROW("Failed reading onnx file.");
}
}
......@@ -764,7 +772,7 @@ struct onnx_parser
}
for(auto&& p : nodes)
{
this->parse_node(get_name(p.second));
this->parse_node(p.first);
}
}
......@@ -790,23 +798,37 @@ struct onnx_parser
if(nodes.count(input) > 0)
{
auto&& iname = get_name(nodes.at(input));
assert(name != iname);
this->parse_node(iname);
args.push_back(instructions.at(iname));
assert(name != input);
this->parse_node(input);
args.push_back(instructions.at(input));
}
else
{
args.push_back(instructions.at(input));
}
}
std::vector<instruction_ref> result;
if(ops.count(node.op_type()) == 0)
{
instructions[name] = prog.add_instruction(unknown{node.op_type()}, args);
result.push_back(prog.add_instruction(unknown{node.op_type()}, args));
}
else
{
result = ops[node.op_type()](get_attributes(node), args);
}
// Even nodes with no declared outputs produce an output in migraphx
if(node.output().empty() and result.size() == 1)
{
instructions[name] = result.front();
}
else
{
instructions[name] = ops[node.op_type()](get_attributes(node), args);
assert(node.output().size() >= result.size());
std::transform(result.begin(),
result.end(),
node.output().begin(),
std::inserter(instructions, instructions.end()),
[](auto&& x, auto&& y) { return std::make_pair(y, x); });
}
}
}
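The multi-output bookkeeping above pairs each produced instruction with the corresponding ONNX output name via `std::transform` into a map inserter. A self-contained sketch of that idiom, with `std::string` and `int` standing in for output names and `instruction_ref`:

```cpp
#include <algorithm>
#include <iterator>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

void map_outputs()
{
    std::vector<int> result          = {10, 20};         // produced values
    std::vector<std::string> outputs = {"out0", "out1"}; // node.output()
    std::unordered_map<std::string, int> instructions;
    // Zips result with outputs; requires outputs.size() >= result.size(),
    // which the assert in parse_node guarantees.
    std::transform(result.begin(),
                   result.end(),
                   outputs.begin(),
                   std::inserter(instructions, instructions.end()),
                   [](auto&& x, auto&& y) { return std::make_pair(y, x); });
}
```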
......@@ -821,25 +843,24 @@ struct onnx_parser
return result;
}
static std::string get_name(const onnx::NodeProto& node)
{
if(node.name().empty())
{
std::string generated = "migraphx_unnamed_node";
return std::accumulate(node.output().begin(),
node.output().end(),
generated,
[](auto x, auto y) { return x + "_" + y; });
}
return node.name();
}
static node_map get_nodes(const onnx::GraphProto& graph)
{
std::unordered_map<std::string, onnx::NodeProto> result;
std::size_t n = 0;
for(auto&& node : graph.node())
{
result[get_name(node)] = node;
if(node.output().empty())
{
if(node.name().empty())
{
result["migraphx_unamed_node_" + std::to_string(n)] = node;
n++;
}
else
{
result[node.name()] = node;
}
}
for(auto&& output : node.output())
{
result[output] = node;
......
......@@ -84,7 +84,7 @@ struct memory_coloring_impl
{
return is_param(ins) && any_cast<builtin::param>(ins->get_operator()).parameter == "output";
}
bool is_allocate(const instruction_ref ins) { return ins->name() == allocation_op; }
bool is_allocate(const instruction_ref ins) const { return ins->name() == allocation_op; }
static bool is_outline(const instruction_ref ins) { return ins->name() == "@outline"; }
static bool is_literal(const instruction_ref ins) { return ins->name() == "@literal"; }
static bool is_check_context(const instruction_ref ins)
......
......@@ -14,8 +14,9 @@ namespace device {
argument gather(hipStream_t stream,
const migraphx::shape& output_shape,
std::vector<migraphx::argument> args,
std::size_t axis)
int axis)
{
int axis_index = (axis < 0) ? (axis + output_shape.lens().size()) : axis;
visit_all(args.back(), args[0])([&](auto output, auto input) {
std::size_t nelements = output_shape.elements();
args[1].visit([&](auto indices) {
......@@ -26,9 +27,9 @@ argument gather(hipStream_t stream,
hip_tensor_descriptor<ndim> desc_input(input.get_shape());
hip_tensor_descriptor<ndim> desc_output(output.get_shape());
gs_launch(stream, nelements)([=](auto i) {
auto lens = desc_output.multi(i);
lens[axis] = indices_ptr[lens[axis]];
outptr[i] = inptr[desc_input.linear(lens)];
auto lens = desc_output.multi(i);
lens[axis_index] = indices_ptr[lens[axis_index]];
outptr[i] = inptr[desc_input.linear(lens)];
});
});
});
......
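The kernel body above converts each linear output index to a multi-index, redirects the axis coordinate through the indices tensor, and converts back to a linear input offset; this works because the gather output has the same rank as the input, with only the axis dimension replaced. A host-side analogue assuming row-major strides (`hip_tensor_descriptor` is taken as shown in the diff, not redefined here):

```cpp
#include <cstddef>
#include <vector>

std::size_t gather_source_offset(std::size_t i,
                                 const std::vector<std::size_t>& out_lens,
                                 const std::vector<std::size_t>& in_lens,
                                 const std::vector<int>& indices,
                                 int axis_index)
{
    // linear -> multi (row-major) over the output shape
    std::vector<std::size_t> idx(out_lens.size());
    for(int d = static_cast<int>(out_lens.size()) - 1; d >= 0; d--)
    {
        idx[d] = i % out_lens[d];
        i /= out_lens[d];
    }
    // redirect the axis coordinate through the indices tensor
    idx[axis_index] = indices[idx[axis_index]];
    // multi -> linear (row-major) over the input shape
    std::size_t offset = 0;
    for(std::size_t d = 0; d < in_lens.size(); d++)
        offset = offset * in_lens[d] + idx[d];
    return offset;
}
```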
......@@ -344,7 +344,7 @@ void apply_conv_bias(context& ctx, program& p, match::matcher_result r)
Op cb{conv_op, input_ins->get_shape(), weights_ins->get_shape(), bias_ins->get_shape()};
// TODO: Insert ws allocation
auto ws = cb.get_workspace(ctx);
(void)ws;
p.replace_instruction(ins, cb, input_ins, weights_ins, old_ws_ins, bias_ins, alloc_ins);
}
......
......@@ -13,7 +13,7 @@ namespace device {
argument gather(hipStream_t stream,
const migraphx::shape& output_shape,
std::vector<migraphx::argument> args,
std::size_t axis);
int axis);
} // namespace device
} // namespace gpu
......
......@@ -113,8 +113,8 @@ TEST_CASE(gather_test)
auto a0 = p.add_literal(migraphx::literal{s, data});
migraphx::shape s_indices{migraphx::shape::int32_type, {1, 2}};
std::vector<int> indices{0, 2};
auto a1 = p.add_literal(migraphx::literal{s_indices, indices});
std::size_t axis = 0;
auto a1 = p.add_literal(migraphx::literal{s_indices, indices});
int axis = 0;
p.add_instruction(migraphx::op::gather{axis}, a0, a1);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
......@@ -133,8 +133,28 @@ TEST_CASE(gather_test)
auto a0 = p.add_literal(migraphx::literal{s, data});
migraphx::shape s_indices{migraphx::shape::int32_type, {1, 2}};
std::vector<int> indices{0, 2};
auto a1 = p.add_literal(migraphx::literal{s_indices, indices});
std::size_t axis = 1;
auto a1 = p.add_literal(migraphx::literal{s_indices, indices});
int axis = 1;
p.add_instruction(migraphx::op::gather{axis}, a0, a1);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<float> res_data(4 * 5);
std::vector<float> golden = {0.5f, 2.5f, 3.5f, 5.5f, 6.5f, 8.5f};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(res_data, golden));
}
{
migraphx::program p;
std::vector<float> data(3 * 3);
std::iota(data.begin(), data.end(), 0.5);
migraphx::shape s{migraphx::shape::float_type, {3, 3}};
auto a0 = p.add_literal(migraphx::literal{s, data});
migraphx::shape s_indices{migraphx::shape::int32_type, {1, 2}};
std::vector<int> indices{0, 2};
auto a1 = p.add_literal(migraphx::literal{s_indices, indices});
int axis = -1;
p.add_instruction(migraphx::op::gather{axis}, a0, a1);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
......
......@@ -942,9 +942,25 @@ struct test_gather
migraphx::shape s{migraphx::shape::float_type, {3, 3}};
migraphx::shape s_indices{migraphx::shape::int32_type, {2, 2}};
std::vector<int> indices{1, 2, 2, 1};
auto a0 = p.add_parameter("data", s);
auto a1 = p.add_literal(migraphx::literal{s_indices, indices});
std::size_t axis = 0;
auto a0 = p.add_parameter("data", s);
auto a1 = p.add_literal(migraphx::literal{s_indices, indices});
int axis = 0;
p.add_instruction(migraphx::op::gather{axis}, a0, a1);
return p;
}
};
struct test_gather_neg_axis
{
migraphx::program create_program() const
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {3, 3}};
migraphx::shape s_indices{migraphx::shape::int32_type, {2, 2}};
std::vector<int> indices{1, 2, 2, 1};
auto a0 = p.add_parameter("data", s);
auto a1 = p.add_literal(migraphx::literal{s_indices, indices});
int axis = -1;
p.add_instruction(migraphx::op::gather{axis}, a0, a1);
return p;
}
......@@ -1090,4 +1106,6 @@ int main()
verify_program<test_conv_bn_relu_pooling>();
verify_program<test_conv_bn_relu_pooling2>();
verify_program<test_slice>();
verify_program<test_gather>();
verify_program<test_gather_neg_axis>();
}
......@@ -111,7 +111,7 @@ struct lhs_expression
struct capture
{
template <class T>
auto operator->*(const T& x)
auto operator->*(const T& x) const
{
return make_lhs_expression(x);
}
......
......@@ -296,7 +296,7 @@ TEST_CASE(max_test)
auto l0 = p.add_instruction(migraphx::op::max{}, input0, input1);
p.add_instruction(migraphx::op::max{}, l0, input2);
auto prog = migraphx::parse_onnx("max_test.onnx");
migraphx::parse_onnx("max_test.onnx");
}
TEST_CASE(acos_test)
......@@ -319,7 +319,7 @@ TEST_CASE(min_test)
auto l0 = p.add_instruction(migraphx::op::min{}, input0, input1);
p.add_instruction(migraphx::op::min{}, l0, input2);
auto prog = migraphx::parse_onnx("min_test.onnx");
migraphx::parse_onnx("min_test.onnx");
}
TEST_CASE(atan_test)
......@@ -415,9 +415,9 @@ TEST_CASE(shape_test)
TEST_CASE(gather_test)
{
migraphx::program p;
auto l0 = p.add_parameter("data", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
auto l1 = p.add_parameter("indices", migraphx::shape{migraphx::shape::int32_type, {2, 3}});
std::size_t axis = 1;
auto l0 = p.add_parameter("data", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
auto l1 = p.add_parameter("indices", migraphx::shape{migraphx::shape::int32_type, {2, 3}});
int axis = 1;
p.add_instruction(migraphx::op::gather{axis}, l0, l1);
auto prog = migraphx::parse_onnx("gather_test.onnx");
......@@ -431,8 +431,8 @@ TEST_CASE(shape_gather_test)
auto l1 =
p.add_literal(migraphx::shape{migraphx::shape::int64_type, {3}}, l0->get_shape().lens());
migraphx::shape const_shape{migraphx::shape::int32_type, {1}};
auto l2 = p.add_literal(migraphx::literal{const_shape, {1}});
std::size_t axis = 0;
auto l2 = p.add_literal(migraphx::literal{const_shape, {1}});
int axis = 0;
p.add_instruction(migraphx::op::gather{axis}, l1, l2);
auto prog = migraphx::parse_onnx("shape_gather.onnx");
......@@ -558,7 +558,7 @@ TEST_CASE(group_conv_test)
migraphx::op::convolution op;
op.group = 4;
p.add_instruction(op, l0, l1);
auto prog = migraphx::parse_onnx("group_conv_test.onnx");
migraphx::parse_onnx("group_conv_test.onnx");
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -217,7 +217,7 @@ TEST_CASE(gather)
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
migraphx::shape indices{migraphx::shape::int32_type, {2, 3}};
std::size_t axis = 1;
int axis = 1;
expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 6, 4, 5}},
migraphx::op::gather{axis},
input,
......@@ -227,7 +227,24 @@ TEST_CASE(gather)
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
migraphx::shape indices{migraphx::shape::int32_type, {2, 3}};
std::size_t axis = 4;
int axis = -4;
expect_shape(migraphx::shape{migraphx::shape::float_type, {6, 3, 4, 5}},
migraphx::op::gather{axis},
input,
indices);
}
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
migraphx::shape indices{migraphx::shape::int32_type, {2, 3}};
int axis = 4;
throws_shape(migraphx::op::gather{axis}, input, indices);
}
{
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
migraphx::shape indices{migraphx::shape::int32_type, {2, 3}};
int axis = -5;
throws_shape(migraphx::op::gather{axis}, input, indices);
}
}
......