Commit 6816a475 authored by Khalique's avatar Khalique
Browse files

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into rsqrt_op

parents b4b93f00 9680c147
......@@ -37,6 +37,7 @@ add_library(migraphx_device
device/pad.cpp
device/gather.cpp
device/sub.cpp
device/div.cpp
device/clip.cpp
device/reduce_sum.cpp
device/rsqrt.cpp
......
#include <migraphx/gpu/device/div.hpp>
#include <migraphx/gpu/device/nary.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
void div(hipStream_t stream, const argument& result, const argument& arg1, const argument& arg2)
{
    // Elementwise quotient on the GPU: result[i] = arg1[i] / arg2[i].
    auto quotient = [](auto numer, auto denom) { return numer / denom; };
    nary(stream, result, arg1, arg2)(quotient);
}
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
......@@ -8,7 +8,7 @@ namespace device {
// Elementwise difference on the GPU: result[i] = arg1[i] - arg2[i].
// NOTE: the scraped diff left both the pre-fix line (y - x, operands
// reversed) and the corrected line in sequence; only the corrected
// invocation belongs here — running both would launch the kernel twice
// and the first launch computes the wrong (negated) result.
void sub(hipStream_t stream, const argument& result, const argument& arg1, const argument& arg2)
{
    nary(stream, result, arg1, arg2)([](auto x, auto y) { return x - y; });
}
} // namespace device
......
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_DIV_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_DIV_HPP
#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
// Launches the elementwise-division kernel on `stream`, writing
// arg1 / arg2 into the preallocated `result` buffer.
void div(hipStream_t stream, const argument& result, const argument& arg1, const argument& arg2);
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_DIV_HPP
#define MIGRAPHX_GUARD_RTGLIB_DIV_HPP
#include <migraphx/gpu/oper.hpp>
#include <migraphx/gpu/device/div.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
// GPU lowering of the "div" operator: binary_device (oper.hpp) supplies the
// generic binary-op plumbing and dispatches to the device::div kernel launcher.
struct hip_div : binary_device<hip_div, device::div>
{
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
......@@ -88,7 +88,7 @@ struct binary_device : oper<Derived>
// Runs the device kernel F over the input arguments and returns the output.
// args[2] is the preallocated output buffer; args[0] and args[1] are the
// operands, passed in program order so non-commutative ops (sub, div)
// compute args[0] OP args[1].
// NOTE: the scraped diff left both the pre-fix call (operands swapped) and
// the corrected call in sequence; only the corrected call belongs here.
argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
    F(ctx.get_stream().get(), args[2], args[0], args[1]);
    return args[2];
}
......
......@@ -26,6 +26,7 @@
#include <migraphx/gpu/logsoftmax.hpp>
#include <migraphx/gpu/add.hpp>
#include <migraphx/gpu/sub.hpp>
#include <migraphx/gpu/div.hpp>
#include <migraphx/gpu/exp.hpp>
#include <migraphx/gpu/erf.hpp>
#include <migraphx/gpu/log.hpp>
......@@ -100,6 +101,7 @@ struct miopen_apply
add_generic_op<hip_acos>("acos");
add_generic_op<hip_atan>("atan");
add_generic_op<hip_mul>("mul");
add_generic_op<hip_div>("div");
add_generic_op<hip_max>("max");
add_generic_op<hip_min>("min");
add_generic_op<hip_rsqrt>("rsqrt");
......
......@@ -79,7 +79,8 @@ struct tf_parser
return result;
}
std::vector<size_t> parse_axes(const attribute_map& attributes, const std::string& s) const
std::vector<size_t>
parse_axes(const attribute_map& attributes, const std::string& s, const size_t num_dims) const
{
auto attrs = attributes.at(s).list().i();
std::vector<size_t> axes;
......@@ -87,14 +88,14 @@ struct tf_parser
if(is_nhwc)
{
std::transform(axes.begin(), axes.end(), axes.begin(), [&](size_t axis) {
return parse_axis(axis);
return parse_axis(axis, num_dims);
});
}
return axes;
}
template <class T>
std::vector<T> parse_axes(std::vector<T> axes) const
std::vector<T> parse_axes(std::vector<T> axes, const size_t num_dims) const
{
if(is_nhwc)
{
......@@ -102,7 +103,7 @@ struct tf_parser
std::transform(axes.begin(),
axes.end(),
std::back_inserter(new_axes),
[&](size_t axis) { return parse_axis(axis); });
[&](size_t axis) { return parse_axis(axis, num_dims); });
return new_axes;
}
return axes;
......@@ -117,17 +118,17 @@ struct tf_parser
std::vector<T> new_data(prev_data.size());
for(size_t i = 0; i < new_data.size(); i++)
{
auto new_idx = parse_axis(i);
auto new_idx = parse_axis(i, new_data.size());
new_data.at(new_idx) = prev_data.at(i);
}
prev_data = new_data;
}
template <class T>
T parse_axis(const T& dim) const
T parse_axis(const T& dim, const size_t num_dims) const
{
T new_dim = dim;
if(is_nhwc)
if(is_nhwc and num_dims >= 4)
{
switch(dim)
{
......@@ -155,6 +156,7 @@ struct tf_parser
add_generic_op("Relu6", op::clip{6.0, 0.0});
add_generic_op("Rsqrt", op::rsqrt{});
add_generic_op("Tanh", op::tanh{});
add_generic_op("StopGradient", op::identity{});
add_binary_op("Add", op::add{});
add_binary_op("Mul", op::mul{});
......@@ -166,6 +168,7 @@ struct tf_parser
add_mem_op("Const", &tf_parser::parse_constant);
add_mem_op("Conv2D", &tf_parser::parse_conv);
add_mem_op("DepthwiseConv2dNative", &tf_parser::parse_depthwiseconv);
add_mem_op("ExpandDims", &tf_parser::parse_expanddims, false);
add_mem_op("FusedBatchNorm", &tf_parser::parse_batchnorm);
add_mem_op("MatMul", &tf_parser::parse_matmul, false);
add_mem_op("MaxPool", &tf_parser::parse_pooling);
......@@ -491,6 +494,25 @@ struct tf_parser
return prog.add_instruction(op, {l0, new_weights});
}
// Translates TF ExpandDims: inserts a length-1 dimension into the input
// shape at the requested axis and emits a reshape. A negative axis counts
// from the end, per TensorFlow semantics (axis == -1 appends a trailing 1).
instruction_ref
parse_expanddims(const std::string&, const attribute_map&, std::vector<instruction_ref> args)
{
    const auto& lens = args[0]->get_shape().lens();
    std::vector<int64_t> new_dims(lens.begin(), lens.end());
    size_t rank    = lens.size();
    int32_t axis   = args[1]->eval().at<int32_t>();
    // Normalize a negative axis before inserting; positive axes are used as-is.
    size_t insert_pos = (axis < 0) ? (rank + axis + 1) : static_cast<size_t>(axis);
    new_dims.insert(new_dims.begin() + insert_pos, 1);
    return prog.add_instruction(op::reshape{new_dims}, args[0]);
}
instruction_ref
parse_matmul(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
{
......@@ -520,11 +542,12 @@ struct tf_parser
instruction_ref
parse_mean(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
{
auto axes = parse_axes(args[1]->eval().get<int32_t>().to_vector());
bool keep_dims = attributes.at("keep_dims").b();
std::vector<int32_t> hw_axes{2, 3};
// check if conditions for GlobalAvgPool are met
auto lens = args[0]->get_shape().lens();
auto axes = parse_axes(args[1]->eval().get<int32_t>().to_vector(), lens.size());
if(axes == hw_axes and lens.size() == 4)
{
op::pooling op{"average"};
......@@ -695,14 +718,15 @@ struct tf_parser
std::vector<instruction_ref> args)
{
op::squeeze op;
auto axes = attributes.at("squeeze_dims").list().i();
auto input_dims = args[0]->get_shape().lens();
auto axes = attributes.at("squeeze_dims").list().i();
copy(axes, std::back_inserter(op.axes));
auto args0_dims = args[0]->get_shape().lens();
if(op.axes.empty()) // no squeeze_dims provided, remove any dim that equals 1
{
for(size_t i = 0; i < args0_dims.size(); i++)
for(size_t i = 0; i < input_dims.size(); i++)
{
if(args0_dims.at(i) == 1)
if(input_dims.at(i) == 1)
{
op.axes.push_back(i);
}
......
......@@ -581,6 +581,38 @@ struct test_sub2 : verify_program<test_sub2>
}
};
// Verifies chained elementwise division on 1-D float tensors: (x / y) / z.
struct test_div : verify_program<test_div>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {3}};
        auto a        = p.add_parameter("x", s);
        auto b        = p.add_parameter("y", s);
        auto c        = p.add_parameter("z", s);
        auto quotient = p.add_instruction(migraphx::op::div{}, a, b);
        p.add_instruction(migraphx::op::div{}, quotient, c);
        return p;
    }
};
// Verifies elementwise division with a broadcast divisor:
// (x / y) / broadcast(z), where z is rank-1 and broadcast along axis 1.
struct test_div2 : verify_program<test_div2>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape full{migraphx::shape::float_type, {2, 3}};
        migraphx::shape vec{migraphx::shape::float_type, {3}};
        auto a           = p.add_parameter("x", full);
        auto b           = p.add_parameter("y", full);
        auto c           = p.add_parameter("z", vec);
        auto c_broadcast = p.add_instruction(migraphx::op::broadcast{1, full.lens()}, c);
        auto quotient    = p.add_instruction(migraphx::op::div{}, a, b);
        p.add_instruction(migraphx::op::div{}, quotient, c_broadcast);
        return p;
    }
};
struct test_softmax1 : verify_program<test_softmax1>
{
migraphx::program create_program() const
......
:
0 Placeholder*
dtype0*
shape:
(
stopgradient StopGradient0*
T0"
\ No newline at end of file
......@@ -159,6 +159,31 @@ TEST_CASE(depthwiseconv_test)
EXPECT(p == prog);
}
TEST_CASE(expanddims_test)
{
    // ExpandDims at axis 0: {2,3,4} input becomes a {1,2,3,4} reshape.
    migraphx::program p;
    auto input = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 4}});
    p.add_literal(0);
    p.add_instruction(migraphx::op::reshape{{1, 2, 3, 4}}, input);
    auto prog = optimize_tf("expanddims_test.pb", false);
    EXPECT(p == prog);
}
TEST_CASE(expanddims_test_neg_dims)
{
    // Ensures the parser handles a negative ExpandDims axis: -1 appends a
    // trailing length-1 dimension, so {2,3,4} becomes {2,3,4,1}.
    migraphx::program p;
    auto input = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 4}});
    p.add_literal(-1);
    p.add_instruction(migraphx::op::reshape{{2, 3, 4, 1}}, input);
    auto prog = optimize_tf("expanddims_neg_test.pb", false);
    EXPECT(p == prog);
}
TEST_CASE(identity_test)
{
migraphx::program p;
......@@ -359,6 +384,16 @@ TEST_CASE(squeeze_test)
EXPECT(p == prog);
}
TEST_CASE(stopgradient_test)
{
    // StopGradient is a no-op at inference time, so it lowers to identity.
    migraphx::program p;
    auto input =
        p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
    p.add_instruction(migraphx::op::identity{}, input);
    auto prog = optimize_tf("stopgradient_test.pb", false);
    EXPECT(p == prog);
}
TEST_CASE(stridedslice_test)
{
migraphx::program p;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment