Commit 988bf26b authored by turneram

Merge remote-tracking branch 'origin/jit-softmax' into HEAD

parents f99a3036 bb0fff52
......@@ -9,6 +9,7 @@ namespace migraphx {
template <class Lens, class Strides>
struct shape
{
using shape_type = shape;
using index_array = typename Lens::base_array;
Lens lens = {};
Strides strides = {};
......
#ifndef MIGRAPHX_GUARD_KERNELS_SOFTMAX_HPP
#define MIGRAPHX_GUARD_KERNELS_SOFTMAX_HPP
#include <migraphx/kernels/reduce.hpp>
#include <migraphx/kernels/ops.hpp>
namespace migraphx {
template <index_int Axis, class Input, class Output>
__device__ void softmax(Input input, Output output)
{
reduce::block::run<reduce::with_axis<Input, Axis>>([&](auto, auto r) {
auto batch_max = r.reduce(op::max{}, lowest{}, op::id{})(input);
auto batch_sum =
r.reduce(op::sum{}, 0, [&](auto x) { return migraphx::exp(x - batch_max); })(input);
r.inner([&](auto& y, auto x) { y = migraphx::exp(x - batch_max) / batch_sum; })(output,
input);
});
}
} // namespace migraphx
#endif // MIGRAPHX_GUARD_KERNELS_SOFTMAX_HPP
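The kernel above computes a numerically stable softmax along the reduction axis: reduce to the row maximum, reduce to the sum of exp(x - max), then write exp(x - max) / sum elementwise. A minimal host-side C++ sketch of the same three steps for a single row (illustrative only; softmax_ref is not part of MIGraphX):
#include <algorithm>
#include <cassert>
#include <cmath>
#include <vector>
// Reference softmax for one row, mirroring the reductions in the kernel:
// batch_max guards against overflow in exp(), batch_sum normalizes the result.
std::vector<float> softmax_ref(const std::vector<float>& x)
{
    float batch_max = *std::max_element(x.begin(), x.end());
    float batch_sum = 0.0f;
    for(float v : x)
        batch_sum += std::exp(v - batch_max);
    std::vector<float> y(x.size());
    std::transform(x.begin(), x.end(), y.begin(), [&](float v) {
        return std::exp(v - batch_max) / batch_sum;
    });
    return y;
}
int main()
{
    auto y      = softmax_ref({1.0f, 2.0f, 3.0f});
    float total = 0.0f;
    for(float v : y)
        total += v;
    assert(std::abs(total - 1.0f) < 1e-6f); // probabilities sum to one
}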
......@@ -29,11 +29,23 @@ struct tensor_view
constexpr Shape get_shape() const { return Shape{}; }
constexpr auto size() const { return get_shape().elements(); }
template <class U>
constexpr T& operator[](U i) const
struct index_to_offset
{
MIGRAPHX_ASSERT(get_shape().index(i) < get_shape().element_space());
return x[get_shape().index(i)];
index_int offset;
template <class U>
constexpr index_to_offset(U i) : offset(Shape{}.index(i))
{
}
};
constexpr T& operator[](MIGRAPHX_CAPTURE_SOURCE_LOCATION(index_to_offset) i) const
{
index_to_offset ito = i;
MIGRAPHX_WARN(ito.offset < get_shape().element_space(),
i,
"Out of bounds access at offset: ",
ito.offset);
return x[ito.offset];
}
constexpr T* data() const { return x; }
......
......@@ -4,6 +4,7 @@
#include <migraphx/kernels/types.hpp>
#include <migraphx/kernels/integral_constant.hpp>
#include <migraphx/kernels/functional.hpp>
#include <migraphx/kernels/debug.hpp>
namespace migraphx {
......
......@@ -189,7 +189,6 @@ struct miopen_apply
add_extend_op("rnn_var_sl_shift_output");
add_extend_op("rnn_var_sl_shift_sequence");
add_extend_op("scatter_none");
add_extend_op("softmax");
add_extend_op("topk");
add_batch_norm_inference_op();
......
function(add_api_test TEST_NAME TEST_SRC TEST_DIR)
set(NAME test_api_${TEST_NAME})
add_executable(${NAME} EXCLUDE_FROM_ALL ${TEST_SRC})
......@@ -10,6 +9,7 @@ function(add_api_test TEST_NAME TEST_SRC TEST_DIR)
add_dependencies(check ${NAME})
endfunction()
add_api_test(array_base test_array_base.cpp ${TEST_ONNX_DIR})
add_api_test(assign test_assign.cpp ${TEST_ONNX_DIR})
add_api_test(custom_op test_custom_op.cpp ${TEST_ONNX_DIR})
add_api_test(compile_options test_compile_options.cpp ${TEST_ONNX_DIR})
......@@ -19,7 +19,8 @@ add_api_test(ref test_cpu.cpp ${TEST_ONNX_DIR})
add_api_test(save_load test_save_load.cpp ${TEST_ONNX_DIR})
add_api_test(op test_op_construct.cpp ${TEST_ONNX_DIR})
add_api_test(tf_parser test_tf_parser.cpp ${TEST_TF_DIR})
# GPU-based tests
if(MIGRAPHX_ENABLE_GPU)
add_api_test(gpu test_gpu.cpp ${TEST_ONNX_DIR})
# GPU-based tests
target_link_libraries(test_api_gpu migraphx_gpu)
endif()
#include <migraphx/migraphx.hpp>
#include "test.hpp"
struct array2 : migraphx::array_base<array2>
{
std::vector<int> v;
array2() = default;
array2(std::initializer_list<int> x) : v(x) {}
std::size_t size() const { return v.size(); }
int operator[](std::size_t i) const { return v[i]; }
};
TEST_CASE(iterators)
{
array2 a = {1, 2, 3};
EXPECT(bool{std::equal(a.begin(), a.end(), a.v.begin())});
}
TEST_CASE(front_back)
{
array2 a = {1, 2, 3};
EXPECT(a.front() == 1);
EXPECT(a.back() == 3);
}
TEST_CASE(empty)
{
array2 a = {1, 2, 3};
EXPECT(not a.empty());
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
#include <numeric>
#include <hip/hip_runtime_api.h>
#include <migraphx/migraphx.h>
#include <migraphx/migraphx.hpp>
#include "test.hpp"
......@@ -38,6 +39,7 @@ TEST_CASE(load_and_run_ctx)
pp.add(name, migraphx::argument::generate(param_shapes[name]));
}
auto ctx = p.experimental_get_context();
EXPECT(ctx.get_queue<hipStream_t>() != nullptr);
p.eval(pp);
ctx.finish();
}
......
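The new assertion exercises the queue accessor on the experimental context API. As a hedged usage sketch of what that accessor enables (illustrative only; "model.onnx" is a placeholder path, and the test above uses ctx.finish() rather than a raw stream sync):
#include <hip/hip_runtime_api.h>
#include <migraphx/migraphx.hpp>
int main()
{
    auto p = migraphx::parse_onnx("model.onnx"); // placeholder model
    p.compile(migraphx::target("gpu"));
    auto ctx    = p.experimental_get_context();
    auto stream = ctx.get_queue<hipStream_t>(); // the handle the test asserts is non-null
    // ... evaluate the program as in the test above ...
    hipStreamSynchronize(stream); // one way to drain the queue; the test uses ctx.finish()
}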
......@@ -60,16 +60,16 @@ TEST_CASE(if_then_else_op)
p.compile(migraphx::target("ref"));
auto outputs =
p.eval({{"cond", migraphx::argument(cond_s, &cond)}, {"x", x_arg}, {"y", y_arg}});
return outputs;
return outputs[0];
};
// then branch
auto then_res = run_prog(true);
CHECK(bool{then_res[0] == x_arg});
CHECK(bool{then_res == x_arg});
// else branch
auto else_res = run_prog(false);
CHECK(bool{else_res[0] == y_arg});
CHECK(bool{else_res == y_arg});
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
Binary ONNX test files added: gathernd_test.onnx and gathernd_batch_dims_test.onnx, each containing a single GatherND node with inputs data and indices and output y; the latter also carries a batch_dims attribute.
......@@ -1686,6 +1686,34 @@ def fastgelu_test():
return ([node], [x], [y])
@onnx_test
def gathernd_test():
x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [2, 2])
i = helper.make_tensor_value_info('indices', TensorProto.INT64, [2, 2])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2])
node = onnx.helper.make_node('GatherND',
inputs=['data', 'indices'],
outputs=['y'])
return ([node], [x, i], [y])
@onnx_test
def gathernd_batch_dims_test():
x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [2, 2, 2])
i = helper.make_tensor_value_info('indices', TensorProto.INT64, [2, 1])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2])
node = onnx.helper.make_node(
'GatherND',
inputs=['data', 'indices'],
outputs=['y'],
batch_dims=1,
)
return ([node], [x, i], [y])
@onnx_test
def gemm_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5, 7])
......
......@@ -1582,6 +1582,31 @@ TEST_CASE(gather_elements_axis1_test)
EXPECT(p == prog);
}
TEST_CASE(gathernd_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {2, 2}});
auto l1 = mm->add_parameter("indices", migraphx::shape{migraphx::shape::int64_type, {2, 2}});
mm->add_instruction(migraphx::make_op("gathernd"), l0, l1);
auto prog = optimize_onnx("gathernd_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(gathernd_batch_dims_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {2, 2, 2}});
auto l1 = mm->add_parameter("indices", migraphx::shape{migraphx::shape::int64_type, {2, 1}});
int batch_dims = 1;
mm->add_instruction(migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), l0, l1);
auto prog = optimize_onnx("gathernd_batch_dims_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(gemm_test)
{
migraphx::program p;
......
......@@ -268,9 +268,6 @@ def create_backend_test(testname=None, target_device=None):
backend_test.exclude(r'test_expand_shape_model2_cpu')
backend_test.exclude(r'test_expand_shape_model3_cpu')
backend_test.exclude(r'test_expand_shape_model4_cpu')
backend_test.exclude(r'test_gathernd_example_float32_cpu')
backend_test.exclude(r'test_gathernd_example_int32_batch_dim1_cpu')
backend_test.exclude(r'test_gathernd_example_int32_cpu')
backend_test.exclude(r'test_identity_sequence_cpu')
backend_test.exclude(r'test_maxpool_2d_uint8_cpu')
backend_test.exclude(r'test_negative_log_likelihood_loss_*')
......
......@@ -1653,6 +1653,203 @@ TEST_CASE(gather_test)
}
}
TEST_CASE(gathernd_test)
{
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 2}};
migraphx::shape is{migraphx::shape::int64_type, {2, 2}};
std::vector<float> data_vec(2 * 2);
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<int64_t> indices_vec{0, 0, 1, 1};
auto data = mm->add_literal(migraphx::literal{ds, data_vec});
auto indices = mm->add_literal(migraphx::literal{is, indices_vec});
mm->add_instruction(migraphx::make_op("gathernd"), data, indices);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> res_data{};
std::vector<float> gold{0, 3};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(res_data, gold));
}
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 2}};
migraphx::shape is{migraphx::shape::int64_type, {2, 1}};
std::vector<float> data_vec(2 * 2);
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<int64_t> indices_vec{1, 0};
auto data = mm->add_literal(migraphx::literal{ds, data_vec});
auto indices = mm->add_literal(migraphx::literal{is, indices_vec});
mm->add_instruction(migraphx::make_op("gathernd"), data, indices);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> res_data{};
std::vector<float> gold{2, 3, 0, 1};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(res_data, gold));
}
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 3, 1}};
migraphx::shape is{migraphx::shape::int64_type, {2, 2, 1}};
std::vector<float> data_vec(2 * 3 * 1);
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<int64_t> indices_vec{1, 0, 0, 1};
auto data = mm->add_literal(migraphx::literal{ds, data_vec});
auto indices = mm->add_literal(migraphx::literal{is, indices_vec});
mm->add_instruction(migraphx::make_op("gathernd"), data, indices);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> res_data{};
std::vector<float> gold{3, 4, 5, 0, 1, 2, 0, 1, 2, 3, 4, 5};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(res_data, gold));
}
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 3, 2, 3}};
migraphx::shape is{migraphx::shape::int64_type, {2, 2, 2}};
std::vector<float> data_vec(2 * 3 * 2 * 3);
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<int64_t> indices_vec{0, 0, 0, 1, 0, 0, 0, 1};
const int batch_dims = 1;
auto data = mm->add_literal(migraphx::literal{ds, data_vec});
auto indices = mm->add_literal(migraphx::literal{is, indices_vec});
mm->add_instruction(
migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), data, indices);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> res_data{};
std::vector<float> gold{0, 1, 2, 3, 4, 5, 18, 19, 20, 21, 22, 23};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(res_data, gold));
}
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 3, 1, 3}};
migraphx::shape is{migraphx::shape::int64_type, {2, 3, 2}};
std::vector<float> data_vec(2 * 3 * 1 * 3);
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<int64_t> indices_vec{0, 0, 0, 1, 0, 2, 0, 2, 0, 1, 0, 0};
const int batch_dims = 2;
auto data = mm->add_literal(migraphx::literal{ds, data_vec});
auto indices = mm->add_literal(migraphx::literal{is, indices_vec});
mm->add_instruction(
migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), data, indices);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> res_data{};
std::vector<float> gold{0, 4, 8, 11, 13, 15};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(res_data, gold));
}
{
// k > r - batch_dims
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 3, 1, 3}};
migraphx::shape is{migraphx::shape::int64_type, {2, 3, 3}};
std::vector<float> data_vec(2 * 3 * 1 * 3);
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<int64_t> indices_vec(2 * 3 * 3, 0);
const int batch_dims = 2;
auto data = mm->add_literal(migraphx::literal{ds, data_vec});
auto indices = mm->add_literal(migraphx::literal{is, indices_vec});
EXPECT(test::throws([&] {
mm->add_instruction(
migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), data, indices);
}));
}
}
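These cases pin down the reference semantics. As a standalone illustration of the default batch_dims = 0 behaviour exercised in the first cases above (a hedged sketch; gathernd_ref and its helpers are illustrative, not the MIGraphX reference operator), each length-k index tuple selects one contiguous row-major slice of the data:
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <numeric>
#include <vector>
// Gather slices of row-major `data` addressed by length-k index tuples.
// Covers only the default batch_dims = 0 case shown above.
std::vector<float> gathernd_ref(const std::vector<float>& data,
                                const std::vector<std::size_t>& data_dims,
                                const std::vector<std::int64_t>& indices,
                                const std::vector<std::size_t>& idx_dims)
{
    const std::size_t k    = idx_dims.back(); // length of each index tuple
    std::size_t num_tuples = 1;
    for(std::size_t i = 0; i + 1 < idx_dims.size(); i++)
        num_tuples *= idx_dims[i];
    std::size_t slice_size = 1; // elements copied per index tuple
    for(std::size_t i = k; i < data_dims.size(); i++)
        slice_size *= data_dims[i];
    // Row-major strides of the data tensor
    std::vector<std::size_t> strides(data_dims.size(), 1);
    for(std::size_t i = data_dims.size() - 1; i > 0; i--)
        strides[i - 1] = strides[i] * data_dims[i];
    std::vector<float> out;
    out.reserve(num_tuples * slice_size);
    for(std::size_t t = 0; t < num_tuples; t++)
    {
        std::size_t offset = 0;
        for(std::size_t j = 0; j < k; j++)
        {
            auto idx = indices[t * k + j];
            if(idx < 0)
                idx += static_cast<std::int64_t>(data_dims[j]); // negative indices wrap
            offset += static_cast<std::size_t>(idx) * strides[j];
        }
        for(std::size_t e = 0; e < slice_size; e++)
            out.push_back(data[offset + e]); // trailing dims are contiguous in row-major order
    }
    return out;
}
int main()
{
    // Mirrors the first case above: data {2, 2} = {0, 1, 2, 3}, indices {{0, 0}, {1, 1}} -> {0, 3}
    std::vector<float> data(4);
    std::iota(data.begin(), data.end(), 0.0f);
    auto out = gathernd_ref(data, {2, 2}, {0, 0, 1, 1}, {2, 2});
    assert((out == std::vector<float>{0, 3}));
}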
TEST_CASE(gathernd_negative_index_test)
{
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 2}};
migraphx::shape is{migraphx::shape::int64_type, {2, 1, 1}};
std::vector<float> data_vec(2 * 2);
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<int64_t> indices_vec{-1, 0};
auto data = mm->add_literal(migraphx::literal{ds, data_vec});
auto indices = mm->add_literal(migraphx::literal{is, indices_vec});
mm->add_instruction(migraphx::make_op("gathernd"), data, indices);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> res_data{};
std::vector<float> gold{2, 3, 0, 1};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(res_data, gold));
}
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 2}};
migraphx::shape is{migraphx::shape::int64_type, {2, 1, 1}};
std::vector<float> data_vec(2 * 2);
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<int64_t> indices_vec{-3, 0};
auto data = mm->add_literal(migraphx::literal{ds, data_vec});
auto indices = mm->add_literal(migraphx::literal{is, indices_vec});
mm->add_instruction(migraphx::make_op("gathernd"), data, indices);
p.compile(migraphx::ref::target{});
EXPECT(test::throws([&] { p.eval({}); }));
}
}
TEST_CASE(globalavgpool_test)
{
migraphx::program p;
......
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_gathernd_batch_dims_1 : verify_program<test_gathernd_batch_dims_1>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 3, 2, 3}};
migraphx::shape is{migraphx::shape::int64_type, {2, 3, 2}};
std::vector<int64_t> indices{1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0};
auto a0 = mm->add_parameter("data", ds);
auto a1 = mm->add_literal(migraphx::literal{is, indices});
int batch_dims = 1;
mm->add_instruction(migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), a0, a1);
return p;
}
};
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_gathernd_batch_dims_2 : verify_program<test_gathernd_batch_dims_2>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 3, 1, 3}};
migraphx::shape is{migraphx::shape::int64_type, {2, 3, 2}};
std::vector<int64_t> indices{0, 0, 0, 1, 0, 2, 0, 2, 0, 1, 0, 0};
auto a0 = mm->add_parameter("data", ds);
auto a1 = mm->add_literal(migraphx::literal{is, indices});
int batch_dims = 2;
mm->add_instruction(migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), a0, a1);
return p;
}
};
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_gathernd_default : verify_program<test_gathernd_default>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 2}};
migraphx::shape is{migraphx::shape::int64_type, {2, 2}};
std::vector<int64_t> indices{0, 0, 1, 1};
auto a0 = mm->add_parameter("data", ds);
auto a1 = mm->add_literal(migraphx::literal{is, indices});
mm->add_instruction(migraphx::make_op("gathernd"), a0, a1);
return p;
}
};
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_gathernd_negative_indices : verify_program<test_gathernd_negative_indices>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 2}};
migraphx::shape is{migraphx::shape::int64_type, {2, 1, 1}};
std::vector<int64_t> indices{-1, 0};
auto a0 = mm->add_parameter("data", ds);
auto a1 = mm->add_literal(migraphx::literal{is, indices});
int batch_dims = 1;
mm->add_instruction(migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), a0, a1);
return p;
}
};