Unverified commit fdc3f00a authored by Charlie Lin, committed by GitHub

Refactor non-standard literal construction (#1443)

Fixes a problem with the contiguous operator constructing non-standard shape literals. A non-standard literal should almost never be needed, since a literal's contents are known at compile time and can therefore always be stored in a standard layout. Added comments on the intended behavior:

- The literal{shape, vector} constructor with a non-standard shape keeps the same ordering as the given vector: the data buffer is populated so that, when read back through the non-standard indexing, the elements come out in the original order (see the sketch below).
- The literal{shape, argument} constructor copies the data buffer directly from the argument.
- Changed the non-standard literal fill() to use tensor_view iterators, now that they handle non-standard shapes.
- Changed the contiguous ref_ops_test to be more helpful
parent 0e40ebaa
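To make the first bullet concrete, here is a small standalone sketch (plain C++, no MIGraphX types, not part of the commit) that scatters the values 0..11 into the buffer of a {1, 3, 2, 2} tensor with strides {12, 1, 6, 3}. Reading the buffer back linearly reproduces the expected_buffer checked in the new literal_nstd_shape_vector test below.

#include <cstddef>
#include <iostream>
#include <numeric>
#include <vector>

int main()
{
    std::size_t lens[]    = {1, 3, 2, 2};
    std::size_t strides[] = {12, 1, 6, 3};

    std::vector<float> data(12);
    std::iota(data.begin(), data.end(), 0.0f); // logical values 0..11

    std::vector<float> buffer(12);
    std::size_t i = 0;
    // Walk the logical indices in row-major order and place each value at
    // offset = sum(idx[d] * strides[d]), mirroring what literal{shape, vector}
    // is intended to do for a non-standard shape.
    for(std::size_t n = 0; n < lens[0]; n++)
        for(std::size_t c = 0; c < lens[1]; c++)
            for(std::size_t h = 0; h < lens[2]; h++)
                for(std::size_t w = 0; w < lens[3]; w++)
                    buffer[n * strides[0] + c * strides[1] + h * strides[2] + w * strides[3]] =
                        data[i++];

    for(float x : buffer)
        std::cout << x << " "; // prints 0 4 8 1 5 9 2 6 10 3 7 11
    std::cout << "\n";
}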
@@ -80,6 +80,7 @@ struct literal : raw_data<literal>
         fill(start, end);
     }
+    // Directly copies buffer of x
     template <class T, MIGRAPHX_REQUIRES(sizeof(T) == 1)>
     literal(const shape& s, T* x) : buffer(make_shared_array<char>(s.bytes())), m_shape(s)
     {
@@ -107,25 +108,15 @@ struct literal : raw_data<literal>
     std::shared_ptr<char> buffer;
     shape m_shape;
+    // Keeps the same data ordering as the given container
     template <class Iterator>
     void fill(Iterator start, Iterator end)
     {
         assert(std::distance(start, end) == m_shape.elements());
-        if(m_shape.standard())
-        {
-            m_shape.visit_type([&](auto as) { std::copy(start, end, as.from(buffer.get())); });
-        }
-        else
-        {
-            auto it = start;
-            m_shape.visit_type([&](auto as) {
-                auto output = make_view(m_shape, as.from(buffer.get()));
-                shape_for_each(output.get_shape(), [&](const auto& idx) {
-                    output(idx.begin(), idx.end()) = *it; // NOLINT(bugprone-signed-char-misuse)
-                    it++;
-                });
-            });
-        }
+        m_shape.visit_type([&](auto as) {
+            auto output = make_view(m_shape, as.from(buffer.get()));
+            std::copy(start, end, output.begin());
+        });
     }
 };
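The rewritten fill() leans on make_view producing a tensor_view whose iterators already handle non-standard strides, so a single std::copy lands each element in the right buffer position. A rough sketch of the index arithmetic such an iterator has to perform (illustrative names only, not the actual tensor_view implementation):

#include <cassert>
#include <cstddef>
#include <iostream>
#include <vector>

// Map a linear element position (row-major over lens) to its element offset
// in the underlying buffer via the strides.
std::size_t element_offset(std::size_t linear,
                           const std::vector<std::size_t>& lens,
                           const std::vector<std::size_t>& strides)
{
    assert(lens.size() == strides.size());
    std::size_t offset = 0;
    // Peel off the fastest-varying (last) dimension first.
    for(std::size_t d = lens.size(); d-- > 0;)
    {
        offset += (linear % lens[d]) * strides[d];
        linear /= lens[d];
    }
    return offset;
}

int main()
{
    std::vector<std::size_t> lens    = {1, 3, 2, 2};
    std::vector<std::size_t> strides = {12, 1, 6, 3};
    for(std::size_t i = 0; i < 12; i++)
        std::cout << element_offset(i, lens, strides) << " ";
    // prints: 0 3 6 9 1 4 7 10 2 5 8 11 -- the buffer positions that the
    // first, second, third, ... input elements are copied to.
    std::cout << "\n";
}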
......
@@ -31,6 +31,9 @@
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
+/**
+ * Iterates the given function over the indices from the shape in order.
+ */
 template <class F>
 void shape_for_each(const migraphx::shape& s, F f)
 {
@@ -51,7 +54,6 @@ void shape_for_each(const migraphx::shape& s, F f)
         call(indices);
     }
 }
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
......
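A brief usage sketch for the newly documented shape_for_each. The callback convention (a container of indices) matches the removed literal code above; the include paths are my assumption based on this diff, not something the commit states.

#include <iostream>
#include <migraphx/shape.hpp>          // assumed header location
#include <migraphx/shape_for_each.hpp> // assumed header location

void print_indices()
{
    // For lens {2, 3} the callback sees the multi-indices
    // {0,0}, {0,1}, {0,2}, {1,0}, {1,1}, {1,2} in that order.
    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    migraphx::shape_for_each(s, [](const auto& idx) {
        for(auto i : idx)
            std::cout << i << " ";
        std::cout << "\n";
    });
}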
@@ -49,6 +49,25 @@ TEST_CASE(literal_test)
     EXPECT(l4.empty());
 }
+TEST_CASE(literal_nstd_shape_vector)
+{
+    migraphx::shape nstd_shape{migraphx::shape::float_type, {1, 3, 2, 2}, {12, 1, 6, 3}};
+    std::vector<float> data(12);
+    std::iota(data.begin(), data.end(), 0);
+    auto l0 = migraphx::literal{nstd_shape, data};
+    // check data buffer is read in correctly
+    std::vector<float> expected_buffer = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
+    const auto* start = reinterpret_cast<const float*>(l0.data());
+    std::vector<float> l0_data{start, start + 12};
+    EXPECT(l0_data == expected_buffer);
+    // check that using visit() (that uses a tensor view) gives data in correct order
+    std::vector<float> results_vector(12);
+    l0.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    EXPECT(results_vector == data);
+}
 TEST_CASE(literal_os1)
 {
     migraphx::literal l{1};
......
@@ -944,11 +944,38 @@ TEST_CASE(contiguous_test)
     p.compile(migraphx::ref::target{});
     auto result = p.eval({}).back();
+    std::vector<size_t> new_strides = {12, 4, 2, 1};
+    EXPECT(result.get_shape().strides() == new_strides);
     std::vector<float> results_vector(12);
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
-    std::vector<size_t> new_lens = {1, 3, 2, 2};
-    std::vector<size_t> new_strides = {12, 1, 6, 3};
-    EXPECT(migraphx::verify_range(results_vector, data));
+    std::vector<float> gold = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
+    EXPECT(migraphx::verify_range(results_vector, gold));
 }
 
+TEST_CASE(contiguous_param_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    migraphx::shape a_shape{migraphx::shape::float_type, {1, 3, 2, 2}, {12, 1, 6, 3}};
+    auto a = mm->add_parameter("X", a_shape);
+    mm->add_instruction(migraphx::make_op("contiguous"), a);
+    p.compile(migraphx::ref::target{});
+    std::vector<float> data(12);
+    std::iota(data.begin(), data.end(), 0);
+    migraphx::parameter_map params;
+    params["X"] = migraphx::argument(a_shape, data.data());
+    auto result = p.eval(params).back();
+    std::vector<size_t> new_strides = {12, 4, 2, 1};
+    EXPECT(result.get_shape().strides() == new_strides);
+    std::vector<float> results_vector(12);
+    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold = {0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11};
+    EXPECT(migraphx::verify_range(results_vector, gold));
+}
 
 TEST_CASE(contiguous_dyn_test)
......
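For reference on the two gold vectors above: in contiguous_test the literal was built from an already-ordered vector, so after the constructor fix the contiguous output is simply 0..11; in contiguous_param_test the raw parameter buffer 0..11 is reinterpreted through the non-standard strides, so the gold is that buffer gathered in row-major index order. A standalone sketch (plain C++, not MIGraphX code) reproducing the second gold vector:

#include <cstddef>
#include <iostream>
#include <numeric>
#include <vector>

int main()
{
    std::vector<float> buffer(12);
    std::iota(buffer.begin(), buffer.end(), 0.0f); // raw data handed to parameter X

    std::size_t lens[]    = {1, 3, 2, 2};
    std::size_t strides[] = {12, 1, 6, 3};

    std::vector<float> gold;
    // Gather the buffer through the strides in row-major index order,
    // which is what the contiguous operator produces for this input.
    for(std::size_t n = 0; n < lens[0]; n++)
        for(std::size_t c = 0; c < lens[1]; c++)
            for(std::size_t h = 0; h < lens[2]; h++)
                for(std::size_t w = 0; w < lens[3]; w++)
                    gold.push_back(buffer[n * strides[0] + c * strides[1] +
                                          h * strides[2] + w * strides[3]]);

    for(float x : gold)
        std::cout << x << " "; // prints 0 3 6 9 1 4 7 10 2 5 8 11
    std::cout << "\n";
}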