Commit 6a895e3e authored by Khalique's avatar Khalique
Browse files

formatting

parent 2901abd0
......@@ -10,11 +10,8 @@ inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
argument
pad(hipStream_t stream, argument result, argument arg1, float value, std::vector<std::int64_t> pads)
{
std::size_t nelements = arg1.get_shape().elements();
......@@ -27,14 +24,14 @@ argument pad(hipStream_t stream,
const auto* inptr = input.data();
hip_tensor_descriptor<ndim> desc_input(input.get_shape());
hip_tensor_descriptor<ndim> desc_output(output.get_shape());
        gs_launch(stream, nelements)([=](auto i) {
            auto idx = desc_input.multi(i);
            for(std::size_t j = 0; j < ndim; j++)
            {
                idx[j] += offsets[j];
            }
            outptr[desc_output.linear(idx)] = inptr[i];
        });
});
});
return result;
......
......@@ -12,10 +12,10 @@ namespace gpu {
namespace device {
// Device pad: copies the elements of arg1 into result, shifted by the
// per-dimension offsets derived from pads, and returns result.
// NOTE(review): filling of the padded border with `value` happens in the
// implementation (.cpp), which is not fully visible here — confirm there.
argument pad(hipStream_t stream,
             argument result,
             argument arg1,
             float value,
             std::vector<std::int64_t> pads);
} // namespace device
} // namespace gpu
......
......@@ -15,9 +15,7 @@ shape hip_pad::compute_shape(std::vector<shape> inputs) const
return op.compute_shape(inputs);
}
// Runs the pad operator on the GPU. By convention the output buffer is
// passed as the last argument (args.back()) and the input as the first
// (args.front()); the shape parameter is unused because the output shape
// was already fixed by compute_shape.
argument hip_pad::compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
    // Delegate to the device kernel with the operator's configured pad
    // value and per-dimension pad amounts.
    return device::pad(ctx.get_stream().get(), args.back(), args.front(), op.value, op.pads);
}
......
......@@ -1286,17 +1286,14 @@ TEST_CASE(min_test)
TEST_CASE(pad_test)
{
    // Pad a 2x2 float matrix with one element of padding on every side
    // and verify the resulting 4x4 output on the CPU target.
    migraphx::program p;
    migraphx::shape s{migraphx::shape::float_type, {2, 2}};
    auto l0 = p.add_literal(migraphx::literal{s, {1, 2, 3, 4}});
    p.add_instruction(migraphx::op::pad{{1, 1, 1, 1}}, l0);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> results_vector(16);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    // Expected 4x4 output, row-major:
    //   0 0 0 0
    //   0 1 2 0
    //   0 3 4 0
    //   0 0 0 0
    std::vector<float> gold{0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4, 0, 0, 0, 0, 0};
    EXPECT(migraphx::verify_range(results_vector, gold));
}
......
......@@ -940,8 +940,8 @@ struct test_pad
{
migraphx::program p;
migraphx::shape s0{migraphx::shape::int32_type, {1, 96, 165, 165}};
    std::vector<int64_t> pads = {0, 0, 0, 0, 0, 0, 1, 1};
    auto l0                   = p.add_parameter("x", s0);
p.add_instruction(migraphx::op::pad{pads}, l0);
return p;
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment