Commit 9a1a92f5 authored by Brian Pickrell's avatar Brian Pickrell
Browse files

work in progress, "test_verify_onnx resize_downsample_f_dyn_test" looks ok. Contains debug code.

parent 19bbfb2b
...@@ -348,6 +348,7 @@ struct parse_resize : op_parser<parse_resize> ...@@ -348,6 +348,7 @@ struct parse_resize : op_parser<parse_resize>
// The indexes we find will be an argument to the gather op. // The indexes we find will be an argument to the gather op.
shape_for_each(static_out_shape, [&](const auto& out_idx_v, size_t out_idx) { shape_for_each(static_out_shape, [&](const auto& out_idx_v, size_t out_idx) {
std::vector<size_t> in_idx(out_idx_v.size()); std::vector<size_t> in_idx(out_idx_v.size());
// printf(" index ");
for(auto ii = 0; ii < fixed_dims.size(); ++ii) for(auto ii = 0; ii < fixed_dims.size(); ++ii)
{ {
// Convert this index by scaling. Inefficient since indexes are repeated // Convert this index by scaling. Inefficient since indexes are repeated
...@@ -355,37 +356,79 @@ struct parse_resize : op_parser<parse_resize> ...@@ -355,37 +356,79 @@ struct parse_resize : op_parser<parse_resize>
fixed_dims[ii], fixed_out_lens[ii], out_idx_v[ii], vec_scale[ii]); fixed_dims[ii], fixed_out_lens[ii], out_idx_v[ii], vec_scale[ii]);
// round the scaled value to an index // round the scaled value to an index
in_idx[ii] = nearest_op(fixed_dims[ii], idx_val); in_idx[ii] = nearest_op(fixed_dims[ii], idx_val);
// printf(" %lu ", in_idx[ii]);
} }
// printf("\n");
// convert a 3-D index to a single index into a vector
// ind has a size equal to the output, each value is a 1D index into the
// input data
ind[out_idx] = static_cast<int64_t>(static_out_shape.index(in_idx)); ind[out_idx] = static_cast<int64_t>(static_out_shape.index(in_idx));
// printf("Maps to %d \n", ind[out_idx]);
}); });
// Create a static shape that's just like the scaled out_lens except we set to 1 the // Create a static shape that's just like the scaled out_lens except we set to 1 the
// 0'th dimension of output, later to be broadcasted to dynamic batch size // 0'th dimension of output, later to be broadcasted to dynamic batch size
out_lens[0] = 1; // out_lens[0] = 1;
shape ind_s{shape::int32_type, out_lens}; // shape ind_s{shape::int32_type, out_lens};
auto ins_ind = info.add_literal(literal(ind_s, ind)); // auto ins_ind = info.add_literal(literal(ind_s, ind));
// define a dynamic shape including the batch dimension // define a dynamic shape including the batch dimension
std::vector<shape::dynamic_dimension> out_dyn_dims(in_dims.size()); // Not using this now; the next block seems to work with gather
out_dyn_dims[0] = in_dims[0]; // std::vector<shape::dynamic_dimension> out_dyn_dims(in_dims.size());
std::transform(fixed_out_lens.begin(), // out_dyn_dims[0] = in_dims[0];
fixed_out_lens.end(), // std::transform(fixed_out_lens.begin(),
out_dyn_dims.begin() + 1, // fixed_out_lens.end(),
[&](auto len) { // out_dyn_dims.begin() + 1,
return shape::dynamic_dimension{len, len}; // [&](auto len) {
}); // return shape::dynamic_dimension{len, len};
shape dyn_out_shape{in_s.type(), out_dyn_dims}; // });
// shape dyn_out_shape{in_s.type(), out_dyn_dims};
// allocate op to create the output argument we want
auto ins_dyn_out = instruction_ref gather_ins{args[0]};
info.add_instruction(make_op("allocate", {{"shape", to_value(dyn_out_shape)}})); // for each static dimension
for(auto ii = 0; ii < fixed_dims.size(); ++ii)
// multibroadcast op to convert static ins_ind to a dynamic shape {
auto ins_dyn = std::vector<size_t> in_idx(fixed_out_lens[ii]);
info.add_instruction(make_op("multibroadcast"), ins_ind, ins_dyn_out); // for range of this dimension's size in output
for(auto len : range(fixed_out_lens[ii]))
return info.add_instruction(make_op("gather", {{"axis", 0}}), args[0], ins_dyn); {
// Convert this index by scaling.
auto idx_val = idx_op(fixed_dims[ii], fixed_out_lens[ii], len, vec_scale[ii+1]);
printf(" ii %d out_lens %lu len %lu vec_scale[ii+1] %f ---> idx_val %f\n", ii, fixed_out_lens[ii],
len, vec_scale[ii+1], idx_val);
// round the scaled value to an index
in_idx[len] = nearest_op(fixed_dims[ii], idx_val);
printf(" in_idx %lu\n", in_idx[len]);
// Put the value into index vector
}
// Create a 1D shape literal
auto index_litA = info.add_literal(literal(migraphx::shape(migraphx::shape::int64_type,
{fixed_out_lens[ii]}), in_idx));
// add a "gather" instruction
gather_ins = info.add_instruction(make_op("gather", {{"axis", 1 + ii}}), gather_ins, index_litA);
printf("***\n");
if( ii == (fixed_dims.size() - 1))
return gather_ins;
}
// If we get here, no gather instructions were added.
MIGRAPHX_THROW("PARSE_RESIZE: inputs didn't have enough dimensions");
// define an index dynamic shape without the batch dimension
// shape index_shape{in_s.type(), fixed_out_lens};
// this has the same data as ins_ind, but 1 less dimension ;lacks the leading dimension of 1
// auto index_lit = info.add_literal(literal(index_shape, ind));
printf("fixed_out_lens: ");
for(size_t aa : fixed_out_lens) printf (" %lu ", aa);printf("\n");
// Axis 0 or 1? look into shape in gather's compute_shape' and results
// return info.add_instruction(make_op("gather", {{"axis", 0}}), args[0], index_lit);
} }
else else
{ {
......
...@@ -5652,7 +5652,7 @@ def resize_downsample_f_test(): ...@@ -5652,7 +5652,7 @@ def resize_downsample_f_test():
dims=scales.shape, dims=scales.shape,
vals=scales.flatten().astype(np.float32)) vals=scales.flatten().astype(np.float32))
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 35, 60]) X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 4])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, []) Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [])
node = onnx.helper.make_node( node = onnx.helper.make_node(
...@@ -5669,13 +5669,13 @@ def resize_downsample_f_test(): ...@@ -5669,13 +5669,13 @@ def resize_downsample_f_test():
@onnx_test() @onnx_test()
def resize_downsample_f_dyn_test(): def resize_downsample_f_dyn_test():
scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32) scales = np.array([1.0, 1.0, 0.601, 0.601], dtype=np.float32)
scale_tensor = helper.make_tensor(name='scales', scale_tensor = helper.make_tensor(name='scales',
data_type=TensorProto.FLOAT, data_type=TensorProto.FLOAT,
dims=scales.shape, dims=scales.shape,
vals=scales.flatten().astype(np.float32)) vals=scales.flatten().astype(np.float32))
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [None, 1, 35, 60]) X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [None, 1, 5, 9])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, []) Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [])
node = onnx.helper.make_node( node = onnx.helper.make_node(
......
...@@ -1266,6 +1266,35 @@ TEST_CASE(resize_downsample_f_test) ...@@ -1266,6 +1266,35 @@ TEST_CASE(resize_downsample_f_test)
EXPECT(migraphx::verify::verify_range(result_vector, gold)); EXPECT(migraphx::verify::verify_range(result_vector, gold));
} }
// Verifies the Resize (downsample, nearest) parser path when the input has a
// dynamic batch dimension: the model is parsed with a dynamic dim range, then
// evaluated with a concrete batch of 2.
TEST_CASE(resize_downsample_f_dyn_test)
{
    // Allow the leading (batch) dimension to range over [1, 10] at parse time.
    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 10};
    options.use_dyn_output        = true;
    auto p = migraphx::parse_onnx("resize_downsample_f_dyn_test.onnx", options);
    p.compile(migraphx::make_target("ref"));

    // Bind a concrete batch of 2 at eval time; data is 0.1, 1.1, 2.1, ...
    migraphx::shape sx{migraphx::shape::float_type, {2, 1, 5, 9}};
    std::vector<float> dx(sx.elements());
    std::iota(dx.begin(), dx.end(), 0.1f);
    migraphx::parameter_map pp;
    pp["X"] = migraphx::argument(sx, dx.data());

    auto result = p.eval(pp).back();
    std::vector<float> result_vector;
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    // NOTE(review): gold looks like a placeholder — every input element ends in
    // .1 (iota from 0.1f), so a gather output can never equal 0.0f or 3.0f
    // exactly. Recompute the expected values once the parser change is final.
    std::vector<float> gold = {0.0f, 3.0f};
    EXPECT(migraphx::verify::verify_range(result_vector, gold));
}
TEST_CASE(resize_upsample_linear_ac_test) TEST_CASE(resize_upsample_linear_ac_test)
{ {
migraphx::program p = migraphx::parse_onnx("resize_upsample_linear_ac_test.onnx"); migraphx::program p = migraphx::parse_onnx("resize_upsample_linear_ac_test.onnx");
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment