Commit 38163d54 authored by turneram

Merge remote-tracking branch 'origin/develop' into bert-attention-no-transpose-ops

parents 7e316254 5bf4dee6
......@@ -145,7 +145,7 @@ TEST_CASE(conv)
const std::string mlir_output = R"__migraphx__(
module {
func @main(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
%0 = migraphx.convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
%0 = migraphx.convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1], use_dynamic_same_auto_pad = 0 : i64} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
return %0 : tensor<1x2x2x2xf32>
}
}
......@@ -168,7 +168,7 @@ TEST_CASE(conv_add_relu)
const std::string mlir_output = R"__migraphx__(
module {
func @main(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
%0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
%0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1], use_dynamic_same_auto_pad = 0 : i64} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
%1 = migraphx.add(%0, %arg0) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
%2 = migraphx.relu(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
return %2 : tensor<1x2x2x2xf32>
......
......@@ -647,6 +647,46 @@ def constant_scalar_test():
return ([node], [], [y])
@onnx_test
def constant_empty_scalar_int64_test():
x = np.array([]).astype(np.int64)
y = helper.make_tensor_value_info('0', TensorProto.INT64, [0])
node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['0'],
value=onnx.helper.make_tensor(
            name='empty_tensor',
data_type=TensorProto.INT64,
dims=x.shape,
vals=x.flatten().astype(np.int64),
),
)
return ([node], [], [y])
@onnx_test
def constant_one_val_int64_test():
x = np.array([1]).astype(np.int64)
    y = helper.make_tensor_value_info('0', TensorProto.INT64, [1])
node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['0'],
value=onnx.helper.make_tensor(
            name='one_val_tensor',
data_type=TensorProto.INT64,
dims=x.shape,
vals=x.flatten().astype(np.int64),
),
)
return ([node], [], [y])
@onnx_test
def const_of_shape_empty_input_test():
tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.INT64, [1],
......@@ -872,6 +912,96 @@ def conv_bn_relu_maxpool_test():
return ([node0, node1, node2, node3], [x, y, z, m, n, k, l], [out])
@onnx_test
def conv_dynamic_batch_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 3, 5, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
[None, 1, 3, 3])
node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])
return ([node], [x, y], [out])
@onnx_test
def conv_dynamic_img_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[1, 3, None, None])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
[1, 1, None, None])
node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])
return ([node], [x, y], [out])
@onnx_test
def conv_dynamic_weights_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT,
[1, 3, None, None])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
[1, 1, None, None])
node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])
return ([node], [x, y], [out])
@onnx_test
def conv_dynamic_img_and_weights_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[1, 3, None, None])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT,
[1, 3, None, None])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
[1, 1, None, None])
node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])
return ([node], [x, y], [out])
@onnx_test
def conv_dynamic_batch_same_upper_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 3, 5, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 5, 5])
node = onnx.helper.make_node('Conv',
inputs=['0', '1'],
outputs=['2'],
auto_pad='SAME_UPPER')
return ([node], [x, y], [out])
@onnx_test
def conv_dynamic_img_same_upper_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[1, 3, None, None])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
[1, 1, None, None])
node = onnx.helper.make_node('Conv',
inputs=['0', '1'],
outputs=['2'],
auto_pad='SAME_UPPER')
return ([node], [x, y], [out])
@onnx_test
def conv_dynamic_kernel_same_lower_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT,
[1, 3, None, None])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 5, 5])
node = onnx.helper.make_node('Conv',
inputs=['0', '1'],
outputs=['2'],
auto_pad='SAME_LOWER')
return ([node], [x, y], [out])
@onnx_test
def conv_relu_maxpool_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
......
......@@ -636,11 +636,31 @@ TEST_CASE(constant_scalar_test)
EXPECT(p == prog);
}
TEST_CASE(constant_empty_scalar_int64_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
mm->add_literal(migraphx::literal{migraphx::shape::int64_type});
auto prog = optimize_onnx("constant_empty_scalar_int64_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(constant_one_val_int64_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
mm->add_literal(migraphx::literal{migraphx::shape{migraphx::shape::int64_type, {1}}, {1}});
auto prog = optimize_onnx("constant_one_val_int64_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(const_of_shape_empty_input_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
mm->add_literal(migraphx::literal());
mm->add_literal(migraphx::literal(migraphx::shape::int32_type));
migraphx::shape s(migraphx::shape::int64_type, {1}, {0});
std::vector<int64_t> vec(s.elements(), 10);
mm->add_literal(migraphx::literal(s, vec));
......@@ -796,6 +816,170 @@ TEST_CASE(conv_bn_relu_maxpool_test)
EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_batch_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 6, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
l0,
l1);
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 6, 0};
auto prog = migraphx::parse_onnx("conv_dynamic_batch_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_img_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 10, 0}, {5, 10, 0}}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
l0,
l1);
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {5, 10, 0};
auto prog = migraphx::parse_onnx("conv_dynamic_img_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_weights_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {1, 3, 5, 5}});
auto l1 = mm->add_parameter(
"1", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
l0,
l1);
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {2, 4, 0};
auto prog = migraphx::parse_onnx("conv_dynamic_weights_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_img_and_weights_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 10, 0}, {5, 10, 0}}});
auto l1 = mm->add_parameter(
"1", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
l0,
l1);
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {5, 10, 0};
options.map_dyn_input_dims["1"] = {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}};
auto prog = migraphx::parse_onnx("conv_dynamic_img_and_weights_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_batch_same_upper_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 10, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
auto c0 =
mm->add_instruction(migraphx::make_op("convolution",
{{"padding", {1, 1, 1, 1}},
{"stride", {1, 1}},
{"dilation", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::same},
{"use_dynamic_same_auto_pad", false}}),
l0,
l1);
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 10, 0};
auto prog = migraphx::parse_onnx("conv_dynamic_batch_same_upper_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_img_same_upper_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 10, 0}, {5, 10, 0}}});
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}},
{"stride", {1, 1}},
{"dilation", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::same_upper},
{"use_dynamic_same_auto_pad", true}}),
l0,
l1);
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {5, 10, 0};
auto prog = migraphx::parse_onnx("conv_dynamic_img_same_upper_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_dynamic_kernel_same_lower_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {1, 3, 5, 5}});
auto l1 = mm->add_parameter(
"1", {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}});
auto c0 = mm->add_instruction(
migraphx::make_op("convolution",
{{"padding", {0, 0}},
{"stride", {1, 1}},
{"dilation", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::same_lower},
{"use_dynamic_same_auto_pad", true}}),
l0,
l1);
mm->add_return({c0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {2, 4, 0};
auto prog = migraphx::parse_onnx("conv_dynamic_kernel_same_lower_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_relu_maxpool_test)
{
migraphx::program p;
......@@ -3902,7 +4086,7 @@ TEST_CASE(reducesum_empty_axes_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
mm->add_literal({});
mm->add_literal(migraphx::literal{migraphx::shape::int64_type});
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
auto l1 = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {0, 1, 2, 3}}}), x);
auto r = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {0, 1, 2, 3}}}), l1);
......@@ -3917,7 +4101,7 @@ TEST_CASE(reducesum_noop_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
mm->add_literal({});
mm->add_literal(migraphx::literal{migraphx::shape::int64_type});
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
mm->add_return({x});
auto prog = migraphx::parse_onnx("reducesum_noop_test.onnx");
......@@ -5127,7 +5311,7 @@ TEST_CASE(squeeze_empty_axes_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
mm->add_literal({});
mm->add_literal(migraphx::literal{migraphx::shape::int64_type});
auto l0 = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 1, 5, 1}});
auto l1 = mm->add_instruction(migraphx::make_op("squeeze"), l0);
mm->add_return({l1});
......
......@@ -144,6 +144,7 @@ TEST_CASE(convolution_shape)
throws_shape(migraphx::make_op("convolution"), input2, weights2);
throws_shape(migraphx::make_op("convolution"), input2, weights);
// 1D convolution
migraphx::shape output_1d{migraphx::shape::float_type, {4, 4, 1}};
migraphx::shape input_1d{migraphx::shape::float_type, {4, 3, 3}};
migraphx::shape weights_1d{migraphx::shape::float_type, {4, 3, 3}};
......@@ -153,6 +154,11 @@ TEST_CASE(convolution_shape)
input_1d,
weights_1d);
// channel numbers mismatch
weights_1d = {migraphx::shape::float_type, {4, 8, 3}};
throws_shape(migraphx::make_op("convolution"), input_1d, weights_1d);
// 3D convolution
migraphx::shape output_3d{migraphx::shape::float_type, {4, 4, 1, 1, 1}};
migraphx::shape input_3d{migraphx::shape::float_type, {4, 3, 3, 3, 3}};
migraphx::shape weights_3d{migraphx::shape::float_type, {4, 3, 3, 3, 3}};
......@@ -164,6 +170,130 @@ TEST_CASE(convolution_shape)
weights_3d);
throws_shape(migraphx::make_op("convolution"), input_3d, weights_3d);
// dynamic batch
migraphx::shape input_dyn_shape{migraphx::shape::float_type,
{{1, 100, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}};
migraphx::shape weights_shape{migraphx::shape::float_type, {1, 3, 3, 3}};
migraphx::shape output_dyn_shape{migraphx::shape::float_type,
                                 {{1, 100, 0}, {1, 1, 0}, {3, 3, 0}, {3, 3, 0}}};
expect_shape(output_dyn_shape,
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
input_dyn_shape,
weights_shape);
// dynamic image
input_dyn_shape = {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 20, 0}, {5, 20, 0}}};
weights_shape = {migraphx::shape::float_type, {1, 3, 3, 3}};
// spatial dims {5, 20} with a 3x3 kernel, stride 1, no padding -> output range {3, 18}
output_dyn_shape = {migraphx::shape::float_type,
                    {{1, 1, 0}, {1, 1, 0}, {3, 18, 0}, {3, 18, 0}}};
expect_shape(output_dyn_shape,
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
input_dyn_shape,
weights_shape);
// dynamic weights
input_dyn_shape = {migraphx::shape::float_type, {1, 3, 10, 10}};
weights_shape = {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}};
output_dyn_shape = {migraphx::shape::float_type,
                    {{1, 1, 0}, {1, 1, 0}, {7, 9, 0}, {7, 9, 0}}};
expect_shape(output_dyn_shape,
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
input_dyn_shape,
weights_shape);
// dynamic img and weights
input_dyn_shape = {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 20, 0}, {5, 20, 0}}};
weights_shape = {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}};
output_dyn_shape = {migraphx::shape::float_type,
                    {{1, 1, 0}, {1, 1, 0}, {2, 19, 0}, {2, 19, 0}}};
expect_shape(output_dyn_shape,
migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
input_dyn_shape,
weights_shape);
// input attr shape mismatch
input_dyn_shape = {migraphx::shape::float_type,
{{1, 100, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}, {5, 5, 0}}};
weights_shape = {migraphx::shape::float_type, {1, 3, 3, 3, 3}};
throws_shape(migraphx::make_op("convolution",
{{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
input_dyn_shape,
weights_shape);
// auto_pad dynamic batch
input_dyn_shape = {migraphx::shape::float_type, {{1, 10, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}};
weights_shape = {migraphx::shape::float_type, {1, 3, 3, 3}};
output_dyn_shape = {migraphx::shape::float_type, {{1, 10, 0}, {1, 1, 0}, {5, 5, 0}, {5, 5, 0}}};
expect_shape(output_dyn_shape,
migraphx::make_op("convolution",
{{"stride", {1, 1}},
{"dilation", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::same_upper},
{"use_dynamic_same_auto_pad", true}}),
input_dyn_shape,
weights_shape);
// auto_pad dynamic img
input_dyn_shape = {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {5, 10, 0}, {5, 10, 0}}};
weights_shape = {migraphx::shape::float_type, {1, 3, 3, 3}};
output_dyn_shape = {migraphx::shape::float_type,
{{1, 1, 0}, {1, 1, 0}, {5, 10, 0}, {5, 10, 0}}};
expect_shape(output_dyn_shape,
migraphx::make_op("convolution",
{{"stride", {1, 1}},
{"dilation", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::same_upper},
{"use_dynamic_same_auto_pad", true}}),
input_dyn_shape,
weights_shape);
// auto_pad dynamic kernel
input_dyn_shape = {migraphx::shape::float_type,
{{1, 1, 0}, {3, 3, 0}, {10, 10, 0}, {10, 10, 0}}};
weights_shape = {migraphx::shape::float_type, {{1, 1, 0}, {3, 3, 0}, {2, 4, 0}, {2, 4, 0}}};
output_dyn_shape = {migraphx::shape::float_type,
{{1, 1, 0}, {1, 1, 0}, {10, 10, 0}, {10, 10, 0}}};
expect_shape(output_dyn_shape,
migraphx::make_op("convolution",
{{"stride", {1, 1}},
{"dilation", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::same_lower},
{"use_dynamic_same_auto_pad", true}}),
input_dyn_shape,
weights_shape);
}
TEST_CASE(contiguous_shape)
......
......@@ -68,8 +68,10 @@ struct operation
*
* @param ctx This is the context created by the `target` during compilation. Implementations
* can use the target's `context` class rather than the `context` interface class.
* @param output This is the output shape. It is equivalent to running `compute_shape` with each
* `shape` of the `argument`.
 * @param output Equivalent to running `compute_shape` with the `shape` of each input `argument`.
 * For a fixed shape, the returned `argument` has the same shape as `output`.
 * For a dynamic shape, the returned `argument` has a fixed shape that falls within the
 * bounds set by the dynamic `output` shape.
* @param input This is the `argument` result from the previous instruction's computation.
* @return Return an `argument` of the result computation. The `shape` of `argument` should be
 * the same as the `output` shape.
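As a rough illustration of that fixed-vs-dynamic contract, here is a minimal sketch of a unary compute function. It is not the library's operator code: the free function `compute_identity` is hypothetical, and it assumes `shape::dynamic()` reports whether a shape is dynamic and that `argument{shape}` allocates a buffer of that shape.

#include <migraphx/argument.hpp>
#include <migraphx/shape.hpp>

// Hypothetical sketch: pick the concrete output shape at run time.
migraphx::argument compute_identity(const migraphx::shape& output, const migraphx::argument& input)
{
    // Fixed `output`: use it directly. Dynamic `output`: the input's actual
    // (fixed) shape is assumed to fall within the bounds described by `output`.
    migraphx::shape out_shape = output.dynamic() ? input.get_shape() : output;
    migraphx::argument result{out_shape};
    // ... copy/compute elements from `input` into `result` ...
    return result;
}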
......@@ -137,7 +139,7 @@ auto compute_shape_op(rank<2>, const T& x, const std::vector<shape>& inputs)
-> decltype(x.normalize_compute_shape(inputs))
{
dependent_type<operation, T> y = x;
normalize_attributes(y, inputs[0].lens());
normalize_attributes(y, inputs[0].max_lens());
return any_cast<T>(y).normalize_compute_shape(inputs);
}
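The switch from `lens()` to `max_lens()` is what lets attribute normalization work when `inputs[0]` is dynamic: a dynamic shape has no single set of lengths, so the upper bounds are used instead, while for a fixed shape the two calls agree. A small illustration of that assumption, reusing the dynamic-shape syntax from the tests above:

// Dynamic NCHW input: batch ranges over 1..100, the other dims are fixed.
migraphx::shape dyn{migraphx::shape::float_type,
                    {{1, 100, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}};
// max_lens() would return {100, 3, 5, 5}; normalize_attributes can use these
// wherever a concrete set of lengths is needed (e.g. resolving negative axes).
auto upper = dyn.max_lens();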
......
......@@ -22,11 +22,14 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
import subprocess
import subprocess, os
#Debug flag
debug = False
__repo_dir__ = os.path.normpath(
os.path.join(os.path.realpath(__file__), '..', '..'))
# Markdown code blob we should use to insert into notebook files
def getipynb_markdownBlockAsList():
......@@ -222,14 +225,15 @@ def getDelimiter(filename):
def main():
message = open('LICENSE').read()
message = open(os.path.join(__repo_dir__, 'LICENSE')).read()
#Get a list of all the files in our git repo
#bashCommand = "git ls-files --exclude-standard"
#print (bashCommand.split())
proc = subprocess.run("git ls-files --exclude-standard",
shell=True,
stdout=subprocess.PIPE)
stdout=subprocess.PIPE,
cwd=__repo_dir__)
fileList = proc.stdout.decode().split('\n')
message = message.split('\n')
......@@ -237,7 +241,8 @@ def main():
print("Target file list:\n" + str(fileList))
print("Output Message:\n" + str(message))
for file in fileList:
for rfile in fileList:
file = os.path.join(__repo_dir__, rfile)
#print(file)
commentDelim = getDelimiter(file)
if commentDelim is not None:
......