Commit b878f78f authored by turneram

Merge remote-tracking branch 'origin/develop' into rewrite-fast-gelu

parents 3b414cc2 55cb7d3a
@@ -145,7 +145,7 @@ TEST_CASE(conv)
 const std::string mlir_output = R"__migraphx__(
 module {
 func @main(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
-%0 = migraphx.convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
+%0 = migraphx.convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1], use_dynamic_same_auto_pad = 0 : i64} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
 return %0 : tensor<1x2x2x2xf32>
 }
 }
@@ -168,7 +168,7 @@ TEST_CASE(conv_add_relu)
 const std::string mlir_output = R"__migraphx__(
 module {
 func @main(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
-%0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
+%0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1], use_dynamic_same_auto_pad = 0 : i64} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
 %1 = migraphx.add(%0, %arg0) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
 %2 = migraphx.relu(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
 return %2 : tensor<1x2x2x2xf32>
...
@@ -626,6 +626,46 @@ def constant_scalar_test():
    return ([node], [], [y])
@onnx_test
def constant_empty_scalar_int64_test():
    x = np.array([]).astype(np.int64)
    y = helper.make_tensor_value_info('0', TensorProto.INT64, [0])

    node = onnx.helper.make_node(
        'Constant',
        inputs=[],
        outputs=['0'],
        value=onnx.helper.make_tensor(
            name='empty_tensor',
            data_type=TensorProto.INT64,
            dims=x.shape,
            vals=x.flatten().astype(np.int64),
        ),
    )

    return ([node], [], [y])


@onnx_test
def constant_one_val_int64_test():
    x = np.array([1]).astype(np.int64)
    y = helper.make_tensor_value_info('0', TensorProto.INT64, [1])

    node = onnx.helper.make_node(
        'Constant',
        inputs=[],
        outputs=['0'],
        value=onnx.helper.make_tensor(
            name='one_element_tensor',
            data_type=TensorProto.INT64,
            dims=x.shape,
            vals=x.flatten().astype(np.int64),
        ),
    )

    return ([node], [], [y])
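For reference, a minimal sketch of what a harness like @onnx_test presumably does with the ([nodes], [inputs], [outputs]) tuples returned above; the real decorator lives elsewhere in this repo, so the helper name and save path here are assumptions:

# Hypothetical sketch (not the repo's actual @onnx_test): assemble the
# returned ([nodes], [inputs], [outputs]) tuple into a saved .onnx file.
import onnx
from onnx import helper

def build_onnx_test(nodes, inputs, outputs, name):
    graph = helper.make_graph(nodes, name, inputs, outputs)
    model = helper.make_model(graph)
    onnx.checker.check_model(model)   # validate the generated proto
    onnx.save(model, name + '.onnx')  # e.g. constant_one_val_int64_test.onnx
    return model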
@onnx_test
def const_of_shape_empty_input_test():
    tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.INT64, [1],
@@ -851,6 +891,96 @@ def conv_bn_relu_maxpool_test():
    return ([node0, node1, node2, node3], [x, y, z, m, n, k, l], [out])
@onnx_test
def conv_dynamic_batch_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 3, 5, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
                                        [None, 1, 3, 3])
    node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])

    return ([node], [x, y], [out])


@onnx_test
def conv_dynamic_img_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
                                      [1, 3, None, None])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
                                        [1, 1, None, None])
    node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])

    return ([node], [x, y], [out])


@onnx_test
def conv_dynamic_weights_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT,
                                      [1, 3, None, None])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
                                        [1, 1, None, None])
    node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])

    return ([node], [x, y], [out])


@onnx_test
def conv_dynamic_img_and_weights_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
                                      [1, 3, None, None])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT,
                                      [1, 3, None, None])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
                                        [1, 1, None, None])
    node = onnx.helper.make_node('Conv', inputs=['0', '1'], outputs=['2'])

    return ([node], [x, y], [out])


@onnx_test
def conv_dynamic_batch_same_upper_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 3, 5, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 5, 5])
    node = onnx.helper.make_node('Conv',
                                 inputs=['0', '1'],
                                 outputs=['2'],
                                 auto_pad='SAME_UPPER')

    return ([node], [x, y], [out])


@onnx_test
def conv_dynamic_img_same_upper_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
                                      [1, 3, None, None])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT,
                                        [1, 1, None, None])
    node = onnx.helper.make_node('Conv',
                                 inputs=['0', '1'],
                                 outputs=['2'],
                                 auto_pad='SAME_UPPER')

    return ([node], [x, y], [out])


@onnx_test
def conv_dynamic_kernel_same_lower_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT,
                                      [1, 3, None, None])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 5, 5])
    node = onnx.helper.make_node('Conv',
                                 inputs=['0', '1'],
                                 outputs=['2'],
                                 auto_pad='SAME_LOWER')

    return ([node], [x, y], [out])
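The SAME_UPPER and SAME_LOWER cases above depend on padding that can only be resolved once the dynamic image or kernel shape is known. As a reference, the per-axis computation from the ONNX Conv specification (a sketch of the spec formula, independent of MIGraphX's implementation):

import math

def same_auto_pad(in_size, kernel, stride=1, dilation=1, mode='SAME_UPPER'):
    # Output size is ceil(in_size / stride); pad whatever is missing.
    out_size = math.ceil(in_size / stride)
    total = max(
        (out_size - 1) * stride + (kernel - 1) * dilation + 1 - in_size, 0)
    small, big = total // 2, total - total // 2
    # SAME_UPPER puts the larger half at the end, SAME_LOWER at the beginning.
    return (small, big) if mode == 'SAME_UPPER' else (big, small)

# A 5x5 image with a 3x3 kernel needs (1, 1) padding per axis to stay 5x5,
# matching the [1, 1, 5, 5] outputs declared in the tests above.
assert same_auto_pad(5, 3) == (1, 1)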
@onnx_test
def conv_relu_maxpool_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
@@ -2496,6 +2626,62 @@ def instance_norm_test():
    return ([node], [x, scale, bias], [y])
@onnx_test
def instance_norm_half_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [1, 2, 3, 3])
    scale = helper.make_tensor_value_info('1', TensorProto.FLOAT16, [2])
    bias = helper.make_tensor_value_info('2', TensorProto.FLOAT16, [2])
    y = helper.make_tensor_value_info('3', TensorProto.FLOAT16, [1, 2, 3, 3])

    node = onnx.helper.make_node('InstanceNormalization',
                                 inputs=['0', '1', '2'],
                                 outputs=['3'])

    return ([node], [x, scale, bias], [y])


@onnx_test
def instance_norm_type_mismatch_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 2, 3, 3])
    scale = helper.make_tensor_value_info('1', TensorProto.FLOAT16, [2])
    bias = helper.make_tensor_value_info('2', TensorProto.FLOAT16, [2])
    y = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 2, 3, 3])

    node = onnx.helper.make_node('InstanceNormalization',
                                 inputs=['0', '1', '2'],
                                 outputs=['3'])

    return ([node], [x, scale, bias], [y])


@onnx_test
def instance_norm_invalid_type_test():
    x = helper.make_tensor_value_info('0', TensorProto.INT32, [1, 2, 3, 3])
    scale = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2])
    bias = helper.make_tensor_value_info('2', TensorProto.FLOAT, [2])
    y = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 2, 3, 3])

    node = onnx.helper.make_node('InstanceNormalization',
                                 inputs=['0', '1', '2'],
                                 outputs=['3'])

    return ([node], [x, scale, bias], [y])


@onnx_test
def instance_norm_nonbroadcastable_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 2, 3, 3])
    scale = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4])
    bias = helper.make_tensor_value_info('2', TensorProto.FLOAT, [4])
    y = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 2, 3, 3])

    node = onnx.helper.make_node('InstanceNormalization',
                                 inputs=['0', '1', '2'],
                                 outputs=['3'])

    return ([node], [x, scale, bias], [y])
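As context for what these cases probe: ONNX InstanceNormalization normalizes each (instance, channel) slice over the spatial dimensions, with one scale and one bias element per channel, and all three inputs share a single floating-point type. A NumPy reference for the operator's semantics (a sketch, not MIGraphX code):

import numpy as np

def instance_norm_ref(x, scale, bias, epsilon=1e-5):
    # Mean/variance per (N, C) slice, reduced over spatial axes only.
    axes = tuple(range(2, x.ndim))
    mean = x.mean(axis=axes, keepdims=True)
    var = x.var(axis=axes, keepdims=True)
    # scale/bias have shape [C]; reshape to broadcast over N and spatial dims.
    c_shape = (1, -1) + (1, ) * (x.ndim - 2)
    return scale.reshape(c_shape) * (x - mean) / np.sqrt(var + epsilon) \
        + bias.reshape(c_shape)

With C == 2 in the inputs above, a length-4 scale cannot broadcast, an INT32 input is outside the operator's type constraint, and FLOAT16 scale/bias on a FLOAT input breaks the single-type requirement; those are the error paths the invalid_type, nonbroadcastable, and type_mismatch tests target.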
@onnx_test
def instance_norm_val_test():
    x = np.array([[[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
@@ -3086,7 +3272,7 @@ def max_test():
     a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
     b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
     c = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3])
-    y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3])
+    y = helper.make_tensor_value_info('3', TensorProto.FLOAT, [3])
     node = onnx.helper.make_node(
         'Max',
@@ -3220,7 +3406,7 @@ def min_test():
     a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
     b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
     c = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3])
-    y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3])
+    y = helper.make_tensor_value_info('3', TensorProto.FLOAT, [3])
     node = onnx.helper.make_node(
         'Min',
@@ -3231,6 +3417,89 @@ def min_test():
    return ([node], [a, b, c], [y])
@onnx_test
def mod_test():
    a = helper.make_tensor_value_info('0', TensorProto.INT32, [3, 3, 3])
    b = helper.make_tensor_value_info('1', TensorProto.INT32, [3, 3, 3])
    y = helper.make_tensor_value_info('2', TensorProto.INT32, [3, 3, 3])

    node = onnx.helper.make_node('Mod', inputs=['0', '1'], outputs=['2'])

    return ([node], [a, b], [y])


@onnx_test
def mod_test_half():
    a = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [3, 3, 3])
    b = helper.make_tensor_value_info('1', TensorProto.FLOAT16, [3, 3, 3])
    y = helper.make_tensor_value_info('2', TensorProto.FLOAT16, [3, 3, 3])

    node = onnx.helper.make_node('Mod', inputs=['0', '1'], outputs=['2'])

    return ([node], [a, b], [y])


@onnx_test
def mod_test_different_dtypes():
    a = helper.make_tensor_value_info('0', TensorProto.INT16, [3, 3, 3])
    b = helper.make_tensor_value_info('1', TensorProto.INT32, [3, 3, 3])
    y = helper.make_tensor_value_info('2', TensorProto.INT32, [3, 3, 3])

    node = onnx.helper.make_node(
        'Mod',
        inputs=['0', '1'],
        outputs=['2'],
    )

    return ([node], [a, b], [y])


@onnx_test
def mod_test_fmod():
    a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3, 3, 3])
    b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 3, 3])
    y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 3, 3])

    node = onnx.helper.make_node(
        'Mod',
        inputs=['0', '1'],
        outputs=['2'],
        fmod=1  # fmod flag = 1
    )

    return ([node], [a, b], [y])


@onnx_test
def mod_test_fmod_half():
    a = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [3, 3, 3])
    b = helper.make_tensor_value_info('1', TensorProto.FLOAT16, [3, 3, 3])
    y = helper.make_tensor_value_info('2', TensorProto.FLOAT16, [3, 3, 3])

    node = onnx.helper.make_node('Mod',
                                 inputs=['0', '1'],
                                 outputs=['2'],
                                 fmod=1)

    return ([node], [a, b], [y])


@onnx_test
def mod_test_fmod_different_dtypes():
    a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3, 3, 3])
    b = helper.make_tensor_value_info('1', TensorProto.INT32, [3, 3, 3])
    y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 3, 3])

    node = onnx.helper.make_node(
        'Mod',
        inputs=['0', '1'],
        outputs=['2'],
        fmod=1  # fmod flag = 1
    )

    return ([node], [a, b], [y])
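The fmod attribute exercised above selects between the two remainder conventions defined by ONNX Mod; the difference is easiest to see in NumPy (illustrative only):

import numpy as np

a = np.array([-4, 7, -7])
b = np.array([3, -3, 3])
print(np.mod(a, b))   # [ 2 -2  2] : fmod=0, result takes the divisor's sign
print(np.fmod(a, b))  # [-1  1 -1] : fmod=1, result takes the dividend's sign

Per the ONNX spec, fmod=0 is only defined for integer tensors, and floating-point inputs require fmod=1; the FLOAT16 case without fmod above presumably exercises that error path.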
@onnx_test
def multinomial_test():
    sample_size = 10
...
[Binary ONNX protobuf diffs omitted; only the file and graph names are recoverable.
New binary test files: instance_norm_half_test, instance_norm_invalid_type_test,
instance_norm_nonbroadcastable_test, instance_norm_type_mismatch_test, mod_test,
mod_test_different_dtypes, mod_test_fmod.
Regenerated: max_test and min_test, replacing the old max-example/min-example files
(graph name updated from "test-dropout" to the test name, output renamed from '2' to '3').]