Unverified Commit ca300bd6 authored by Chris Austen's avatar Chris Austen Committed by GitHub
Browse files

Merge branch 'develop' into blas_tuning

parents 5adb597c e7486577
......@@ -2722,6 +2722,119 @@ def group_conv_test():
return ([node], [x, y], [z])
def group_norm_test(x_dims,
                    scale_dims,
                    bias_dims,
                    y_dims,
                    num_groups,
                    eps_value=1e-5,
                    dtype=TensorProto.FLOAT):
    """Build a GroupNormalization graph triple (nodes, inputs, outputs).

    Creates value infos for x/scale/bias/y with the given dims and dtype,
    plus a single GroupNormalization node wired to them.
    """
    infos = {
        name: helper.make_tensor_value_info(name, dtype, dims)
        for name, dims in (('x', x_dims), ('scale', scale_dims),
                           ('bias', bias_dims), ('y', y_dims))
    }
    norm_node = onnx.helper.make_node('GroupNormalization',
                                      inputs=['x', 'scale', 'bias'],
                                      outputs=['y'],
                                      num_groups=num_groups,
                                      epsilon=eps_value)
    return ([norm_node], [infos['x'], infos['scale'], infos['bias']],
            [infos['y']])
@onnx_test()
def group_norm_3d_test():
    """GroupNorm on a rank-3 input, 4 channels in 2 groups."""
    shape = [1, 4, 2]
    return group_norm_test(shape, [2], [2], shape, 2)


@onnx_test()
def group_norm_3d_half_test():
    """Half-precision variant of the rank-3 GroupNorm test."""
    shape = [1, 4, 2]
    return group_norm_test(shape, [2], [2], shape, 2,
                           dtype=TensorProto.FLOAT16)


@onnx_test()
def group_norm_4d_test():
    """GroupNorm on a rank-4 input, 4 channels in 2 groups."""
    shape = [1, 4, 3, 3]
    return group_norm_test(shape, [2], [2], shape, 2)


@onnx_test()
def group_norm_4d_half_test():
    """Half-precision variant of the rank-4 GroupNorm test."""
    shape = [1, 4, 3, 3]
    return group_norm_test(shape, [2], [2], shape, 2,
                           dtype=TensorProto.FLOAT16)


@onnx_test()
def group_norm_5d_test():
    """GroupNorm on a rank-5 input with a single group."""
    shape = [3, 3, 3, 3, 3]
    return group_norm_test(shape, [1], [1], shape, 1)


@onnx_test()
def group_norm_5d_half_test():
    """Half-precision variant of the rank-5 GroupNorm test."""
    shape = [3, 3, 3, 3, 3]
    return group_norm_test(shape, [1], [1], shape, 1,
                           dtype=TensorProto.FLOAT16)


@onnx_test()
def group_norm_small_eps_half_test():
    """Half-precision GroupNorm with a very small epsilon (1e-12)."""
    shape = [1, 4, 2]
    return group_norm_test(shape, [2], [2], shape, 2,
                           eps_value=1e-12,
                           dtype=TensorProto.FLOAT16)


@onnx_test()
def group_norm_invalid_num_groups_error_test():
    """num_groups=3 does not divide the 4 channels (negative test)."""
    shape = [1, 4, 3, 3]
    return group_norm_test(shape, [2], [2], shape, 3)
@onnx_test()
def group_norm_missing_attribute_error_test():
    """GroupNormalization node built without the num_groups attribute."""
    f32 = TensorProto.FLOAT
    x = helper.make_tensor_value_info('x', f32, [1, 4])
    scale = helper.make_tensor_value_info('scale', f32, [2])
    bias = helper.make_tensor_value_info('bias', f32, [2])
    y = helper.make_tensor_value_info('y', f32, [1, 4])
    # Deliberately omits num_groups, which the parser should reject.
    bad_node = onnx.helper.make_node('GroupNormalization',
                                     inputs=['x', 'scale', 'bias'],
                                     outputs=['y'])
    return ([bad_node], [x, scale, bias], [y])
@onnx_test()
def group_norm_invalid_input_count_error_test():
    """GroupNormalization wired with only two inputs (bias omitted)."""
    f32 = TensorProto.FLOAT
    x = helper.make_tensor_value_info('x', f32, [1, 4, 3, 3])
    scale = helper.make_tensor_value_info('scale', f32, [2])
    y = helper.make_tensor_value_info('y', f32, [1, 4, 3, 3])
    bad_node = onnx.helper.make_node('GroupNormalization',
                                     inputs=['x', 'scale'],
                                     outputs=['y'],
                                     num_groups=2)
    return ([bad_node], [x, scale], [y])
@onnx_test()
def group_norm_invalid_input_shape_error_test():
    """GroupNorm with a rank-2 input (deliberately too few dims)."""
    return group_norm_test([1, 4], [2], [2], [1, 4], 2)


@onnx_test()
def group_norm_invalid_scale_shape_test():
    """GroupNorm with a deliberately mismatched scale shape [1]."""
    shape = [1, 4, 3, 3]
    return group_norm_test(shape, [1], [2], shape, 2)


@onnx_test()
def group_norm_invalid_bias_shape_test():
    """GroupNorm with a deliberately mismatched bias shape [3]."""
    shape = [1, 4, 3, 3]
    return group_norm_test(shape, [2], [3], shape, 2)
@onnx_test()
def hardsigmoid_default_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 3, 4, 5])
......@@ -3804,6 +3917,110 @@ def layernorm_test():
bias_add], [x, scale, bias], [y], [pow_tensor, epsilon_tensor])
def make_layer_norm(shape, axis, dtype=TensorProto.FLOAT):
    """Build a LayerNormalization graph triple (nodes, inputs, outputs).

    scale and bias get the trailing dims shape[norm_axis:], where a
    negative axis is wrapped by the input rank.
    """
    norm_axis = axis if axis >= 0 else axis + len(shape)
    norm_dims = shape[norm_axis:]
    x = helper.make_tensor_value_info('x', dtype, shape)
    scale = helper.make_tensor_value_info('scale', dtype, norm_dims)
    bias = helper.make_tensor_value_info('bias', dtype, norm_dims)
    y = helper.make_tensor_value_info('y', dtype, shape)
    ln_node = onnx.helper.make_node('LayerNormalization',
                                    inputs=['x', 'scale', 'bias'],
                                    outputs=['y'],
                                    axis=axis)
    return ([ln_node], [x, scale, bias], [y])
@onnx_test()
def layer_norm_invalid_shape_error_test():
    """LayerNorm on a rank-1 input (negative test per its name)."""
    return make_layer_norm([3], 0)


@onnx_test()
def layer_norm_2d_axis_zero_test():
    """2-D LayerNorm normalizing from axis 0."""
    return make_layer_norm([3, 4], 0)


@onnx_test()
def layer_norm_2d_axis_one_test():
    """2-D LayerNorm normalizing from axis 1."""
    return make_layer_norm([3, 4], 1)


@onnx_test()
def layer_norm_2d_axis_minus_one_test():
    """2-D LayerNorm using the negative-axis form of axis 1."""
    return make_layer_norm([3, 4], -1)


@onnx_test()
def layer_norm_3d_test():
    """3-D LayerNorm over the last axis."""
    return make_layer_norm([1, 4, 2], -1)


@onnx_test()
def layer_norm_3d_half_test():
    """Half-precision 3-D LayerNorm over the last axis."""
    return make_layer_norm([1, 4, 2], -1, dtype=TensorProto.FLOAT16)


@onnx_test()
def layer_norm_4d_test():
    """4-D LayerNorm over the last axis."""
    return make_layer_norm([3, 3, 3, 3], -1)


@onnx_test()
def layer_norm_4d_half_test():
    """Half-precision 4-D LayerNorm over the last axis."""
    return make_layer_norm([3, 3, 3, 3], -1, dtype=TensorProto.FLOAT16)


@onnx_test()
def layer_norm_invalid_axis_error_test():
    """Axis far beyond the input rank (negative test)."""
    return make_layer_norm([1, 4, 2], 1000)


@onnx_test()
def layer_norm_invalid_minus_axis_error_test():
    """Negative axis far beyond the input rank (negative test)."""
    return make_layer_norm([1, 4, 2], -1000)
@onnx_test()
def layer_norm_invalid_input_count_error_test():
    """LayerNormalization wired with only the data input."""
    f32 = TensorProto.FLOAT
    x = helper.make_tensor_value_info('x', f32, [1, 2])
    y = helper.make_tensor_value_info('y', f32, [1, 2])
    # scale (and bias) inputs are deliberately missing.
    bad_node = onnx.helper.make_node('LayerNormalization',
                                     inputs=['x'],
                                     outputs=['y'])
    return ([bad_node], [x], [y])
@onnx_test()
def layer_norm_without_bias_test():
    """LayerNormalization using only x and scale (bias input omitted)."""
    f32 = TensorProto.FLOAT
    x = helper.make_tensor_value_info('x', f32, [1, 2])
    scale = helper.make_tensor_value_info('scale', f32, [2])
    y = helper.make_tensor_value_info('y', f32, [1, 2])
    ln_node = onnx.helper.make_node('LayerNormalization',
                                    inputs=['x', 'scale'],
                                    outputs=['y'])
    return ([ln_node], [x, scale], [y])
@onnx_test()
def layer_norm_small_eps_half_test():
    """Half-precision LayerNorm with a very small epsilon (1e-12)."""
    f16 = TensorProto.FLOAT16
    x = helper.make_tensor_value_info('x', f16, [1, 2])
    scale = helper.make_tensor_value_info('scale', f16, [2])
    y = helper.make_tensor_value_info('y', f16, [1, 2])
    ln_node = onnx.helper.make_node('LayerNormalization',
                                    inputs=['x', 'scale'],
                                    outputs=['y'],
                                    epsilon=1e-12)
    return ([ln_node], [x, scale], [y])
@onnx_test()
def leaky_relu_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
......@@ -4890,6 +5107,32 @@ def pad_test():
return ([node], [x], [y])
@onnx_test()
def pad_asym_test():
    """Pad with asymmetric per-side amounts given as a node attribute."""
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 4, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 6, 4, 12])
    pad_node = onnx.helper.make_node('Pad',
                                     inputs=['0'],
                                     outputs=['1'],
                                     pads=[0, 1, 0, 3, 0, 2, 0, 4])
    return ([pad_node], [x], [y])
@onnx_test()
def pad_asym_invalid_pads_error_test():
    """Pad whose pads attribute has 6 values for a rank-4 input."""
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 4, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 6, 4, 12])
    # 6 pad values instead of the 8 a rank-4 input requires.
    bad_node = onnx.helper.make_node('Pad',
                                     inputs=['0'],
                                     outputs=['1'],
                                     pads=[0, 1, 0, 3, 0, 2])
    return ([bad_node], [x], [y])
@onnx_test()
def pad_3arg_test():
values = np.array([1])
......@@ -4922,6 +5165,129 @@ def pad_3arg_test():
return ([arg_val, arg_pad, node], [x], [y])
@onnx_test()
def pad_4arg_axes_test():
    """Pad taking pads, constant value and axes as Constant node inputs."""

    def const_node(out_name, tensor):
        # One-output Constant node wrapping the given tensor.
        return onnx.helper.make_node('Constant',
                                     inputs=[],
                                     outputs=[out_name],
                                     value=tensor)

    values = np.array([1])
    val_tensor = helper.make_tensor(name='val',
                                    data_type=TensorProto.FLOAT,
                                    dims=values.reshape(()).shape,
                                    vals=values.astype(float))
    sizes = np.array([1, 3, 2, 4])
    pad_tensor = helper.make_tensor(name='pad_size',
                                    data_type=TensorProto.INT32,
                                    dims=sizes.shape,
                                    vals=sizes.astype(int))
    axes = np.array([1, 3])
    axes_tensor = helper.make_tensor(name='pad_axes',
                                     data_type=TensorProto.INT32,
                                     dims=axes.shape,
                                     vals=axes.astype(int))
    arg_val = const_node('arg_val', val_tensor)
    arg_pad = const_node('arg_pad', pad_tensor)
    arg_axes = const_node('arg_axes', axes_tensor)

    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 4, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 6, 4, 12])
    pad_node = onnx.helper.make_node(
        'Pad', inputs=['0', 'arg_pad', 'arg_val', 'arg_axes'], outputs=['1'])
    return ([arg_axes, arg_val, arg_pad, pad_node], [x], [y])
@onnx_test()
def pad_4arg_invalid_axes_error_test():
    """Pad whose axes input lists 3 axes for 4 pad values (2 axes' worth)."""

    def const_node(out_name, tensor):
        # One-output Constant node wrapping the given tensor.
        return onnx.helper.make_node('Constant',
                                     inputs=[],
                                     outputs=[out_name],
                                     value=tensor)

    values = np.array([1])
    val_tensor = helper.make_tensor(name='val',
                                    data_type=TensorProto.FLOAT,
                                    dims=values.reshape(()).shape,
                                    vals=values.astype(float))
    sizes = np.array([1, 3, 2, 4])
    pad_tensor = helper.make_tensor(name='pad_size',
                                    data_type=TensorProto.INT32,
                                    dims=sizes.shape,
                                    vals=sizes.astype(int))
    # Deliberately inconsistent axes count versus the pads length above.
    axes = np.array([1, 2, 3])
    axes_tensor = helper.make_tensor(name='pad_axes',
                                     data_type=TensorProto.INT32,
                                     dims=axes.shape,
                                     vals=axes.astype(int))
    arg_val = const_node('arg_val', val_tensor)
    arg_pad = const_node('arg_pad', pad_tensor)
    arg_axes = const_node('arg_axes', axes_tensor)

    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 4, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 6, 4, 12])
    pad_node = onnx.helper.make_node(
        'Pad', inputs=['0', 'arg_pad', 'arg_val', 'arg_axes'], outputs=['1'])
    return ([arg_axes, arg_val, arg_pad, pad_node], [x], [y])
@onnx_test()
def pad_4arg_neg_axes_test():
    """Pad whose axes input uses negative axis indices (-3, -1)."""

    def const_node(out_name, tensor):
        # One-output Constant node wrapping the given tensor.
        return onnx.helper.make_node('Constant',
                                     inputs=[],
                                     outputs=[out_name],
                                     value=tensor)

    values = np.array([1])
    val_tensor = helper.make_tensor(name='val',
                                    data_type=TensorProto.FLOAT,
                                    dims=values.reshape(()).shape,
                                    vals=values.astype(float))
    sizes = np.array([1, 3, 2, 4])
    pad_tensor = helper.make_tensor(name='pad_size',
                                    data_type=TensorProto.INT32,
                                    dims=sizes.shape,
                                    vals=sizes.astype(int))
    axes = np.array([-3, -1])
    axes_tensor = helper.make_tensor(name='pad_axes',
                                     data_type=TensorProto.INT32,
                                     dims=axes.shape,
                                     vals=axes.astype(int))
    arg_val = const_node('arg_val', val_tensor)
    arg_pad = const_node('arg_pad', pad_tensor)
    arg_axes = const_node('arg_axes', axes_tensor)

    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 4, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 6, 4, 12])
    pad_node = onnx.helper.make_node(
        'Pad', inputs=['0', 'arg_pad', 'arg_val', 'arg_axes'], outputs=['1'])
    return ([arg_axes, arg_val, arg_pad, pad_node], [x], [y])
@onnx_test()
def pad_reflect_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2])
......@@ -4945,6 +5311,39 @@ def pad_reflect_test():
return ([arg_pad, node], [x], [y])
@onnx_test()
def pad_reflect_with_axes_test():
    """Reflect-mode Pad whose pads apply only to the axis given as input."""
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 5])

    sizes = np.array([2, 1])
    pad_tensor = helper.make_tensor(name='pad_size',
                                    data_type=TensorProto.INT32,
                                    dims=sizes.shape,
                                    vals=sizes.astype(int))
    arg_pad = onnx.helper.make_node('Constant',
                                    inputs=[],
                                    outputs=['arg_pad'],
                                    value=pad_tensor)
    axes = np.array([1])
    axes_tensor = helper.make_tensor(name='pad_axes',
                                     data_type=TensorProto.INT32,
                                     dims=axes.shape,
                                     vals=axes.astype(int))
    arg_axes = onnx.helper.make_node('Constant',
                                     inputs=[],
                                     outputs=['arg_axes'],
                                     value=axes_tensor)
    pad_node = onnx.helper.make_node('Pad',
                                     mode='reflect',
                                     inputs=['0', 'arg_pad', 'arg_axes'],
                                     outputs=['1'])
    return ([arg_axes, arg_pad, pad_node], [x], [y])
@onnx_test()
def pad_reflect_multiaxis_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3])
......@@ -6736,6 +7135,101 @@ def shape_gather_test():
return ([node_const, node_shape, node_gather], [x], [z])
@onnx_test()
def shrink_hard_test():
    """Shrink with only lambd set; bias is left at its default."""
    x = helper.make_tensor_value_info("x", TensorProto.FLOAT, [5])
    y = helper.make_tensor_value_info("y", TensorProto.FLOAT, [5])
    shrink_node = onnx.helper.make_node("Shrink",
                                        inputs=["x"],
                                        outputs=["y"],
                                        lambd=1.5)
    return ([shrink_node], [x], [y])
@onnx_test()
def shrink_soft_test():
    """Shrink with both lambd and bias set to 1.5."""
    x = helper.make_tensor_value_info("x", TensorProto.FLOAT, [5])
    y = helper.make_tensor_value_info("y", TensorProto.FLOAT, [5])
    shrink_node = onnx.helper.make_node("Shrink",
                                        inputs=["x"],
                                        outputs=["y"],
                                        lambd=1.5,
                                        bias=1.5)
    return ([shrink_node], [x], [y])
@onnx_test()
def shrink_verify_test():
    """Half-precision Shrink with a negative lambd (-5.0) and bias 1.0."""
    x = helper.make_tensor_value_info("x", TensorProto.FLOAT16, [5])
    y = helper.make_tensor_value_info("y", TensorProto.FLOAT16, [5])
    shrink_node = onnx.helper.make_node("Shrink",
                                        inputs=["x"],
                                        outputs=["y"],
                                        lambd=-5.0,
                                        bias=1.0)
    return ([shrink_node], [x], [y])
@onnx_test()
def shrink_verify2_test():
    """Half-precision Shrink with a negative lambd (-6.0) and bias 5.0."""
    x = helper.make_tensor_value_info("x", TensorProto.FLOAT16, [5])
    y = helper.make_tensor_value_info("y", TensorProto.FLOAT16, [5])
    shrink_node = onnx.helper.make_node("Shrink",
                                        inputs=["x"],
                                        outputs=["y"],
                                        lambd=-6.0,
                                        bias=5.0)
    return ([shrink_node], [x], [y])
@onnx_test()
def shrink_int8_test():
    """Shrink on int8 data with lambd and bias both 1.5."""
    x = helper.make_tensor_value_info("x", TensorProto.INT8, [3, 3])
    y = helper.make_tensor_value_info("y", TensorProto.INT8, [3, 3])
    shrink_node = onnx.helper.make_node("Shrink",
                                        inputs=["x"],
                                        outputs=["y"],
                                        lambd=1.5,
                                        bias=1.5)
    return ([shrink_node], [x], [y])
@onnx_test()
def shrink_uint8_test():
    """Shrink on uint8 data with lambd 5.0 and a negative bias (-4.5)."""
    x = helper.make_tensor_value_info("x", TensorProto.UINT8, [3, 3])
    y = helper.make_tensor_value_info("y", TensorProto.UINT8, [3, 3])
    shrink_node = onnx.helper.make_node("Shrink",
                                        inputs=["x"],
                                        outputs=["y"],
                                        lambd=5.0,
                                        bias=-4.5)
    return ([shrink_node], [x], [y])
@onnx_test()
def sign_test():
x = helper.make_tensor_value_info('x', TensorProto.DOUBLE, [10, 5])
......
group_norm_3d_half_test:
M
x
scale
biasy"GroupNormalization*
epsilon'7*
num_groupsgroup_norm_3d_half_testZ
x




Z
scale


Z
bias


b
y




B
\ No newline at end of file
 group_norm_3d_test:
:
x
scale
biasy"GroupNormalization*
num_groupsgroup_norm_3d_testZ
x



Z
scale

Z
bias

b
y



B
\ No newline at end of file
group_norm_4d_half_test:
M
x
scale
biasy"GroupNormalization*
epsilon'7*
num_groupsgroup_norm_4d_half_testZ
x





Z
scale


Z
bias


b
y





B
\ No newline at end of file
 group_norm_4d_test:
:
x
scale
biasy"GroupNormalization*
num_groupsgroup_norm_4d_testZ
x




Z
scale

Z
bias

b
y




B
\ No newline at end of file
group_norm_5d_half_test:
M
x
scale
biasy"GroupNormalization*
epsilon'7*
num_groupsgroup_norm_5d_half_testZ
x






Z
scale


Z
bias


b
y






B
\ No newline at end of file
 group_norm_5d_test:
:
x
scale
biasy"GroupNormalization*
num_groupsgroup_norm_5d_testZ
x





Z
scale

Z
bias

b
y





B
\ No newline at end of file
 "group_norm_invalid_bias_shape_test:
:
x
scale
biasy"GroupNormalization*
num_groups"group_norm_invalid_bias_shape_testZ
x




Z
scale

Z
bias

b
y




B
\ No newline at end of file
 )group_norm_invalid_input_count_error_test:
4
x
scaley"GroupNormalization*
num_groups)group_norm_invalid_input_count_error_testZ
x




Z
scale

b
y




B
\ No newline at end of file
 )group_norm_invalid_input_shape_error_test:
:
x
scale
biasy"GroupNormalization*
num_groups)group_norm_invalid_input_shape_error_testZ
x


Z
scale

Z
bias

b
y


B
\ No newline at end of file
 (group_norm_invalid_num_groups_error_test:
:
x
scale
biasy"GroupNormalization*
num_groups(group_norm_invalid_num_groups_error_testZ
x




Z
scale

Z
bias

b
y




B
\ No newline at end of file
 #group_norm_invalid_scale_shape_test:
:
x
scale
biasy"GroupNormalization*
num_groups#group_norm_invalid_scale_shape_testZ
x




Z
scale

Z
bias

b
y




B
\ No newline at end of file
 'group_norm_missing_attribute_error_test:
'
x
scale
biasy"GroupNormalization'group_norm_missing_attribute_error_testZ
x


Z
scale

Z
bias

b
y


B
\ No newline at end of file
group_norm_small_eps_half_test:
M
x
scale
biasy"GroupNormalization*
epsilon̼+*
num_groupsgroup_norm_small_eps_half_testZ
x




Z
scale


Z
bias


b
y




B
\ No newline at end of file
 !layer_norm_2d_axis_minus_one_test:
=
x
scale
biasy"LayerNormalization*
axis!layer_norm_2d_axis_minus_one_testZ
x


Z
scale

Z
bias

b
y


B
\ No newline at end of file
 layer_norm_2d_axis_one_test:
4
x
scale
biasy"LayerNormalization*
axislayer_norm_2d_axis_one_testZ
x


Z
scale

Z
bias

b
y


B
\ No newline at end of file
layer_norm_3d_half_test:
=
x
scale
biasy"LayerNormalization*
axislayer_norm_3d_half_testZ
x




Z
scale


Z
bias


b
y




B
\ No newline at end of file
 layer_norm_3d_test:
=
x
scale
biasy"LayerNormalization*
axislayer_norm_3d_testZ
x



Z
scale

Z
bias

b
y



B
\ No newline at end of file
layer_norm_4d_half_test:
=
x
scale
biasy"LayerNormalization*
axislayer_norm_4d_half_testZ
x





Z
scale


Z
bias


b
y





B
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment