Commit 98d5a846 authored by Alan Turner

Merge remote-tracking branch 'origin/develop' into ck-gsg

parents 4837bf72 8b651eee
@@ -132,9 +132,14 @@ MIGRAPHX_DEVICE_MATH_FOR(float, fmod, ::fmodf)
 // Builtin half functions
 MIGRAPHX_DEVICE_MATH_FOR(migraphx::half, abs, ::__habs)
+MIGRAPHX_DEVICE_MATH_FOR(migraphx::half, ceil, ::hceil)
+MIGRAPHX_DEVICE_MATH_FOR(migraphx::half, cos, ::hcos)
 MIGRAPHX_DEVICE_MATH_FOR(migraphx::half, exp, ::hexp)
+MIGRAPHX_DEVICE_MATH_FOR(migraphx::half, floor, ::hfloor)
+MIGRAPHX_DEVICE_MATH_FOR(migraphx::half, isnan, ::__hisnan)
 MIGRAPHX_DEVICE_MATH_FOR(migraphx::half, log, ::hlog)
 MIGRAPHX_DEVICE_MATH_FOR(migraphx::half, rsqrt, ::hrsqrt)
+// MIGRAPHX_DEVICE_MATH_FOR(migraphx::half, sin, ::hsin)
 MIGRAPHX_DEVICE_MATH_FOR(migraphx::half, sqrt, ::hsqrt)
 // Use float to compute half overload
@@ -144,16 +149,11 @@ MIGRAPHX_DEVICE_MATH_HALF(asin, ::asin)
 MIGRAPHX_DEVICE_MATH_HALF(asinh, ::asinh)
 MIGRAPHX_DEVICE_MATH_HALF(atan, ::atan)
 MIGRAPHX_DEVICE_MATH_HALF(atanh, ::atanh)
-MIGRAPHX_DEVICE_MATH_HALF(ceil, ::ceil)
-MIGRAPHX_DEVICE_MATH_HALF(cos, ::cos)
 MIGRAPHX_DEVICE_MATH_HALF(cosh, ::cosh)
 MIGRAPHX_DEVICE_MATH_HALF(erf, ::erf)
-MIGRAPHX_DEVICE_MATH_HALF(floor, ::floor)
-MIGRAPHX_DEVICE_MATH_HALF(isnan, ::isnan)
 MIGRAPHX_DEVICE_MATH_HALF(pow, ::pow)
 MIGRAPHX_DEVICE_MATH_HALF(remainder, ::remainder)
 MIGRAPHX_DEVICE_MATH_HALF(round, ::round)
-MIGRAPHX_DEVICE_MATH_HALF(sin, ::sin)
 MIGRAPHX_DEVICE_MATH_HALF(sinh, ::sinh)
 MIGRAPHX_DEVICE_MATH_HALF(tan, ::tan)
 MIGRAPHX_DEVICE_MATH_HALF(tanh, ::tanh)
@@ -166,19 +166,19 @@ MIGRAPHX_DEVICE_MATH_HALF(fmod, ::fmod)
 // at this time are: exp2, exp10, log2, log10, isinf
 MIGRAPHX_DEVICE_MATH_HALF2(abs, ::__habs2)
 MIGRAPHX_DEVICE_MATH_HALF2(ceil, ::h2ceil)
-MIGRAPHX_DEVICE_MATH_HALF2(floor, ::h2floor)
-MIGRAPHX_DEVICE_MATH_HALF2(sin, ::h2sin)
 MIGRAPHX_DEVICE_MATH_HALF2(cos, ::h2cos)
 MIGRAPHX_DEVICE_MATH_HALF2(exp, ::h2exp)
-MIGRAPHX_DEVICE_MATH_HALF2(exp2, ::h2exp2)
 MIGRAPHX_DEVICE_MATH_HALF2(exp10, ::h2exp10)
-MIGRAPHX_DEVICE_MATH_HALF2(log2, ::h2log2)
+MIGRAPHX_DEVICE_MATH_HALF2(exp2, ::h2exp2)
+MIGRAPHX_DEVICE_MATH_HALF2(floor, ::h2floor)
+MIGRAPHX_DEVICE_MATH_HALF2(isinf, ::__hisinf2)
+MIGRAPHX_DEVICE_MATH_HALF2(isnan, ::__hisnan2)
 MIGRAPHX_DEVICE_MATH_HALF2(log, ::h2log)
 MIGRAPHX_DEVICE_MATH_HALF2(log10, ::h2log10)
+MIGRAPHX_DEVICE_MATH_HALF2(log2, ::h2log2)
 MIGRAPHX_DEVICE_MATH_HALF2(rsqrt, ::h2rsqrt)
+// MIGRAPHX_DEVICE_MATH_HALF2(sin, ::h2sin)
 MIGRAPHX_DEVICE_MATH_HALF2(sqrt, ::h2sqrt)
-MIGRAPHX_DEVICE_MATH_HALF2(isinf, ::__hisinf2)
-MIGRAPHX_DEVICE_MATH_HALF2(isnan, ::__hisnan2)
 template <class T, class U>
 constexpr auto where(bool cond, const T& a, const U& b)
@@ -218,6 +218,14 @@ constexpr auto min(const T& a, const U& b)
     return min<common_type_t<T, U>>(a, b);
 }
+
+// Sin for half is broken on hip, so use cos instead
+template <class T, MIGRAPHX_REQUIRES(is_same<vec_type<T>, half>{})>
+constexpr T sin(T x)
+{
+    constexpr const T shift = M_PI_2;
+    return migraphx::cos(shift - x);
+}
 MIGRAPHX_DEVICE_MATH_VEC(abs)
 MIGRAPHX_DEVICE_MATH_VEC(acos)
 MIGRAPHX_DEVICE_MATH_VEC(acosh)
...
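Note: the `sin` workaround above relies on the identity sin(x) = cos(pi/2 - x). A minimal NumPy sketch of that identity for reference (illustrative only; NumPy stands in for the HIP device math):

```python
# Check sin(x) == cos(pi/2 - x), the identity the half-precision sin
# workaround uses; float32 serves as the reference precision here.
import numpy as np

x = np.linspace(-3.0, 3.0, 7, dtype=np.float32)
assert np.allclose(np.sin(x), np.cos(np.pi / 2 - x), atol=1e-6)
```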
@@ -56,6 +56,16 @@ struct id
     }
 };
+
+template <class T>
+struct convert_to
+{
+    template <class U>
+    MIGRAPHX_DEVICE_CONSTEXPR auto operator()(U x) const
+    {
+        return convert<T>(x);
+    }
+};
 struct mean
 {
     index_int item_num = 1;
...
@@ -128,6 +128,7 @@ struct shape
         result[0] = tidx;
         return result;
     }
+
     /// Convert multi-index into a single index
     constexpr index_int single(index_array idx) const
     {
...
@@ -90,7 +90,6 @@ struct miopen_apply
         add_extend_op("argmax");
         add_extend_op("argmin");
-        add_extend_op("gather");
         add_extend_op("logsoftmax");
         add_extend_op("lrn");
         add_extend_op("multinomial");
...
@@ -346,10 +346,10 @@ struct ref_pad
     std::string name() const { return "ref::pad"; }
     shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
-    argument compute(context&, const shape& output_shape, std::vector<argument> args) const
+    argument compute(context&, const dyn_output& dyn_out, std::vector<argument> args) const
     {
-        assert(output_shape.standard());
-        argument result{output_shape};
+        assert(dyn_out.computed_shape.standard());
+        argument result{dyn_out.computed_shape};
         result.visit([&](auto output) {
             using type = typename decltype(output)::value_type;
             std::fill(output.begin(), output.end(), pad_clamp<type>(op.value));
...
@@ -66,7 +66,7 @@ TEST_CASE(load_and_run_init_list)
 TEST_CASE(quantize_fp16)
 {
-    auto p1 = migraphx::parse_onnx("gemm_ex_test.onnx");
+    auto p1 = migraphx::parse_onnx("gemm_test.onnx");
     const auto& p2 = p1;
     const auto& p3 = p1;
     migraphx::quantize_fp16(p1);
@@ -82,7 +82,7 @@ TEST_CASE(quantize_fp16)
 TEST_CASE(quantize_int8)
 {
-    auto p1 = migraphx::parse_onnx("gemm_ex_test.onnx");
+    auto p1 = migraphx::parse_onnx("gemm_test.onnx");
     const auto& p2 = p1;
     auto t = migraphx::target("ref");
     migraphx::quantize_int8_options options;
...
No preview for this file type (binary file changed)
@@ -23,7 +23,7 @@
 #####################################################################################
 # This script generates onnx files for MIGraphX onnx operator tests.
 # To generate an individual onnx file, you can use the following
-# command: python -c "import gen_onnx; gen_onnx.{test_name}_test()"
+# command: python3 -c "import gen_onnx; gen_onnx.{test_name}_test()"
 import numpy as np
 import onnx
 from onnx import helper
@@ -1123,6 +1123,24 @@ def conv_dynamic_batch_test():
     return ([node], [x, y], [out])
+
+
+@onnx_test()
+def conv_dynamic_bias_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
+                                      [None, 3, 32, 32])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 5, 5])
+    z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1])
+    out = helper.make_tensor_value_info('3', TensorProto.FLOAT,
+                                        [None, 2, 28, 28])
+    node = onnx.helper.make_node('Conv',
+                                 inputs=['0', '1', '2'],
+                                 outputs=['3'],
+                                 dilations=[1, 1],
+                                 strides=[1, 1])
+    return ([node], [x, y, z], [out])
 
 
 @onnx_test()
 def conv_dynamic_img_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
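Note: with no explicit pads, the 32x32 input and 5x5 kernel in conv_dynamic_bias_test give a 28x28 output. A quick sketch of the standard convolution size arithmetic (illustrative only, not part of the generator):

```python
# Conv output size with no padding:
# out = (in - dilation * (kernel - 1) - 1) // stride + 1
in_size, kernel, stride, dilation = 32, 5, 1, 1
out_size = (in_size - dilation * (kernel - 1) - 1) // stride + 1
print(out_size)  # 28, matching the [None, 2, 28, 28] output declared above
```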
@@ -2100,71 +2118,136 @@ def gathernd_batch_dims_test():
 @onnx_test()
 def gemm_test():
-    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5, 7])
-    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [11, 5])
-    z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [])
-    a = helper.make_tensor_value_info('3', TensorProto.FLOAT, [7, 11])
+    A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [8, 6])
+    B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [8, 7])
+    C = helper.make_tensor_value_info('C', TensorProto.FLOAT, [6, 7])
+    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [6, 7])
     node = onnx.helper.make_node('Gemm',
-                                 inputs=['0', '1', '2'],
-                                 outputs=['3'],
+                                 inputs=['A', 'B', 'C'],
+                                 outputs=['Y'],
+                                 alpha=0.5,
+                                 beta=0.8,
+                                 transA=1)
+    return ([node], [A, B, C], [Y])
+
+
+@onnx_test()
+def gemm_no_C_test():
+    A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [5, 7])
+    B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [11, 5])
+    C = helper.make_tensor_value_info('C', TensorProto.FLOAT, [])
+    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [7, 11])
+    node = onnx.helper.make_node('Gemm',
+                                 inputs=['A', 'B', 'C'],
+                                 outputs=['Y'],
                                  alpha=2.0,
                                  beta=2.0,
                                  transA=1,
                                  transB=1)
-    return ([node], [x, y, z], [a])
+    return ([node], [A, B, C], [Y])
 
 
 @onnx_test()
-def gemm_ex_test():
-    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 1, 8, 6])
-    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 8, 7])
-    m3 = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 1, 6, 7])
-    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 6, 7])
+def gemm_brcst_C_test():
+    A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [5, 6])
+    B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [5, 7])
+    C = helper.make_tensor_value_info('C', TensorProto.FLOAT, [6, 1])
+    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [6, 7])
     node = onnx.helper.make_node('Gemm',
-                                 inputs=['1', '2', '3'],
-                                 outputs=['y'],
+                                 inputs=['A', 'B', 'C'],
+                                 outputs=['Y'],
                                  alpha=0.5,
                                  beta=0.8,
                                  transA=1)
-    return ([node], [m1, m2, m3], [y])
+    return ([node], [A, B, C], [Y])
 
 
 @onnx_test()
-def gemm_ex_brcst_test():
-    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 1, 5, 6])
-    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 5, 7])
-    m3 = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 1, 6, 1])
-    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 6, 7])
+def gemm_half_test():
+    A = helper.make_tensor_value_info('A', TensorProto.FLOAT16, [8, 6])
+    B = helper.make_tensor_value_info('B', TensorProto.FLOAT16, [8, 7])
+    C = helper.make_tensor_value_info('C', TensorProto.FLOAT16, [6, 1])
+    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT16, [6, 7])
     node = onnx.helper.make_node('Gemm',
-                                 inputs=['1', '2', '3'],
-                                 outputs=['y'],
+                                 inputs=['A', 'B', 'C'],
+                                 outputs=['Y'],
                                  alpha=0.5,
                                  beta=0.8,
                                  transA=1)
-    return ([node], [m1, m2, m3], [y])
+    return ([node], [A, B, C], [Y])
 
 
 @onnx_test()
-def gemm_half_test():
-    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT16, [1, 1, 8, 6])
-    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT16, [1, 1, 8, 7])
-    m3 = helper.make_tensor_value_info('3', TensorProto.FLOAT16, [1, 1, 6, 1])
-    y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [1, 1, 6, 7])
+def gemm_dyn_inner_test():
+    A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [None, 6])
+    B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [None, 7])
+    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [6, 7])
     node = onnx.helper.make_node('Gemm',
-                                 inputs=['1', '2', '3'],
-                                 outputs=['y'],
+                                 inputs=['A', 'B'],
+                                 outputs=['Y'],
+                                 alpha=0.5,
+                                 transA=1)
+    return ([node], [A, B], [Y])
+
+
+@onnx_test()
+def gemm_dyn_outer_test():
+    A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [5, None])
+    B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [11, 5])
+    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [None, 11])
+    node = onnx.helper.make_node('Gemm',
+                                 inputs=['A', 'B'],
+                                 outputs=['Y'],
+                                 alpha=2.0,
+                                 transA=1,
+                                 transB=1)
+    return ([node], [A, B], [Y])
+
+
+@onnx_test()
+def gemm_dyn_C_error():
+    A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [8, None])
+    B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [8, 7])
+    C = helper.make_tensor_value_info('C', TensorProto.FLOAT, [1, 7])
+    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [None, 7])
+    node = onnx.helper.make_node('Gemm',
+                                 inputs=['A', 'B', 'C'],
+                                 outputs=['Y'],
+                                 alpha=1.0,
+                                 beta=1.0,
+                                 transA=1)
+    return ([node], [A, B, C], [Y])
+
+
+@onnx_test()
+def gemm_rank_error():
+    A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [4, 1, 8, 6])
+    B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [4, 1, 8, 7])
+    C = helper.make_tensor_value_info('C', TensorProto.FLOAT, [6, 7])
+    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [4, 1, 6, 7])
+    node = onnx.helper.make_node('Gemm',
+                                 inputs=['A', 'B', 'C'],
+                                 outputs=['Y'],
                                  alpha=0.5,
                                  beta=0.8,
                                  transA=1)
-    return ([node], [m1, m2, m3], [y])
+    return ([node], [A, B, C], [Y])
 
 
 @onnx_test()
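Note: ONNX Gemm computes Y = alpha * op(A) @ op(B) + beta * C, where transA/transB choose op. A NumPy sketch with the shapes from the new gemm_test (illustrative only):

```python
# Gemm with transA=1: (8, 6) transposed to (6, 8), times (8, 7) gives (6, 7).
import numpy as np

A = np.ones((8, 6), dtype=np.float32)
B = np.ones((8, 7), dtype=np.float32)
C = np.ones((6, 7), dtype=np.float32)
Y = 0.5 * (A.T @ B) + 0.8 * C
print(Y.shape)  # (6, 7), matching the declared output shape
```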
@@ -3547,6 +3630,81 @@ def matmul_vv_test():
     return ([node], [m1, m2], [y])
+
+
+@onnx_test()
+def matmul_dyn_mm_test():
+    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 7])
+    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7, None])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, None])
+    node = onnx.helper.make_node(
+        'MatMul',
+        inputs=['1', '2'],
+        outputs=['y'],
+    )
+    return ([node], [m1, m2], [y])
+
+
+@onnx_test()
+def matmul_dyn_mv_test():
+    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 7])
+    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, 1])
+    node = onnx.helper.make_node(
+        'MatMul',
+        inputs=['1', '2'],
+        outputs=['y'],
+    )
+    return ([node], [m1, m2], [y])
+
+
+@onnx_test()
+def matmul_dyn_vm_test():
+    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [7])
+    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7, None])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, None])
+    node = onnx.helper.make_node(
+        'MatMul',
+        inputs=['1', '2'],
+        outputs=['y'],
+    )
+    return ([node], [m1, m2], [y])
+
+
+@onnx_test()
+def matmul_dyn_vv_test():
+    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None])
+    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [None])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1])
+    node = onnx.helper.make_node(
+        'MatMul',
+        inputs=['1', '2'],
+        outputs=['y'],
+    )
+    return ([node], [m1, m2], [y])
+
+
+@onnx_test()
+def matmul_dyn_broadcast_error():
+    m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [7])
+    m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [5, 7, None])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [5, None])
+    node = onnx.helper.make_node(
+        'MatMul',
+        inputs=['1', '2'],
+        outputs=['y'],
+    )
+    return ([node], [m1, m2], [y])
 
 
 @onnx_test()
 def matmulinteger_test():
     m1 = helper.make_tensor_value_info('1', TensorProto.INT8, [3, 6, 16])
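Note: ONNX MatMul is specified to behave like numpy.matmul, which the mv/vm/vv tests above exercise: a 1-D operand is promoted to a matrix for the multiply and the promoted dimension is removed afterwards. For reference (illustrative only):

```python
import numpy as np

m = np.ones((3, 7), dtype=np.float32)
v = np.ones(7, dtype=np.float32)
print(np.matmul(m, v).shape)  # (3,): matrix @ vector drops the trailing dim
print(np.matmul(v, v).shape)  # (): vector @ vector reduces to a scalar
```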
@@ -3562,6 +3720,21 @@ def matmulinteger_test():
     return ([node], [m1, m2], [y])
+
+
+@onnx_test()
+def matmulinteger_dyn_error():
+    m1 = helper.make_tensor_value_info('1', TensorProto.INT8, [None, 6, 16])
+    m2 = helper.make_tensor_value_info('2', TensorProto.INT8, [None, 16, 8])
+    y = helper.make_tensor_value_info('y', TensorProto.INT32, [None, 6, 8])
+    node = onnx.helper.make_node(
+        'MatMulInteger',
+        inputs=['1', '2'],
+        outputs=['y'],
+    )
+    return ([node], [m1, m2], [y])
 
 
 @onnx_test()
 def max_test():
     a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
@@ -4200,6 +4373,53 @@ def pad_reflect_multiaxis_test():
     return ([arg_pad, node], [x], [y])
+
+
+@onnx_test()
+def pad_attr_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, None])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, None])
+    node = onnx.helper.make_node('Pad',
+                                 inputs=['0'],
+                                 pads=[1, 1, 1, 1],
+                                 outputs=['1'])
+    return ([node], [x], [y])
+
+
+@onnx_test()
+def pad_cnst_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, None])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, None])
+    sizes = np.array([0, 2, 0, 1])
+    pad_tensor = helper.make_tensor(name='pad_size',
+                                    data_type=TensorProto.INT32,
+                                    dims=sizes.shape,
+                                    vals=sizes.astype(int))
+    arg_pad = onnx.helper.make_node('Constant',
+                                    inputs=[],
+                                    outputs=['arg_pad'],
+                                    value=pad_tensor)
+    node = onnx.helper.make_node('Pad', inputs=['0', 'arg_pad'], outputs=['1'])
+    return ([arg_pad, node], [x], [y])
+
+
+@onnx_test()
+def pad_dyn_reflect_error():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, None])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, None])
+    node = onnx.helper.make_node('Pad',
+                                 mode='reflect',
+                                 inputs=['0'],
+                                 pads=[0, 2, 0, 1],
+                                 outputs=['1'])
+    return ([node], [x], [y])
 
 
 @onnx_test()
 def pow_test():
     arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
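Note: for a 2-D input, ONNX Pad takes pads as [x1_begin, x2_begin, x1_end, x2_end], so the [0, 2, 0, 1] used in pad_cnst_dyn_test pads axis 1 with 2 elements before and 1 after. An equivalent NumPy sketch (illustrative only):

```python
import numpy as np

x = np.ones((2, 3), dtype=np.float32)
padded = np.pad(x, ((0, 0), (2, 1)), mode='constant')  # pads=[0, 2, 0, 1]
print(padded.shape)  # (2, 6)
```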
@@ -4677,6 +4897,34 @@ def reducel1_test():
     return ([node], [x], [y])
+
+
+@onnx_test
+def reducel1_dyn_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None])
+    axes = [-2]
+    node = onnx.helper.make_node('ReduceL1',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 axes=axes,
+                                 keepdims=0)
+    return ([node], [x], [y])
+
+
+@onnx_test
+def reducel1_dyn_noaxes_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None])
+    node = onnx.helper.make_node('ReduceL1',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 keepdims=0)
+    return ([node], [x], [y])
 
 
 @onnx_test()
 def reducel2_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
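Note: ReduceL1 computes sum(abs(x)) over the given axes; with keepdims=0 the reduced axes are squeezed away. A NumPy sketch of the semantics (illustrative only):

```python
import numpy as np

x = np.array([[1.0, -2.0], [-3.0, 4.0]], dtype=np.float32)
print(np.sum(np.abs(x), axis=-2))  # [4. 6.]: axes=[-2] reduces over rows
```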
@@ -4726,6 +4974,22 @@ def reduce_log_sum_exp_test():
 def reducemax_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])
+    axes = [2]
+    node = onnx.helper.make_node('ReduceMax',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 axes=axes,
+                                 keepdims=0)
+    return ([node], [x], [y])
+
+
+@onnx_test
+def reducemax_dyn_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 4, 5, 6])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, 4, 6])
     axes = [2]
     node = onnx.helper.make_node('ReduceMax',
...