"test/srt/git@developer.sourcefind.cn:zhaoyu6/sglang.git" did not exist on "9a7e7a657644ffff7165b0c7a39033c934f6f63a"
Commit 6711780a authored by Artur Wojcik

Merge branch 'develop' into uif2-initial

parents c0563b9e d1abf06f
@@ -31,6 +31,14 @@
 #include <migraphx/kernels/debug.hpp>
 #include <migraphx/kernels/functional.hpp>
 
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wreserved-identifier"
+extern "C" __device__ size_t __ockl_get_enqueued_local_size(uint); // NOLINT
+extern "C" __device__ size_t __ockl_get_local_size(uint); // NOLINT
+#pragma clang diagnostic pop
+#endif
+
 namespace migraphx {
 
 #if defined(MIGRAPHX_NGLOBAL) && defined(MIGRAPHX_NLOCAL)
@@ -45,43 +53,37 @@ inline __device__ __attribute__((const)) index_int compute_global_size()
     // This actualy works even when global is not divisible by local size.
     // This doesnt actually do a multiplicatiosn. Instead it calls a device
     // function to get the global size, which is why it works.
     return blockDim.x * gridDim.x; // NOLINT
 #endif
 }
 
-// We cant just use blockDim.x to get the local size since its broken on hip
-// when global is not divisible by local size. In this case, we calulate the
-// size for the last group.
+#ifdef MIGRAPHX_NGROUP
+// If global is divisible by local then local can be a const
+#if(MIGRAPHX_NGLOBAL % MIGRAPHX_NLOCAL == 0) || (MIGRAPHX_NGROUP == 1)
+#define MIGRAPHX_HAS_CONST_LOCAL 1
+#endif
+#endif
+
 inline __device__ __attribute__((const)) index_int compute_local_size()
 {
-#ifdef MIGRAPHX_NLOCAL
-    const auto nlocal = MIGRAPHX_NLOCAL;
+#ifdef MIGRAPHX_HAS_CONST_LOCAL
+    return MIGRAPHX_NLOCAL;
 #else
-    const auto nlocal = blockDim.x; // NOLINT
-#endif
-#ifdef MIGRAPHX_NGROUP
-    const auto ngroup = MIGRAPHX_NGROUP;
-#else
-    const auto ngroup = gridDim.x; // NOLINT
+    // Returns block size. For the non-uniform block it returns the size of the non-uniform block.
+    return __ockl_get_local_size(0); // NOLINT
 #endif
-    const auto group_id = blockIdx.x; // NOLINT
-    const auto nglobal  = compute_global_size();
-    if(group_id == ngroup - 1)
-    {
-        return 1 + (nglobal - 1) % nlocal;
-    }
-    else
-    {
-        return nlocal; // NOLINT
-    }
 }
 
-#ifdef MIGRAPHX_NGROUP
-// If global is divisible by local then local can be a const
-#if(MIGRAPHX_NGLOBAL % MIGRAPHX_NLOCAL == 0) || (MIGRAPHX_NGROUP == 1)
-#define MIGRAPHX_HAS_CONST_LOCAL 1
-#endif
+inline __device__ __attribute__((const)) index_int compute_max_local_size()
+{
+#ifdef MIGRAPHX_LOCAL
+    return MIGRAPHX_NLOCAL;
+#else
+    // Returns the block size. When workgrop has non-uniform block, this returns size of the uniform
+    // block.
+    return __ockl_get_enqueued_local_size(0); // NOLINT
 #endif
+}
 
 struct index
 {
@@ -126,8 +128,8 @@ struct index
 #else
     __device__ index_int max_nlocal() const
     {
-        MIGRAPHX_ASSERT(blockDim.x > 0);
-        return blockDim.x;
+        MIGRAPHX_ASSERT(compute_max_local_size() > 0);
+        return compute_max_local_size();
     }
 #endif
@@ -249,7 +251,8 @@ struct index
 #endif
 
 inline __device__ __attribute__((const)) index make_index()
 {
-    return index{blockIdx.x * blockDim.x + threadIdx.x, threadIdx.x, blockIdx.x}; // NOLINT
+    return index{
+        blockIdx.x * compute_max_local_size() + threadIdx.x, threadIdx.x, blockIdx.x}; // NOLINT
 }
 
 } // namespace migraphx
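The removed branch of compute_local_size computed the size of the trailing, non-uniform workgroup by hand; the new code delegates that to __ockl_get_local_size. A rough host-side sketch of the arithmetic the old code used (illustrative Python, not part of the patch):

def local_size(group_id, nglobal, nlocal, ngroup):
    # Every group is full-sized except possibly the last one, which holds the
    # remainder when nglobal is not divisible by nlocal.
    if group_id == ngroup - 1:
        return 1 + (nglobal - 1) % nlocal
    return nlocal

# Example: 10 work-items in groups of 4 -> group sizes 4, 4, 2.
assert [local_size(g, nglobal=10, nlocal=4, ngroup=3) for g in range(3)] == [4, 4, 2]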
......
@@ -24,9 +24,8 @@
 #include <migraphx/permutation.hpp>
 #include <migraphx/gpu/prefuse_ops.hpp>
 #if !defined(_MSC_VER)
+#include <migraphx/gpu/gemm_softmax_gemm.hpp>
 #include <migraphx/match/layernorm.hpp>
-#include <migraphx/check_shapes.hpp>
-#include <migraphx/make_op.hpp>
 #include <migraphx/register_op.hpp>
 #endif
 #include <migraphx/pass_manager.hpp>
@@ -126,6 +125,60 @@ struct find_add_layernorm
         m.replace_instruction(ins, add_layernorm{op.epsilon}, add_ins->inputs());
     }
 };
 
+struct pre_gemm_softmax_gemm : gemm_softmax_gemm
+{
+    std::string name() const { return "gpu::pre_gemm_softmax_gemm"; }
+};
+MIGRAPHX_REGISTER_OP(pre_gemm_softmax_gemm);
+
+MIGRAPHX_PRED_MATCHER(is_ck_gemm, instruction_ref ins)
+{
+    if(ins->name() != "dot")
+        return false;
+    if(not pre_gemm_softmax_gemm::is_ck_supported_type(ins->get_shape().type()))
+        return false;
+    return true;
+}
+
+struct find_gemm_softmax_gemm
+{
+    auto matcher() const
+    {
+        auto gemm1 =
+            match::skip(match::name("contiguous"))(match::name("dot")(is_ck_gemm().bind("gemm1")));
+        auto mul = match::name("mul")(
+            match::nargs(2), match::either_arg(0, 1)(match::is_constant().bind("scale"), gemm1));
+        auto softmax = match::name("softmax")(match::arg(0)(mul)).bind("softmax");
+        return match::name("dot")(is_ck_gemm().bind("gemm2"))(match::arg(0)(softmax));
+    }
+
+    void apply(module_pass_manager& mpm, const match::matcher_result& r) const
+    {
+        auto ins       = r.result;
+        auto gemm2_ins = r.instructions["gemm2"];
+        auto gemm1_ins = r.instructions["gemm1"];
+        auto scale_lit = r.instructions["scale"];
+
+        float scale = 1.0;
+        scale_lit->eval().visit([&](const auto s) {
+            // CK only supports single-valued scale
+            if(std::all_of(
+                   s.begin() + 1, s.end(), [&](auto v) { return float_equal(v, s.front()); }))
+                scale = s.front();
+            else
+                return;
+        });
+
+        auto inputs = gemm1_ins->inputs();            // A, B
+        inputs.push_back(gemm2_ins->inputs().back()); // B1
+
+        mpm.get_module().replace_instruction(
+            ins, pre_gemm_softmax_gemm{gemm2_ins->get_operator(), scale}, inputs);
+    }
+};
+
 } // namespace
 #endif
@@ -135,6 +188,10 @@ void prefuse_ops::apply(module_pass_manager& mpm) const
     match::find_matches(mpm.get_module(), find_layernorm{});
     mpm.run_pass(dead_code_elimination{});
     match::find_matches(mpm.get_module(), find_add_layernorm{});
+    if(enabled(MIGRAPHX_ENABLE_CK{}))
+        match::find_matches(mpm, find_gemm_softmax_gemm{});
+#else
+    (void)mpm;
 #endif
 }
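The matcher above targets the attention-style chain dot, scale, softmax, dot so it can be handed to a fused kernel when CK is enabled. Roughly, the subgraph it replaces computes the following (illustrative NumPy sketch, not MIGraphX code):

import numpy as np

def gemm_softmax_gemm(a, b, b1, scale=1.0):
    # dot(A, B), multiply by a single-valued scale, softmax over the last axis,
    # then dot with B1 -- the three inputs handed to pre_gemm_softmax_gemm.
    s = scale * (a @ b)
    p = np.exp(s - s.max(axis=-1, keepdims=True))
    p /= p.sum(axis=-1, keepdims=True)
    return p @ b1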
......
@@ -41,8 +41,7 @@ std::vector<argument> generate_arguments(const std::vector<shape>& shapes, unsig
 }
 
 using milliseconds = std::chrono::duration<double, std::milli>;
-std::pair<double, double>
-time_op(context& ictx, operation op, const std::vector<shape>& inputs, int n)
+double time_op(context& ictx, operation op, const std::vector<shape>& inputs, int n)
 {
     // TODO: Use std::ref
 
@@ -51,21 +50,19 @@ time_op(context& ictx, operation op, const std::vector<shape>& inputs, int n)
     auto output = op.compute_shape(inputs);
     op.finalize(ctx, output, inputs);
     auto args = generate_arguments(inputs);
-    auto run = [&] {
-        op.compute(ctx, output, args);
-        ctx.finish();
-    };
-    gctx.enable_perf_measurement();
+    auto start = context::create_event_for_timing();
+    auto stop  = context::create_event_for_timing();
+    auto run   = [&] { op.compute(ctx, output, args); };
     run();
-    double host_time   = 0.0;
-    double device_time = 0.0;
+    gctx.get_stream().record(start.get());
     for(auto i : range(n))
    {
         (void)i;
-        host_time += time<milliseconds>(run);
-        device_time += gctx.get_elapsed_ms();
+        run();
     }
-    return std::make_pair(host_time / n, device_time / n);
+    gctx.get_stream().record(stop.get());
+    gctx.finish();
+    return context::get_elapsed_ms(start.get(), stop.get()) / n;
 }
 
 } // namespace gpu
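The rewritten time_op records one event before and one after the n launches and divides the elapsed time by n, instead of synchronizing and timing each run on the host. A host-only analogue of that measurement strategy (illustrative Python, not the GPU implementation):

import time

def time_op(run, n):
    run()  # warm-up, mirroring the initial run() before the first record
    start = time.perf_counter()
    for _ in range(n):
        run()
    stop = time.perf_counter()
    return (stop - start) * 1e3 / n  # average milliseconds per run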
......
@@ -55,7 +55,7 @@ struct allocate
                                   const migraphx::shape& output_shape,
                                   const std::vector<migraphx::argument>&) const
     {
-        return {output_shape};
+        return migraphx::argument{output_shape};
     }
 };
......
@@ -60,7 +60,7 @@ struct concat
                                   const migraphx::shape& output_shape,
                                   const std::vector<migraphx::argument>&) const
     {
-        return {output_shape};
+        return migraphx::argument{output_shape};
     }
 };
 
@@ -104,7 +104,7 @@ struct allocate
                                   const migraphx::shape& output_shape,
                                   const std::vector<migraphx::argument>&) const
     {
-        return {output_shape};
+        return migraphx::argument{output_shape};
     }
 };
......
@@ -34,7 +34,8 @@
 void run_pass(migraphx::program& p)
 {
-    migraphx::run_passes(p, {migraphx::gpu::fuse_mlir{}, migraphx::dead_code_elimination{}});
+    migraphx::run_passes(
+        p, {migraphx::gpu::fuse_mlir{.enable_extra = true}, migraphx::dead_code_elimination{}});
 }
 
 template <class F>
@@ -151,7 +152,6 @@ TEST_CASE(int_quant_dot_tanh_fails)
 int main(int argc, const char* argv[])
 {
-    if(migraphx::gpu::mlir_enabled())
-        test::run(argc, argv);
+    test::run(argc, argv);
     return 0;
 }
@@ -55,7 +55,7 @@ struct allocate
                                   const migraphx::shape& output_shape,
                                   const std::vector<migraphx::argument>&) const
     {
-        return {output_shape};
+        return migraphx::argument{output_shape};
     }
 };
......
@@ -57,7 +57,7 @@ struct normalize_test_op
                                   const migraphx::shape& output_shape,
                                   const std::vector<migraphx::argument>&) const
     {
-        return {output_shape};
+        return migraphx::argument{output_shape};
     }
 };
......
@@ -149,6 +149,21 @@ def argmax_test():
    return ([node], [x], [y])
@onnx_test()
def argmax_select_last_index_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])
node = onnx.helper.make_node('ArgMax',
inputs=['x'],
outputs=['y'],
axis=2,
keepdims=0,
select_last_index=1)
return ([node], [x], [y])
@onnx_test()
def argmax_dyn_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 4, 5, 6])


@@ -177,6 +192,21 @@ def argmin_test():
    return ([node], [x], [y])
@onnx_test()
def argmin_select_last_index_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 5])
node = onnx.helper.make_node('ArgMin',
inputs=['x'],
outputs=['y'],
axis=3,
keepdims=0,
select_last_index=1)
return ([node], [x], [y])
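# Reference sketch (illustrative only, not part of the generator): with
# select_last_index=1, ArgMax/ArgMin return the last occurrence of the extreme
# value along the axis. NumPy's argmax returns the first, so flip and re-index:
def _argmax_last_index_reference(x, axis):
    flipped = np.flip(x, axis=axis)
    return x.shape[axis] - 1 - np.argmax(flipped, axis=axis)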
@onnx_test()
def asin_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])


@@ -2722,6 +2752,119 @@ def group_conv_test():
    return ([node], [x, y], [z])
def group_norm_test(x_dims,
scale_dims,
bias_dims,
y_dims,
num_groups,
eps_value=1e-5,
dtype=TensorProto.FLOAT):
x = helper.make_tensor_value_info('x', dtype, x_dims)
scale = helper.make_tensor_value_info('scale', dtype, scale_dims)
bias = helper.make_tensor_value_info('bias', dtype, bias_dims)
y = helper.make_tensor_value_info('y', dtype, y_dims)
node = onnx.helper.make_node('GroupNormalization',
inputs=['x', 'scale', 'bias'],
outputs=['y'],
num_groups=num_groups,
epsilon=eps_value)
return ([node], [x, scale, bias], [y])
@onnx_test()
def group_norm_3d_test():
return group_norm_test([1, 4, 2], [2], [2], [1, 4, 2], 2)
@onnx_test()
def group_norm_3d_half_test():
return group_norm_test([1, 4, 2], [2], [2], [1, 4, 2],
2,
dtype=TensorProto.FLOAT16)
@onnx_test()
def group_norm_4d_test():
return group_norm_test([1, 4, 3, 3], [2], [2], [1, 4, 3, 3], 2)
@onnx_test()
def group_norm_4d_half_test():
return group_norm_test([1, 4, 3, 3], [2], [2], [1, 4, 3, 3],
2,
dtype=TensorProto.FLOAT16)
@onnx_test()
def group_norm_5d_test():
return group_norm_test([3, 3, 3, 3, 3], [1], [1], [3, 3, 3, 3, 3], 1)
@onnx_test()
def group_norm_5d_half_test():
return group_norm_test([3, 3, 3, 3, 3], [1], [1], [3, 3, 3, 3, 3],
1,
dtype=TensorProto.FLOAT16)
@onnx_test()
def group_norm_small_eps_half_test():
return group_norm_test([1, 4, 2], [2], [2], [1, 4, 2],
2,
eps_value=1e-12,
dtype=TensorProto.FLOAT16)
@onnx_test()
def group_norm_invalid_num_groups_error_test():
return group_norm_test([1, 4, 3, 3], [2], [2], [1, 4, 3, 3], 3)
@onnx_test()
def group_norm_missing_attribute_error_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [2])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [2])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4])
node = onnx.helper.make_node('GroupNormalization',
inputs=['x', 'scale', 'bias'],
outputs=['y'])
return ([node], [x, scale, bias], [y])
@onnx_test()
def group_norm_invalid_input_count_error_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4, 3, 3])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [2])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4, 3, 3])
node = onnx.helper.make_node('GroupNormalization',
inputs=['x', 'scale'],
outputs=['y'],
num_groups=2)
return ([node], [x, scale], [y])
@onnx_test()
def group_norm_invalid_input_shape_error_test():
return group_norm_test([1, 4], [2], [2], [1, 4], 2)
@onnx_test()
def group_norm_invalid_scale_shape_test():
return group_norm_test([1, 4, 3, 3], [1], [2], [1, 4, 3, 3], 2)
@onnx_test()
def group_norm_invalid_bias_shape_test():
return group_norm_test([1, 4, 3, 3], [2], [3], [1, 4, 3, 3], 2)
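# Reference sketch (illustrative only, not part of the generator) of what the
# GroupNormalization tests above exercise: normalize within each of num_groups
# channel groups, then apply the per-group scale and bias (matching the
# scale/bias shapes used by these tests).
def _group_norm_reference(x, scale, bias, num_groups, eps=1e-5):
    n = x.shape[0]
    g = x.reshape(n, num_groups, -1)
    g = (g - g.mean(axis=-1, keepdims=True)) / np.sqrt(
        g.var(axis=-1, keepdims=True) + eps)
    g = g * scale.reshape(1, num_groups, 1) + bias.reshape(1, num_groups, 1)
    return g.reshape(x.shape)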
@onnx_test()
def hardsigmoid_default_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 3, 4, 5])


@@ -3804,6 +3947,110 @@ def layernorm_test():
        bias_add], [x, scale, bias], [y], [pow_tensor, epsilon_tensor])
def make_layer_norm(shape, axis, dtype=TensorProto.FLOAT):
norm_axis = axis + len(shape) if axis < 0 else axis
x = helper.make_tensor_value_info('x', dtype, shape)
scale = helper.make_tensor_value_info('scale', dtype, shape[norm_axis:])
bias = helper.make_tensor_value_info('bias', dtype, shape[norm_axis:])
y = helper.make_tensor_value_info('y', dtype, shape)
node = onnx.helper.make_node('LayerNormalization',
inputs=['x', 'scale', 'bias'],
outputs=['y'],
axis=axis)
return ([node], [x, scale, bias], [y])
@onnx_test()
def layer_norm_invalid_shape_error_test():
return make_layer_norm([3], 0)
@onnx_test()
def layer_norm_2d_axis_zero_test():
return make_layer_norm([3, 4], 0)
@onnx_test()
def layer_norm_2d_axis_one_test():
return make_layer_norm([3, 4], 1)
@onnx_test()
def layer_norm_2d_axis_minus_one_test():
return make_layer_norm([3, 4], -1)
@onnx_test()
def layer_norm_3d_test():
return make_layer_norm([1, 4, 2], -1)
@onnx_test()
def layer_norm_3d_half_test():
return make_layer_norm([1, 4, 2], -1, TensorProto.FLOAT16)
@onnx_test()
def layer_norm_4d_test():
return make_layer_norm([3, 3, 3, 3], -1)
@onnx_test()
def layer_norm_4d_half_test():
return make_layer_norm([3, 3, 3, 3], -1, TensorProto.FLOAT16)
@onnx_test()
def layer_norm_invalid_axis_error_test():
return make_layer_norm([1, 4, 2], 1000)
@onnx_test()
def layer_norm_invalid_minus_axis_error_test():
return make_layer_norm([1, 4, 2], -1000)
@onnx_test()
def layer_norm_invalid_input_count_error_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 2])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2])
node = onnx.helper.make_node('LayerNormalization',
inputs=['x'],
outputs=['y'])
return ([node], [x], [y])
@onnx_test()
def layer_norm_without_bias_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 2])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [2])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2])
node = onnx.helper.make_node('LayerNormalization',
inputs=['x', 'scale'],
outputs=['y'])
return ([node], [x, scale], [y])
@onnx_test()
def layer_norm_small_eps_half_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [1, 2])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT16, [2])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [1, 2])
node = onnx.helper.make_node('LayerNormalization',
inputs=['x', 'scale'],
outputs=['y'],
epsilon=1e-12)
return ([node], [x, scale], [y])
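# Reference sketch (illustrative only, not part of the generator) of the
# LayerNormalization cases above: normalize over the axes from `axis` to the
# end, then multiply by scale and, when present, add bias.
def _layer_norm_reference(x, scale, bias=None, axis=-1, eps=1e-5):
    axes = tuple(range(axis % x.ndim, x.ndim))
    mean = x.mean(axis=axes, keepdims=True)
    var = x.var(axis=axes, keepdims=True)
    y = (x - mean) / np.sqrt(var + eps) * scale
    return y if bias is None else y + bias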
@onnx_test()
def leaky_relu_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])


@@ -4464,6 +4711,77 @@ def mean_integral_test():
    return ([node], data, [mean])
def mvn_default_axes_test_base(dims, type=TensorProto.FLOAT):
data = helper.make_tensor_value_info("data", type, dims)
out = helper.make_tensor_value_info("out", type, dims)
node = helper.make_node("MeanVarianceNormalization",
inputs=["data"],
outputs=["out"])
return ([node], [data], [out])
@onnx_test()
def mvn_default_axes_test():
return mvn_default_axes_test_base([2, 2, 2, 2])
@onnx_test()
def mvn_default_axes_fp16_test():
return mvn_default_axes_test_base([2, 2, 2, 2], TensorProto.FLOAT16)
@onnx_test()
def mvn_default_axes_rank_too_small_test():
return mvn_default_axes_test_base([2, 2, 2])
@onnx_test()
def mvn_default_axes_rank_too_big_test():
return mvn_default_axes_test_base([2, 2, 2, 2, 2])
def mvn_n_rank_test_base(axes, dims, type=TensorProto.FLOAT):
data = helper.make_tensor_value_info("data", type, dims)
out = helper.make_tensor_value_info("out", type, dims)
node = helper.make_node("MeanVarianceNormalization",
inputs=["data"],
outputs=["out"],
axes=axes)
return ([node], [data], [out])
@onnx_test()
def mvn_rank_2_test():
return mvn_n_rank_test_base([1], [2, 2])
@onnx_test()
def mvn_rank_2_fp16_test():
return mvn_n_rank_test_base([1], [2, 2], TensorProto.FLOAT16)
@onnx_test()
def mvn_rank_3_test():
return mvn_n_rank_test_base([0, 1], [2, 2, 2])
@onnx_test()
def mvn_rank_3_fp16_test():
return mvn_n_rank_test_base([0, 1], [2, 2, 2], TensorProto.FLOAT16)
@onnx_test()
def mvn_axes_rank_too_small_test():
return mvn_n_rank_test_base([0, 1, 2], [2, 2, 2])
@onnx_test()
def mvn_axes_rank_too_big_test():
return mvn_n_rank_test_base([0], [2, 2, 2])
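# Reference sketch (illustrative only, not part of the generator): ONNX
# MeanVarianceNormalization defaults to axes (0, 2, 3), i.e. a rank-4 NCHW
# input, which is why the rank-3 and rank-5 default-axes cases above are
# expected to fail.
def _mvn_reference(x, axes=(0, 2, 3)):
    mean = x.mean(axis=axes, keepdims=True)
    std = x.std(axis=axes, keepdims=True)
    return (x - mean) / std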
@onnx_test()
def min_test():
    a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])


@@ -4890,6 +5208,32 @@ def pad_test():
    return ([node], [x], [y])
@onnx_test()
def pad_asym_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 4, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 6, 4, 12])
node = onnx.helper.make_node('Pad',
inputs=['0'],
pads=[0, 1, 0, 3, 0, 2, 0, 4],
outputs=['1'])
return ([node], [x], [y])
@onnx_test()
def pad_asym_invalid_pads_error_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 4, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 6, 4, 12])
node = onnx.helper.make_node('Pad',
inputs=['0'],
pads=[0, 1, 0, 3, 0, 2],
outputs=['1'])
return ([node], [x], [y])
@onnx_test()
def pad_3arg_test():
    values = np.array([1])


@@ -4923,11 +5267,18 @@ def pad_3arg_test():


@onnx_test()
def pad_4arg_axes_test():
    values = np.array([1])
    val_tensor = helper.make_tensor(name='val',
                                    data_type=TensorProto.FLOAT,
                                    dims=values.reshape(()).shape,
                                    vals=values.astype(float))
    arg_val = onnx.helper.make_node('Constant',
                                    inputs=[],
                                    outputs=['arg_val'],
                                    value=val_tensor)

    sizes = np.array([1, 3, 2, 4])
    pad_tensor = helper.make_tensor(name='pad_size',
                                    data_type=TensorProto.INT32,
                                    dims=sizes.shape,
                                    vals=sizes.astype(int))
    arg_pad = onnx.helper.make_node('Constant',
                                    inputs=[],
                                    outputs=['arg_pad'],
                                    value=pad_tensor)

    axes = np.array([1, 3])
    axes_tensor = helper.make_tensor(name='pad_axes',
                                     data_type=TensorProto.INT32,
                                     dims=axes.shape,
                                     vals=axes.astype(int))
    arg_axes = onnx.helper.make_node('Constant',
                                     inputs=[],
                                     outputs=['arg_axes'],
                                     value=axes_tensor)

    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 4, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 6, 4, 12])

    node = onnx.helper.make_node(
        'Pad', inputs=['0', 'arg_pad', 'arg_val', 'arg_axes'], outputs=['1'])

    return ([arg_axes, arg_val, arg_pad, node], [x], [y])


@onnx_test()
def pad_4arg_invalid_axes_error_test():
    values = np.array([1])
    val_tensor = helper.make_tensor(name='val',
                                    data_type=TensorProto.FLOAT,
                                    dims=values.reshape(()).shape,
                                    vals=values.astype(float))
    arg_val = onnx.helper.make_node('Constant',
                                    inputs=[],
                                    outputs=['arg_val'],
                                    value=val_tensor)

    sizes = np.array([1, 3, 2, 4])
    pad_tensor = helper.make_tensor(name='pad_size',
                                    data_type=TensorProto.INT32,
                                    dims=sizes.shape,
                                    vals=sizes.astype(int))
    arg_pad = onnx.helper.make_node('Constant',
                                    inputs=[],
                                    outputs=['arg_pad'],
                                    value=pad_tensor)

    axes = np.array([1, 2, 3])
    axes_tensor = helper.make_tensor(name='pad_axes',
                                     data_type=TensorProto.INT32,
                                     dims=axes.shape,
                                     vals=axes.astype(int))
    arg_axes = onnx.helper.make_node('Constant',
                                     inputs=[],
                                     outputs=['arg_axes'],
                                     value=axes_tensor)

    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 4, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 6, 4, 12])

    node = onnx.helper.make_node(
        'Pad', inputs=['0', 'arg_pad', 'arg_val', 'arg_axes'], outputs=['1'])

    return ([arg_axes, arg_val, arg_pad, node], [x], [y])


@onnx_test()
def pad_4arg_neg_axes_test():
    values = np.array([1])
    val_tensor = helper.make_tensor(name='val',
                                    data_type=TensorProto.FLOAT,
                                    dims=values.reshape(()).shape,
                                    vals=values.astype(float))
    arg_val = onnx.helper.make_node('Constant',
                                    inputs=[],
                                    outputs=['arg_val'],
                                    value=val_tensor)

    sizes = np.array([1, 3, 2, 4])
    pad_tensor = helper.make_tensor(name='pad_size',
                                    data_type=TensorProto.INT32,
                                    dims=sizes.shape,
                                    vals=sizes.astype(int))
    arg_pad = onnx.helper.make_node('Constant',
                                    inputs=[],
                                    outputs=['arg_pad'],
                                    value=pad_tensor)

    axes = np.array([-3, -1])
    axes_tensor = helper.make_tensor(name='pad_axes',
                                     data_type=TensorProto.INT32,
                                     dims=axes.shape,
                                     vals=axes.astype(int))
    arg_axes = onnx.helper.make_node('Constant',
                                     inputs=[],
                                     outputs=['arg_axes'],
                                     value=axes_tensor)

    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 4, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 6, 4, 12])

    node = onnx.helper.make_node(
        'Pad', inputs=['0', 'arg_pad', 'arg_val', 'arg_axes'], outputs=['1'])

    return ([arg_axes, arg_val, arg_pad, node], [x], [y])
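# Reference sketch (illustrative only, not part of the generator): the axes
# input selects which dimensions the pads values apply to; all other dimensions
# get zero padding, and negative axes count from the end.
def _expand_pads_reference(rank, pads, axes):
    full = [0] * (2 * rank)
    for i, axis in enumerate(axes):
        axis = axis % rank
        full[axis] = pads[i]
        full[rank + axis] = pads[len(axes) + i]
    return full

# pads=[1, 3, 2, 4] with axes=[1, 3] pads dim 1 by (1, 2) and dim 3 by (3, 4),
# turning a [1, 3, 4, 5] input into the [1, 6, 4, 12] output declared above:
# _expand_pads_reference(4, [1, 3, 2, 4], [1, 3]) == [0, 1, 0, 3, 0, 2, 0, 4]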
@onnx_test()
def pad_reflect_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 5])
sizes = np.array([0, 2, 0, 1])
pad_tensor = helper.make_tensor(name='pad_size',
data_type=TensorProto.INT32,
dims=sizes.shape,
vals=sizes.astype(int))
arg_pad = onnx.helper.make_node('Constant',
inputs=[],
outputs=['arg_pad'],
value=pad_tensor)
node = onnx.helper.make_node('Pad',
mode='reflect',
inputs=['0', 'arg_pad'],
outputs=['1'])
return ([arg_pad, node], [x], [y])
@onnx_test()
def pad_reflect_with_axes_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 5])
sizes = np.array([2, 1])
pad_tensor = helper.make_tensor(name='pad_size',
data_type=TensorProto.INT32,
dims=sizes.shape,
vals=sizes.astype(int))
arg_pad = onnx.helper.make_node('Constant',
inputs=[],
outputs=['arg_pad'],
value=pad_tensor)
axes = np.array([1])
axes_tensor = helper.make_tensor(name='pad_axes',
data_type=TensorProto.INT32,
dims=axes.shape,
vals=axes.astype(int))
arg_axes = onnx.helper.make_node('Constant',
inputs=[],
outputs=['arg_axes'],
value=axes_tensor)
node = onnx.helper.make_node('Pad',
mode='reflect',
inputs=['0', 'arg_pad', 'arg_axes'],
outputs=['1'])
return ([arg_axes, arg_pad, node], [x], [y])
@onnx_test()
def pad_reflect_multiaxis_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 5])
sizes = np.array([0, 2, 2, 0])
pad_tensor = helper.make_tensor(name='pad_size',
data_type=TensorProto.INT32,
dims=sizes.shape,
vals=sizes.astype(int))
arg_pad = onnx.helper.make_node('Constant',
inputs=[],
outputs=['arg_pad'],
value=pad_tensor)
node = onnx.helper.make_node('Pad',
mode='reflect',
inputs=['0', 'arg_pad'],
outputs=['1'])
return ([arg_pad, node], [x], [y])
@onnx_test()
def pad_attr_dyn_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, None])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, None])
node = onnx.helper.make_node('Pad',
inputs=['0'],
pads=[1, 1, 1, 1],
outputs=['1'])
return ([node], [x], [y])
@onnx_test()
def pad_cnst_dyn_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, None])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, None])
sizes = np.array([0, 2, 0, 1])
pad_tensor = helper.make_tensor(name='pad_size',
data_type=TensorProto.INT32,
dims=sizes.shape,
vals=sizes.astype(int))
arg_pad = onnx.helper.make_node('Constant',
                                    inputs=[],
                                    outputs=['arg_pad'],
                                    value=pad_tensor)
@@ -5151,6 +5651,223 @@ def qlinearadd_bcast_test():
            [sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
@onnx_test()
def qlinearconv_test():
# https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__QLinearConv.html
x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 7, 7])
sc_x = helper.make_tensor('1', TensorProto.FLOAT, [], [0.00369204697])
zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [132])
wt = helper.make_tensor('3', TensorProto.UINT8, [1, 1, 1, 1], [0])
sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [], [0.00172794575])
zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [], [255])
sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.00162681262])
zero_pt_y = helper.make_tensor('7', TensorProto.UINT8, [], [123])
out = helper.make_tensor_value_info('out', TensorProto.UINT8, [1, 1, 7, 7])
node = onnx.helper.make_node(
'QLinearConv',
inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
outputs=['out'],
)
return ([node], [x], [out],
[sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])
@onnx_test()
def qlinearconv_pad_1_test():
# https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 5, 5])
sc_x = helper.make_tensor('1', TensorProto.FLOAT, [],
[0.09411764705882353])
zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [0])
wt = helper.make_tensor('3', TensorProto.UINT8, [1, 1, 3, 3],
[1, 1, 1, 1, 1, 1, 1, 1, 1])
sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [], [1.0])
zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [], [0])
sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.6352941176470588])
zero_pt_y = helper.make_tensor('7', TensorProto.UINT8, [], [0])
out = helper.make_tensor_value_info('out', TensorProto.UINT8, [1, 1, 5, 5])
node = onnx.helper.make_node(
'QLinearConv',
inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
outputs=['out'],
pads=[1, 1, 1, 1],
)
return ([node], [x], [out],
[sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])
@onnx_test()
def qlinearconv_pad_0_test():
# https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 5, 5])
sc_x = helper.make_tensor('1', TensorProto.FLOAT, [],
[0.09411764705882353])
zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [0])
wt = helper.make_tensor('3', TensorProto.UINT8, [1, 1, 3, 3],
[1, 1, 1, 1, 1, 1, 1, 1, 1])
sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [], [1.0])
zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [], [0])
sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.6352941176470588])
zero_pt_y = helper.make_tensor('7', TensorProto.INT8, [], [-128])
out = helper.make_tensor_value_info('out', TensorProto.INT8, [1, 1, 3, 3])
node = onnx.helper.make_node(
'QLinearConv',
inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
outputs=['out'],
pads=[0, 0, 0, 0],
)
return ([node], [x], [out],
[sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])
@onnx_test()
def qlinearconv_scale_1D_test():
# https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 5, 5])
sc_x = helper.make_tensor('1', TensorProto.FLOAT, [],
[0.09411764705882353])
zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [0])
wt = helper.make_tensor(
'3', TensorProto.UINT8, [2, 1, 3, 3],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2])
sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [2], [1.0, 0.5])
zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [2], [0, 0])
sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.6352941176470588])
zero_pt_y = helper.make_tensor('7', TensorProto.INT8, [], [-128])
out = helper.make_tensor_value_info('out', TensorProto.INT8, [1, 2, 3, 3])
node = onnx.helper.make_node(
'QLinearConv',
inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
outputs=['out'],
pads=[0, 0, 0, 0],
)
return ([node], [x], [out],
[sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])
@onnx_test()
def qlinearglobalavgpool_test():
x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 3, 4, 4])
sc_x = helper.make_tensor('X_scale', TensorProto.FLOAT, [], [0.05])
z_pt_x = helper.make_tensor('X_zero_point', TensorProto.UINT8, [], [128])
y = helper.make_tensor_value_info('Y', TensorProto.UINT8, [1, 3, 1, 1])
sc_y = helper.make_tensor('Y_scale', TensorProto.FLOAT, [], [0.025])
z_pt_y = helper.make_tensor('Y_zero_point', TensorProto.UINT8, [], [64])
n = onnx.helper.make_node(
'QLinearGlobalAveragePool',
inputs=['X', 'X_scale', 'X_zero_point', 'Y_scale', 'Y_zero_point'],
outputs=['Y'],
channels_last=0,
)
return ([n], [x], [y], [sc_x, z_pt_x, sc_y, z_pt_y])
@onnx_test()
def qlinearmatmul_1D_test():
a = helper.make_tensor_value_info('A', TensorProto.UINT8, [8])
sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.UINT8, [], [0])
b = helper.make_tensor_value_info('B', TensorProto.UINT8, [8])
sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.UINT8, [],
[128])
sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.UINT8, [], [64])
c = helper.make_tensor_value_info('C', TensorProto.UINT8, [1])
node = onnx.helper.make_node(
'QLinearMatMul',
inputs=[
'A', 'A_scale', 'A_zero_point', 'B', 'B_scale', 'B_zero_point',
'C_scale', 'C_zero_point'
],
outputs=['C'],
)
return ([node], [a, b], [c],
[sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
@onnx_test()
def qlinearmatmul_2D_test():
a = helper.make_tensor_value_info('A', TensorProto.UINT8, [1, 8])
sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.UINT8, [], [0])
b = helper.make_tensor_value_info('B', TensorProto.UINT8, [8, 1])
sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.UINT8, [],
[128])
sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.UINT8, [], [64])
c = helper.make_tensor_value_info('C', TensorProto.UINT8, [1, 1])
node = onnx.helper.make_node(
'QLinearMatMul',
inputs=[
'A', 'A_scale', 'A_zero_point', 'B', 'B_scale', 'B_zero_point',
'C_scale', 'C_zero_point'
],
outputs=['C'],
)
return ([node], [a, b], [c],
[sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
@onnx_test()
def qlinearmatmul_3D_test():
a = helper.make_tensor_value_info('A', TensorProto.UINT8, [2, 2, 4])
sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.0066])
zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.UINT8, [],
[113])
b = helper.make_tensor_value_info('B', TensorProto.UINT8, [2, 4, 3])
sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.00705])
zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.UINT8, [],
[114])
sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.0107])
zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.UINT8, [],
[118])
c = helper.make_tensor_value_info('C', TensorProto.UINT8, [2, 2, 3])
node = onnx.helper.make_node(
'QLinearMatMul',
inputs=[
'A', 'A_scale', 'A_zero_point', 'B', 'B_scale', 'B_zero_point',
'C_scale', 'C_zero_point'
],
outputs=['C'],
)
return ([node], [a, b], [c],
[sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
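# Reference sketch (illustrative only, not part of the generator) of the
# QLinear* arithmetic these tests exercise: dequantize the uint8 inputs with
# their scale/zero point, run the float op, then requantize with the output
# scale/zero point.
def _qlinear_matmul_reference(a, a_scale, a_zp, b, b_scale, b_zp, c_scale, c_zp):
    fa = (a.astype(np.float32) - a_zp) * a_scale
    fb = (b.astype(np.float32) - b_zp) * b_scale
    fc = fa @ fb
    return np.clip(np.rint(fc / c_scale) + c_zp, 0, 255).astype(np.uint8)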
@onnx_test()
def quantizelinear_test():
    arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5])


@@ -5848,6 +6565,24 @@ def reshape_non_standard_test():
    return ([trans, res], [x], [y])
@onnx_test()
def reshape_variable_input_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [4, 2, 3])
x_shape = helper.make_tensor_value_info('1', TensorProto.INT64, [2])
y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 8])
node = onnx.helper.make_node('Reshape', inputs=['0', '1'], outputs=['2'])
return ([node], [x, x_shape], [y])
@onnx_test()
def reshape_variable_input_dyn_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 2, 3])
x_shape = helper.make_tensor_value_info('1', TensorProto.INT64, [2])
y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [None, 6])
node = onnx.helper.make_node('Reshape', inputs=['0', '1'], outputs=['2'])
return ([node], [x, x_shape], [y])
@onnx_test()
def resize_downsample_f_test():
    scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)


@@ -6501,6 +7236,101 @@ def shape_gather_test():
    return ([node_const, node_shape, node_gather], [x], [z])
@onnx_test()
def shrink_hard_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [5])
node = onnx.helper.make_node(
"Shrink",
inputs=["x"],
outputs=["y"],
lambd=1.5,
)
return ([node], [x], [y])
@onnx_test()
def shrink_soft_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [5])
node = onnx.helper.make_node(
"Shrink",
inputs=["x"],
outputs=["y"],
lambd=1.5,
bias=1.5,
)
return ([node], [x], [y])
@onnx_test()
def shrink_verify_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [5])
node = onnx.helper.make_node(
"Shrink",
inputs=["x"],
outputs=["y"],
lambd=-5.0,
bias=1.0,
)
return ([node], [x], [y])
@onnx_test()
def shrink_verify2_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [5])
node = onnx.helper.make_node(
"Shrink",
inputs=["x"],
outputs=["y"],
lambd=-6.0,
bias=5.0,
)
return ([node], [x], [y])
@onnx_test()
def shrink_int8_test():
x = helper.make_tensor_value_info('x', TensorProto.INT8, [3, 3])
y = helper.make_tensor_value_info('y', TensorProto.INT8, [3, 3])
node = onnx.helper.make_node(
"Shrink",
inputs=["x"],
outputs=["y"],
lambd=1.5,
bias=1.5,
)
return ([node], [x], [y])
@onnx_test()
def shrink_uint8_test():
x = helper.make_tensor_value_info('x', TensorProto.UINT8, [3, 3])
y = helper.make_tensor_value_info('y', TensorProto.UINT8, [3, 3])
node = onnx.helper.make_node(
"Shrink",
inputs=["x"],
outputs=["y"],
lambd=5.0,
bias=-4.5,
)
return ([node], [x], [y])
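# Reference sketch (illustrative only, not part of the generator) of the
# Shrink activation tested above: y = x - bias where x > lambd, y = x + bias
# where x < -lambd, and 0 otherwise.
def _shrink_reference(x, lambd=0.5, bias=0.0):
    return np.where(x > lambd, x - bias, np.where(x < -lambd, x + bias, 0))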
@onnx_test()
def sign_test():
    x = helper.make_tensor_value_info('x', TensorProto.DOUBLE, [10, 5])


@@ -7773,7 +8603,7 @@ def transpose_gather_test():


@onnx_test()
def triu_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])


@@ -7786,7 +8616,7 @@ def trilu_test():


@onnx_test()
def triu_batch_diff_k_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 2, 3])
    k = np.array([2])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 3])


@@ -7804,7 +8634,24 @@ def trilu_batch_diff_k_test():


@onnx_test()
def tril_batch_diff_k_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 2, 3])
k = np.array([2])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 3])
k_tensor = helper.make_tensor(name='k',
data_type=TensorProto.INT64,
dims=k.shape,
vals=k.astype(np.int64))
node = onnx.helper.make_node('Trilu',
inputs=['x', 'k'],
outputs=['y'],
upper=0)
return ([node], [x], [y], [k_tensor])
@onnx_test()
def tril_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
@@ -7813,7 +8660,7 @@ def trilu_lower_test():


@onnx_test()
def triu_neg_k_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
    k = np.array([-1])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])


@@ -7827,7 +8674,23 @@ def trilu_neg_k_test():


@onnx_test()
def tril_neg_k_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
k = np.array([-1])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
k_tensor = helper.make_tensor(name='k',
data_type=TensorProto.INT64,
dims=k.shape,
vals=k.astype(np.int64))
node = onnx.helper.make_node('Trilu',
inputs=['x', 'k'],
outputs=['y'],
upper=0)
return ([node], [x], [y], [k_tensor])
@onnx_test()
def triu_out_k_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
    k = np.array([5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])


@@ -7841,7 +8704,23 @@ def trilu_out_k_test():


@onnx_test()
def tril_out_k_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
k = np.array([5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
k_tensor = helper.make_tensor(name='k',
data_type=TensorProto.INT64,
dims=k.shape,
vals=k.astype(np.int64))
node = onnx.helper.make_node('Trilu',
inputs=['x', 'k'],
outputs=['y'],
upper=0)
return ([node], [x], [y], [k_tensor])
@onnx_test()
def triu_row_one_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4])
    k = np.array([1])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4])


@@ -7858,6 +8737,23 @@ def trilu_row_one_test():
    return ([node], [x], [y], [k_tensor])
@onnx_test()
def tril_row_one_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4])
k = np.array([1])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4])
k_tensor = helper.make_tensor(name='k',
data_type=TensorProto.INT64,
dims=k.shape,
vals=k.astype(np.int64))
node = onnx.helper.make_node('Trilu',
inputs=['x', 'k'],
outputs=['y'],
upper=0)
return ([node], [x], [y], [k_tensor])
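# Reference sketch (illustrative only, not part of the generator): Trilu keeps
# the upper (upper=1) or lower (upper=0) triangle of the innermost 2-D slices,
# shifted by k diagonals, which is what np.triu/np.tril compute per slice.
def _trilu_reference(x, k=0, upper=True):
    return np.triu(x, k) if upper else np.tril(x, k)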
@onnx_test()
def undefined_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
......
(Serialized .onnx protobuf files for the new GroupNormalization tests -- group_norm_3d_test, group_norm_3d_half_test, group_norm_4d_test, group_norm_4d_half_test, group_norm_5d_test, group_norm_5d_half_test, group_norm_invalid_bias_shape_test, and group_norm_invalid_input_count_error_test -- are added alongside the generator; their binary contents are not reproduced here.)