Commit f3a8933c authored by Paul

Merge branch 'develop' into blas_tuning

parents ca300bd6 b249fb8a
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/gpu/int8_gemm_pack.hpp>
#include <migraphx/gpu/device/int8_gemm_pack.hpp>
#include <migraphx/gpu/context.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
shape hip_int8_gemm_pack_a::compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{{inputs.at(0)}, *this}.has(1).not_broadcasted().packed();
return inputs.at(0);
}
argument
hip_int8_gemm_pack_a::compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
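// args[0] is the unpacked input and args[1] the pre-allocated output buffer;
// the packed result is written into args[1], which is then returned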
device::int8_gemm_pack_a(ctx.get_stream().get(), args[1], args[0]);
return args[1];
}
shape hip_int8_gemm_pack_b::compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{{inputs.at(0)}, *this}.has(1).not_broadcasted().packed();
return inputs.at(0);
}
argument
hip_int8_gemm_pack_b::compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
device::int8_gemm_pack_b(ctx.get_stream().get(), args[1], args[0]);
return args[1];
}
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
@@ -61,9 +61,8 @@ struct miopen_apply
     const lowering* pass = nullptr;
     std::unordered_map<std::string, std::function<instruction_ref(instruction_ref)>> apply_map{};
     instruction_ref last{};
     bool offload_copy = false;
-    bool int8_x4_format = true;
     bool compute_fp32 = false;

     context& get_context() const
     {
@@ -84,10 +83,8 @@ struct miopen_apply
         assert(mod != nullptr);
         assert(pass != nullptr);

-        auto& ctx      = get_context();
-        int8_x4_format = get_int8_x4_format(ctx);
-        compute_fp32   = get_compute_fp32_flag();
-        offload_copy   = (mod == mpm->get_root_module()) ? pass->offload_copy : false;
+        compute_fp32 = get_compute_fp32_flag();
+        offload_copy = (mod == mpm->get_root_module()) ? pass->offload_copy : false;

         add_generic_op("contiguous");
         add_extend_op("argmax");
@@ -231,18 +228,15 @@ struct miopen_apply
             assert(refs.size() == 2);
             auto output = insert_allocation(ins, ins->get_shape());
             refs.push_back(output);
-            return mod->replace_instruction(
-                ins, rocblas_gemm<Op>{Op{}, 1, 0, int8_x4_format, compute_fp32}, refs);
+            return mod->replace_instruction(ins, rocblas_gemm<Op>{Op{}, 1, 0, compute_fp32}, refs);
         });
     }

     void add_convolution_op(const std::string& name)
     {
         apply_map.emplace(name, [=](instruction_ref ins) {
-            operation conv = make_op(
-                "gpu::" + name,
-                {{"op", ins->get_operator().to_value()}, {"int8_x4_format", int8_x4_format}});
+            operation conv = make_op("gpu::" + name, {{"op", ins->get_operator().to_value()}});
             auto output = insert_allocation(ins, ins->get_shape());
             return mod->replace_instruction(ins,
                                             make_op("gpu::miopen_op", {{"op", to_value(conv)}}),
...
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <iterator>
#include <migraphx/gpu/pack_int8_args.hpp>
#include <migraphx/gpu/int8_gemm_pack.hpp>
#include <migraphx/gpu/int8_conv_pack.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/instruction_ref.hpp>
#include <migraphx/program.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/permutation.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
static instruction_ref pad_ins(module& m, instruction_ref ins, int offset)
{
auto s = ins->get_shape();
auto lens = s.lens();
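// offset counts from the end of the dims: -1 addresses the last dimension
// (k of an un-transposed A), -2 the second-to-last (k of an un-transposed B)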
auto k = lens[lens.size() + offset];
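// round k up to the next multiple of 4, e.g. k = 10 -> pad_k = 12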
auto pad_k = (k + 3) / 4 * 4;
auto pad_lens = lens;
pad_lens[lens.size() + offset] = pad_k;
auto ret_ins = ins;
if(pad_k != k)
{
std::vector<int64_t> pad_dims(lens.size() * 2, 0);
pad_dims[lens.size() + offset] = pad_k - k;
shape ps{s.type(), pad_lens};
auto ins_out =
m.insert_instruction(ins, make_op("hip::allocate", {{"shape", to_value(ps)}}));
auto pad = make_op("pad", {{"pads", pad_dims}});
ret_ins =
m.insert_instruction(std::next(ins), make_op("gpu::pad", pad.to_value()), ins, ins_out);
}
return ret_ins;
}
static std::vector<instruction_ref> pad_inputs(module& m, instruction_ref ins)
{
std::vector<instruction_ref> ret_inputs;
auto inputs = ins->inputs();
auto in0 = inputs.at(0);
auto sa = in0->get_shape();
bool transa = sa.transposed();
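// when A comes from a transpose, pad the pre-transpose source on the dimension
// that maps to k, then re-apply the transpose on top of the padded tensor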
if(transa)
{
auto perm = find_permutation(sa);
auto val = in0->get_operator().to_value();
if(val.contains("dims"))
{
int offset = static_cast<int>(perm.back()) - static_cast<int>(perm.size());
auto t_in = in0->inputs().front();
auto p_in = pad_ins(m, t_in, offset);
auto dims = val.at("dims").to_vector<int64_t>();
auto r_in =
m.insert_instruction(ins, make_op("transpose", {{"permutation", dims}}), p_in);
ret_inputs.push_back(r_in);
}
else
{
shape cs{in0->get_shape().type(), in0->get_shape().lens()};
auto con_out =
m.insert_instruction(ins, make_op("hip::allocate", {{"shape", to_value(cs)}}));
auto cin0 = m.insert_instruction(ins, make_op("gpu::contiguous"), in0, con_out);
ret_inputs.push_back(pad_ins(m, cin0, -1));
}
}
else
{
ret_inputs.push_back(pad_ins(m, in0, -1));
}
auto in1 = inputs.at(1);
auto sb = in1->get_shape();
bool transb = sb.transposed();
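// same handling for B, except that k is the second-to-last dimension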
if(transb)
{
auto perm = find_permutation(sb);
auto val = in1->get_operator().to_value();
if(val.contains("dims"))
{
int offset = static_cast<int>(perm[perm.size() - 2]) - static_cast<int>(perm.size());
auto t_in = in1->inputs().front();
auto p_in = pad_ins(m, t_in, offset);
auto dims = val.at("dims").to_vector<int64_t>();
auto r_in =
m.insert_instruction(ins, make_op("transpose", {{"permutation", dims}}), p_in);
ret_inputs.push_back(r_in);
}
else
{
shape cs{in1->get_shape().type(), in1->get_shape().lens()};
auto con_out =
m.insert_instruction(ins, make_op("hip::allocate", {{"shape", to_value(cs)}}));
auto cin1 = m.insert_instruction(ins, make_op("gpu::contiguous"), in1, con_out);
ret_inputs.push_back(pad_ins(m, cin1, -2));
}
}
else
{
ret_inputs.push_back(pad_ins(m, in1, -2));
}
std::copy(inputs.begin() + 2, inputs.end(), std::back_inserter(ret_inputs));
return ret_inputs;
}
void pack_int8_args::apply(module& m) const
{
for(auto ins : iterator_for(m))
{
if(ins->name() == "gpu::quant_gemm")
{
auto val = ins->get_operator().to_value();
assert(val.contains("int8_x4_format"));
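// nothing to pack when rocblas consumes the plain (non-packed) int8 format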
if(not val.at("int8_x4_format").to<bool>())
{
continue;
}
auto inputs = ins->inputs();
auto lens = inputs.at(0)->get_shape().lens();
// gemm needs k to be a multiple of 4, so pad that dimension
auto old_inputs = inputs;
if((lens.back() % 4) != 0)
{
inputs = pad_inputs(m, ins);
}
bool transa = inputs[0]->get_shape().transposed();
bool transb = inputs[1]->get_shape().transposed();
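// the gemm is executed through rocblas, which is column-major, so the two
// operands are passed swapped; hence int8_gemm_pack_a packs B and
// int8_gemm_pack_b packs A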
if(not transb)
{
auto packed_b = m.insert_instruction(
ins, make_op("hip::allocate", {{"shape", to_value(inputs[1]->get_shape())}}));
auto output_b = m.insert_instruction(
ins, make_op("gpu::int8_gemm_pack_a"), {inputs[1], packed_b});
inputs[1] = output_b;
}
if(transa)
{
auto packed_a = m.insert_instruction(
ins, make_op("hip::allocate", {{"shape", to_value(inputs[0]->get_shape())}}));
auto output_a = m.insert_instruction(
ins, make_op("gpu::int8_gemm_pack_b"), {inputs[0], packed_a});
inputs[0] = output_a;
}
if(inputs != old_inputs)
{
m.replace_instruction(ins, ins->get_operator(), inputs);
}
}
else if(ins->name() == "gpu::quant_convolution")
{
auto val = ins->get_operator().to_value();
if(not val.at("int8_x4_format").to<bool>())
{
continue;
}
auto inputs = ins->inputs();
auto packed_x = m.insert_instruction(
ins,
make_op("hip::allocate",
{{"shape", to_value(pack_int8_shape(inputs[0]->get_shape()))}}));
auto output_x =
m.insert_instruction(ins, make_op("gpu::int8_conv_pack"), {inputs[0], packed_x});
instruction::replace_argument(ins, inputs[0], output_x);
auto packed_w = m.insert_instruction(
ins,
make_op("hip::allocate",
{{"shape", to_value(pack_int8_shape(inputs[1]->get_shape()))}}));
auto output_w =
m.insert_instruction(ins, make_op("gpu::int8_conv_pack"), {inputs[1], packed_w});
instruction::replace_argument(ins, inputs[1], output_w);
}
}
}
shape pack_int8_args::pack_int8_shape(const shape& s) const
{
if(s.type() != shape::int8_type)
{
MIGRAPHX_THROW("PACK_INT8_ARGS: only process int8_type");
}
auto lens = s.lens();
auto strides = s.strides();
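// pad the channel dimension (lens[1]) up to a multiple of 4 and widen the
// batch stride to cover the padded channels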
lens[1] = (lens[1] + 3) / 4 * 4;
strides[0] = strides[1] * lens[1];
return {s.type(), lens, strides};
}
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
@@ -53,19 +53,6 @@ bool get_compute_fp32_flag()
     return (starts_with(device_name, "gfx9") and device_name >= "gfx908");
 }

-bool get_int8_x4_format(context& ctx)
-{
-#if ROCBLAS_VERSION_MAJOR >= 3
-    (void)(ctx);
-    return false;
-#else
-    // int8x4 packed format is only available starting from rocblas-v2.38 and it is deprecated in
-    // v3.0 and will be removed in v4.0
-    rocblas_gemm_flags flag;
-    rocblas_query_int8_layout_flag(ctx.get_stream().get_rocblas(), &flag);
-    return flag == rocblas_gemm_flags_pack_int8x4;
-#endif
-}

 } // namespace gpu
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
@@ -63,7 +63,6 @@
 #include <migraphx/gpu/fuse_ops.hpp>
 #include <migraphx/gpu/prefuse_ops.hpp>
 #include <migraphx/gpu/lowering.hpp>
-#include <migraphx/gpu/pack_int8_args.hpp>
 #include <migraphx/gpu/schedule_model.hpp>
 #include <migraphx/gpu/sync_device.hpp>
 #include <migraphx/gpu/target.hpp>
@@ -154,7 +153,6 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
         dead_code_elimination{},
         compile_miopen{&gctx},
         dead_code_elimination{},
-        pack_int8_args{},
         dead_code_elimination{},
         fuse_ops{&ctx, options.fast_math},
         dead_code_elimination{},
...
@@ -25,7 +25,7 @@
 cmake_policy(SET CMP0057 NEW)

 find_package(Threads REQUIRED)
-rocm_test_link_libraries(Threads::Threads migraphx migraphx_ref migraphx_onnx migraphx_tf)
+rocm_test_link_libraries(Threads::Threads migraphx migraphx_onnx migraphx_tf)
 rocm_test_include_directories(include)

 set(MIGRAPHX_DISABLE_LARGE_BUFFER_TESTS Off CACHE BOOL "")
@@ -146,7 +146,10 @@ endfunction()

 function(test_headers PREFIX)
     file(GLOB HEADERS CONFIGURE_DEPENDS ${ARGN})
+    if(NOT MIGRAPHX_USE_COMPOSABLEKERNEL)
+        list(REMOVE_ITEM HEADERS ${CMAKE_SOURCE_DIR}/src/targets/gpu/include/migraphx/gpu/ck.hpp)
+    endif()
     foreach(HEADER ${HEADERS})
         file(RELATIVE_PATH HEADER_REL ${CMAKE_SOURCE_DIR} ${HEADER})
         string(MAKE_C_IDENTIFIER ${HEADER_REL} TEST_NAME)
...
@@ -152,6 +152,9 @@ TEST_CASE(int_quant_dot_tanh_fails)

 int main(int argc, const char* argv[])
 {
-    test::run(argc, argv);
+    if(migraphx::gpu::mlir_enabled())
+    {
+        test::run(argc, argv);
+    }
     return 0;
 }
-635d3faa3b3908d2806d009dc6872152cfcfcdda
+2eeafc37bca21dc8bf337dda7020b486543162d7
@@ -149,6 +149,21 @@ def argmax_test():
     return ([node], [x], [y])


+@onnx_test()
+def argmax_select_last_index_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])
+    node = onnx.helper.make_node('ArgMax',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 axis=2,
+                                 keepdims=0,
+                                 select_last_index=1)
+
+    return ([node], [x], [y])
+
+
 @onnx_test()
 def argmax_dyn_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 4, 5, 6])
@@ -177,6 +192,21 @@ def argmin_test():
     return ([node], [x], [y])


+@onnx_test()
+def argmin_select_last_index_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 5])
+    node = onnx.helper.make_node('ArgMin',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 axis=3,
+                                 keepdims=0,
+                                 select_last_index=1)
+
+    return ([node], [x], [y])
+
+
 @onnx_test()
 def asin_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
@@ -4681,6 +4711,77 @@ def mean_integral_test():
     return ([node], data, [mean])


+def mvn_default_axes_test_base(dims, type=TensorProto.FLOAT):
+    data = helper.make_tensor_value_info("data", type, dims)
+    out = helper.make_tensor_value_info("out", type, dims)
+    node = helper.make_node("MeanVarianceNormalization",
+                            inputs=["data"],
+                            outputs=["out"])
+    return ([node], [data], [out])
+
+
+@onnx_test()
+def mvn_default_axes_test():
+    return mvn_default_axes_test_base([2, 2, 2, 2])
+
+
+@onnx_test()
+def mvn_default_axes_fp16_test():
+    return mvn_default_axes_test_base([2, 2, 2, 2], TensorProto.FLOAT16)
+
+
+@onnx_test()
+def mvn_default_axes_rank_too_small_test():
+    return mvn_default_axes_test_base([2, 2, 2])
+
+
+@onnx_test()
+def mvn_default_axes_rank_too_big_test():
+    return mvn_default_axes_test_base([2, 2, 2, 2, 2])
+
+
+def mvn_n_rank_test_base(axes, dims, type=TensorProto.FLOAT):
+    data = helper.make_tensor_value_info("data", type, dims)
+    out = helper.make_tensor_value_info("out", type, dims)
+    node = helper.make_node("MeanVarianceNormalization",
+                            inputs=["data"],
+                            outputs=["out"],
+                            axes=axes)
+    return ([node], [data], [out])
+
+
+@onnx_test()
+def mvn_rank_2_test():
+    return mvn_n_rank_test_base([1], [2, 2])
+
+
+@onnx_test()
+def mvn_rank_2_fp16_test():
+    return mvn_n_rank_test_base([1], [2, 2], TensorProto.FLOAT16)
+
+
+@onnx_test()
+def mvn_rank_3_test():
+    return mvn_n_rank_test_base([0, 1], [2, 2, 2])
+
+
+@onnx_test()
+def mvn_rank_3_fp16_test():
+    return mvn_n_rank_test_base([0, 1], [2, 2, 2], TensorProto.FLOAT16)
+
+
+@onnx_test()
+def mvn_axes_rank_too_small_test():
+    return mvn_n_rank_test_base([0, 1, 2], [2, 2, 2])
+
+
+@onnx_test()
+def mvn_axes_rank_too_big_test():
+    return mvn_n_rank_test_base([0], [2, 2, 2])
+
+
 @onnx_test()
 def min_test():
     a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
@@ -8502,7 +8603,7 @@ def transpose_gather_test():


 @onnx_test()
-def trilu_test():
+def triu_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
@@ -8515,7 +8616,7 @@ def trilu_test():


 @onnx_test()
-def trilu_batch_diff_k_test():
+def triu_batch_diff_k_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 2, 3])
     k = np.array([2])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 3])
@@ -8533,7 +8634,24 @@ def trilu_batch_diff_k_test():


 @onnx_test()
-def trilu_lower_test():
+def tril_batch_diff_k_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 2, 3])
+    k = np.array([2])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 3])
+    k_tensor = helper.make_tensor(name='k',
+                                  data_type=TensorProto.INT64,
+                                  dims=k.shape,
+                                  vals=k.astype(np.int64))
+    node = onnx.helper.make_node('Trilu',
+                                 inputs=['x', 'k'],
+                                 outputs=['y'],
+                                 upper=0)
+
+    return ([node], [x], [y], [k_tensor])
+
+
+@onnx_test()
+def tril_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
@@ -8542,7 +8660,7 @@ def trilu_lower_test():


 @onnx_test()
-def trilu_neg_k_test():
+def triu_neg_k_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
     k = np.array([-1])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
@@ -8556,7 +8674,23 @@ def trilu_neg_k_test():


 @onnx_test()
-def trilu_out_k_test():
+def tril_neg_k_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
+    k = np.array([-1])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
+    k_tensor = helper.make_tensor(name='k',
+                                  data_type=TensorProto.INT64,
+                                  dims=k.shape,
+                                  vals=k.astype(np.int64))
+    node = onnx.helper.make_node('Trilu',
+                                 inputs=['x', 'k'],
+                                 outputs=['y'],
+                                 upper=0)
+
+    return ([node], [x], [y], [k_tensor])
+
+
+@onnx_test()
+def triu_out_k_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
     k = np.array([5])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
@@ -8570,7 +8704,23 @@ def trilu_out_k_test():


 @onnx_test()
-def trilu_row_one_test():
+def tril_out_k_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
+    k = np.array([5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
+    k_tensor = helper.make_tensor(name='k',
+                                  data_type=TensorProto.INT64,
+                                  dims=k.shape,
+                                  vals=k.astype(np.int64))
+    node = onnx.helper.make_node('Trilu',
+                                 inputs=['x', 'k'],
+                                 outputs=['y'],
+                                 upper=0)
+
+    return ([node], [x], [y], [k_tensor])
+
+
+@onnx_test()
+def triu_row_one_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4])
     k = np.array([1])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4])
@@ -8587,6 +8737,23 @@ def trilu_row_one_test():
     return ([node], [x], [y], [k_tensor])


+@onnx_test()
+def tril_row_one_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4])
+    k = np.array([1])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4])
+    k_tensor = helper.make_tensor(name='k',
+                                  data_type=TensorProto.INT64,
+                                  dims=k.shape,
+                                  vals=k.astype(np.int64))
+    node = onnx.helper.make_node('Trilu',
+                                 inputs=['x', 'k'],
+                                 outputs=['y'],
+                                 upper=0)
+
+    return ([node], [x], [y], [k_tensor])
+
+
 @onnx_test()
 def undefined_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
...
(binary ONNX protobuf test files added: mvn_default_axes_fp16_test, mvn_default_axes_rank_too_small_test, mvn_default_axes_test, mvn_rank_2_fp16_test, mvn_rank_2_test; MeanVarianceNormalization test graphs whose content is not representable as text)