Commit 31065c7d authored by charlie's avatar charlie

Merge branch 'dyn_squeeze' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into dyn_model_test

parents 6bec381f 6acbd4e4
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "migraphx/instruction_ref.hpp"
#include <migraphx/instruction_ref.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/gpu/target.hpp>
@@ -30,6 +30,7 @@
#include <migraphx/adjust_allocation.hpp>
#include <migraphx/gpu/pack_int8_args.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/device_name.hpp>
#include <migraphx/auto_contiguous.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/replace_allocate.hpp>
@@ -38,10 +39,13 @@
#include <migraphx/pass_manager.hpp>
#include <migraphx/make_op.hpp>
#include <test.hpp>
#include "make_precompile_op.hpp"
void run_passes(migraphx::module& m)
// Treat some operators as compilable to enable lowering
MIGRAPHX_GPU_TEST_PRECOMPILE("add", "mul", "convert")
void run_passes(migraphx::module& m, migraphx::gpu::context& ctx)
{
auto ctx = migraphx::gpu::context{};
migraphx::run_passes(m,
{migraphx::auto_contiguous{},
migraphx::gpu::lowering{&ctx, false},
@@ -52,18 +56,6 @@ void run_passes(migraphx::module& m)
migraphx::dead_code_elimination{}});
}
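The MIGRAPHX_GPU_TEST_PRECOMPILE list above registers "add", "mul" and "convert" so lowering leaves them as precompile ops, and the expected modules below are built with make_precompile_op from the newly added make_precompile_op.hpp. A minimal usage sketch, assuming the helper accepts either an op name or an already-constructed op (both forms appear in the tests below; the target_type value here is illustrative):

    auto mul_op = make_precompile_op("mul"); // wrap an op by name
    auto cvt_op = make_precompile_op(        // or wrap a constructed op
        migraphx::make_op("convert", {{"target_type", migraphx::shape::half_type}}));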
bool get_int8_x4_format()
{
bool int8_x4_format = true;
#if ROCBLAS_VERSION_MAJOR >= 2 && ROCBLAS_VERSION_MINOR >= 38
auto ctx = migraphx::gpu::context{};
rocblas_gemm_flags flag;
rocblas_query_int8_layout_flag(ctx.get_stream().get_rocblas(), &flag);
int8_x4_format = (flag == rocblas_gemm_flags_pack_int8x4);
#endif
return int8_x4_format;
}
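The file-local get_int8_x4_format helper above is deleted in favor of the shared migraphx::gpu::get_int8_x4_format(ctx) that the updated tests call. For reference, a sketch of the query it performed, mirroring the removed body and assuming rocBLAS 2.38 or newer:

    auto ctx = migraphx::gpu::context{};
    rocblas_gemm_flags flag;
    // ask rocBLAS which int8 packing this device/stream expects
    rocblas_query_int8_layout_flag(ctx.get_stream().get_rocblas(), &flag);
    bool int8_x4 = (flag == rocblas_gemm_flags_pack_int8x4);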
TEST_CASE(quant_dot)
{
auto create_module = [] {
@@ -102,11 +94,13 @@ TEST_CASE(quant_dot)
migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(m2_shape)}}));
packa = m.add_instruction(migraphx::make_op("gpu::int8_gemm_pack_a"), l2, alloc);
}
auto gemm =
m.add_instruction(migraphx::make_op("gpu::quant_gemm", {{"int8_x4_format", int8_x4}}),
l1,
packa,
gemm_alloc);
auto gemm = m.add_instruction(
migraphx::make_op("gpu::quant_gemm",
{{"int8_x4_format", int8_x4},
{"compute_fp32", migraphx::gpu::get_compute_fp32_flag()}}),
l1,
packa,
gemm_alloc);
auto beta_broadcast = m.add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", m3_shape.lens()}}), beta);
@@ -116,19 +110,19 @@
m.add_instruction(migraphx::make_op("gpu::contiguous"), beta_broadcast, beta_alloc);
auto mul_alloc = m.add_instruction(
migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(m3_shape)}}));
auto m3_beta =
m.add_instruction(migraphx::make_op("gpu::mul"), l3, beta_contiguous, mul_alloc);
auto gemm_add = m.add_instruction(migraphx::make_op("gpu::add"), gemm, m3_beta, output);
auto m3_beta = m.add_instruction(make_precompile_op("mul"), l3, beta_contiguous, mul_alloc);
auto gemm_add = m.add_instruction(make_precompile_op("add"), gemm, m3_beta, output);
m.add_return({gemm_add});
return m;
};
auto m1 = create_module();
run_passes(m1);
auto m1 = create_module();
auto ctx = migraphx::gpu::context{};
run_passes(m1, ctx);
bool flag = get_int8_x4_format();
auto m2 = create_optimized_int8_x4(flag);
bool int8_x4 = migraphx::gpu::get_int8_x4_format(ctx);
auto m2 = create_optimized_int8_x4(int8_x4);
EXPECT(m1 == m2);
}
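The quant_gemm op gains a compute_fp32 attribute sourced from migraphx::gpu::get_compute_fp32_flag() (hence the added device_name.hpp include above). A plausible sketch of such a device-based check — the exact rule is an assumption, not taken from this diff — enabling fp32 accumulation on gfx908-class and newer GPUs:

    // hypothetical derivation of the flag from the device name
    const auto full_name = migraphx::gpu::get_device_name(); // e.g. "gfx908:sramecc+"
    const auto arch      = full_name.substr(0, full_name.find(':'));
    bool compute_fp32    = arch.compare(0, 4, "gfx9") == 0 and arch >= "gfx908";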
@@ -187,21 +181,23 @@ TEST_CASE(quant_dot_trans)
// back result to int8
auto tl1_convert_alloc = m.add_instruction(migraphx::make_op(
"hip::allocate", {{"shape", migraphx::to_value(alpha_contiguous->get_shape())}}));
auto tl1_convert = m.add_instruction(
migraphx::make_op("gpu::convert", {{"target_type", alpha->get_shape().type()}}),
conta,
tl1_convert_alloc);
auto mul_alloc = m.add_instruction(migraphx::make_op(
auto tl1_convert =
m.add_instruction(make_precompile_op(migraphx::make_op(
"convert", {{"target_type", alpha->get_shape().type()}})),
conta,
tl1_convert_alloc);
auto mul_alloc = m.add_instruction(migraphx::make_op(
"hip::allocate", {{"shape", migraphx::to_value(tl1_convert->get_shape())}}));
auto tl1_alpha_int32 = m.add_instruction(
migraphx::make_op("gpu::mul"), alpha_contiguous, tl1_convert, mul_alloc);
auto tl1_alpha_int32 =
m.add_instruction(make_precompile_op("mul"), alpha_contiguous, tl1_convert, mul_alloc);
// convert mul_res to int8
auto tl1_alpha_int8_alloc = m.add_instruction(migraphx::make_op(
"hip::allocate", {{"shape", migraphx::to_value(conta->get_shape())}}));
auto tl1_alpha_int8 = m.add_instruction(
migraphx::make_op("gpu::convert", {{"target_type", conta->get_shape().type()}}),
tl1_alpha_int32,
tl1_alpha_int8_alloc);
auto tl1_alpha_int8 =
m.add_instruction(make_precompile_op(migraphx::make_op(
"convert", {{"target_type", conta->get_shape().type()}})),
tl1_alpha_int32,
tl1_alpha_int8_alloc);
auto packb = contb;
if(int8_x4)
@@ -211,21 +207,24 @@ TEST_CASE(quant_dot_trans)
packb = m.add_instruction(migraphx::make_op("gpu::int8_gemm_pack_a"), contb, allocpb);
}
auto gemm =
m.add_instruction(migraphx::make_op("gpu::quant_gemm", {{"int8_x4_format", int8_x4}}),
tl1_alpha_int8,
packb,
output);
auto gemm = m.add_instruction(
migraphx::make_op("gpu::quant_gemm",
{{"int8_x4_format", int8_x4},
{"compute_fp32", migraphx::gpu::get_compute_fp32_flag()}}),
tl1_alpha_int8,
packb,
output);
m.add_return({gemm});
return m;
};
auto m1 = create_module();
bool flag = get_int8_x4_format();
auto m2 = create_optimized_int8_x4(flag);
auto m1 = create_module();
auto ctx = migraphx::gpu::context{};
run_passes(m1, ctx);
run_passes(m1);
bool int8_x4 = migraphx::gpu::get_int8_x4_format(ctx);
auto m2 = create_optimized_int8_x4(int8_x4);
EXPECT(m1 == m2);
}
@@ -292,11 +291,13 @@ TEST_CASE(quant_dot_pad)
packa = m.add_instruction(migraphx::make_op("gpu::int8_gemm_pack_a"), pl2, alloc);
}
auto gemm =
m.add_instruction(migraphx::make_op("gpu::quant_gemm", {{"int8_x4_format", int8_x4}}),
pl1,
packa,
gemm_alloc);
auto gemm = m.add_instruction(
migraphx::make_op("gpu::quant_gemm",
{{"int8_x4_format", int8_x4},
{"compute_fp32", migraphx::gpu::get_compute_fp32_flag()}}),
pl1,
packa,
gemm_alloc);
auto beta_broadcast =
m.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s3.lens()}}), beta);
@@ -306,18 +307,18 @@
m.add_instruction(migraphx::make_op("gpu::contiguous"), beta_broadcast, beta_alloc);
auto mul_alloc = m.add_instruction(
migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(s3)}}));
auto m3_beta =
m.add_instruction(migraphx::make_op("gpu::mul"), l3, beta_contiguous, mul_alloc);
auto gemm_add = m.add_instruction(migraphx::make_op("gpu::add"), gemm, m3_beta, output);
auto m3_beta = m.add_instruction(make_precompile_op("mul"), l3, beta_contiguous, mul_alloc);
auto gemm_add = m.add_instruction(make_precompile_op("add"), gemm, m3_beta, output);
m.add_return({gemm_add});
return m;
};
auto m1 = create_module();
bool flag = get_int8_x4_format();
auto m2 = create_optimized_int8_x4(flag);
auto m1 = create_module();
auto ctx = migraphx::gpu::context{};
run_passes(m1, ctx);
run_passes(m1);
bool int8_x4 = migraphx::gpu::get_int8_x4_format(ctx);
auto m2 = create_optimized_int8_x4(int8_x4);
EXPECT(m1 == m2);
}
@@ -396,14 +397,15 @@ TEST_CASE(quant_dot_trans_pad)
// back result to int8
auto tl1_convert_alloc = m.add_instruction(migraphx::make_op(
"hip::allocate", {{"shape", migraphx::to_value(alpha_contiguous->get_shape())}}));
auto tl1_convert = m.add_instruction(
migraphx::make_op("gpu::convert", {{"target_type", alpha->get_shape().type()}}),
conta,
tl1_convert_alloc);
auto mul_alloc = m.add_instruction(migraphx::make_op(
auto tl1_convert =
m.add_instruction(make_precompile_op(migraphx::make_op(
"convert", {{"target_type", alpha->get_shape().type()}})),
conta,
tl1_convert_alloc);
auto mul_alloc = m.add_instruction(migraphx::make_op(
"hip::allocate", {{"shape", migraphx::to_value(tl1_convert->get_shape())}}));
auto tl1_alpha_int32 = m.add_instruction(
migraphx::make_op("gpu::mul"), alpha_contiguous, tl1_convert, mul_alloc);
auto tl1_alpha_int32 =
m.add_instruction(make_precompile_op("mul"), alpha_contiguous, tl1_convert, mul_alloc);
// convert mul_res to int8
auto tl1_alpha_int8_alloc = m.add_instruction(migraphx::make_op(
"hip::allocate", {{"shape", migraphx::to_value(conta->get_shape())}}));
@@ -415,10 +417,11 @@ TEST_CASE(quant_dot_trans_pad)
migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(ps1)}}));
}
auto tl1_alpha_int8 = m.add_instruction(
migraphx::make_op("gpu::convert", {{"target_type", conta->get_shape().type()}}),
tl1_alpha_int32,
tl1_alpha_int8_alloc);
auto tl1_alpha_int8 =
m.add_instruction(make_precompile_op(migraphx::make_op(
"convert", {{"target_type", conta->get_shape().type()}})),
tl1_alpha_int32,
tl1_alpha_int8_alloc);
auto pa = tl1_alpha_int8;
if(int8_x4)
@@ -438,17 +441,23 @@
}
auto gemm = m.add_instruction(
migraphx::make_op("gpu::quant_gemm", {{"int8_x4_format", int8_x4}}), pa, packb, output);
migraphx::make_op("gpu::quant_gemm",
{{"int8_x4_format", int8_x4},
{"compute_fp32", migraphx::gpu::get_compute_fp32_flag()}}),
pa,
packb,
output);
m.add_return({gemm});
return m;
};
auto m1 = create_module();
bool flag = get_int8_x4_format();
auto m2 = create_optimized_int8_x4(flag);
auto m1 = create_module();
auto ctx = migraphx::gpu::context{};
run_passes(m1, ctx);
run_passes(m1);
bool int8_x4 = migraphx::gpu::get_int8_x4_format(ctx);
auto m2 = create_optimized_int8_x4(int8_x4);
EXPECT(m1 == m2);
}
......
@@ -30,7 +30,6 @@
#include <migraphx/ref/target.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/verify.hpp>
#include <migraphx/quantization.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/propagate_constant.hpp>
#include <migraphx/pass_manager.hpp>
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <iostream>
#include <vector>
#include <migraphx/gpu/context.hpp>
#include <migraphx/context.hpp>
#include <migraphx/gpu/compile_hip.hpp>
#include <migraphx/gpu/kernel.hpp>
#include <migraphx/gpu/device_name.hpp>
#include <migraphx/par_for.hpp>
#include <migraphx/program.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/module.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/gpu/target.hpp>
#include "test.hpp"
using hip_stream_ptr = MIGRAPHX_MANAGE_PTR(hipStream_t, hipStreamDestroy);
constexpr uint32_t stream_sync_test_val = 1337;
// NOLINTNEXTLINE
const std::string compare_numbers = R"__migraphx__(
#include <hip/hip_runtime.h>
extern "C" {
__global__ void compare(float* data)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (data[i] != 1337)
{
abort();
}
}
}
int main() {}
)__migraphx__";
migraphx::src_file make_src_file(const std::string& name, const std::string& content)
{
return {name, std::make_pair(content.data(), content.data() + content.size())};
}
hip_stream_ptr get_stream()
{
hipStream_t stream;
auto status = hipStreamCreate(&stream);
if(status != hipSuccess)
{
MIGRAPHX_THROW("Failed to get stream");
}
return hip_stream_ptr{stream};
}
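MIGRAPHX_MANAGE_PTR ties the stream's lifetime to the returned handle, so hipStreamDestroy runs even if a test exits early via a failed EXPECT or a throw. A minimal equivalent without the macro, sketched with only the standard library and the HIP runtime (hipStream_t is a pointer type):

    #include <memory>
    #include <type_traits>
    struct stream_deleter
    {
        void operator()(hipStream_t s) const { hipStreamDestroy(s); } // RAII cleanup
    };
    using managed_stream =
        std::unique_ptr<std::remove_pointer_t<hipStream_t>, stream_deleter>;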
TEST_CASE(test_stream_sync_compare_kernel)
{
auto binaries = migraphx::gpu::compile_hip_src(
{make_src_file("check_stuff.cpp", compare_numbers)}, "", migraphx::gpu::get_device_name());
EXPECT(binaries.size() == 1);
migraphx::gpu::kernel k1{binaries.front(), "compare"};
auto input =
migraphx::fill_argument({migraphx::shape::float_type, {128}}, stream_sync_test_val);
auto ginput = migraphx::gpu::to_gpu(input);
hip_stream_ptr pstream = get_stream();
k1.launch(pstream.get(), input.get_shape().elements(), 1024)(ginput.cast<float>());
auto output = migraphx::gpu::from_gpu(ginput);
EXPECT(output == input);
}
TEST_CASE(test_stream_sync)
{
auto binaries = migraphx::gpu::compile_hip_src(
{make_src_file("check_stuff.cpp", compare_numbers)}, "", migraphx::gpu::get_device_name());
EXPECT(binaries.size() == 1);
migraphx::gpu::kernel k1{binaries.front(), "compare"};
const unsigned int m = 128;
const unsigned int k = 8192;
// Set up empty GPU memory buffers
migraphx::shape input_shape{migraphx::shape::float_type, {m, k}};
migraphx::shape output_shape{migraphx::shape::float_type, {m, m}};
auto input = migraphx::fill_argument(input_shape, 0);
auto ginput = migraphx::gpu::to_gpu(input);
auto output = migraphx::fill_argument(output_shape, 0);
auto goutput = migraphx::gpu::to_gpu(output);
hip_stream_ptr pstream = get_stream();
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {m, k}});
auto y = mm->add_literal(
migraphx::generate_literal(migraphx::shape{migraphx::shape::float_type, {k, m}}));
std::vector<float> data(m * m, stream_sync_test_val);
auto test_val = mm->add_literal(output_shape, data);
auto mult_out = mm->add_instruction(migraphx::make_op("dot"), x, y);
mm->add_instruction(migraphx::make_op("add"), mult_out, test_val);
p.compile(migraphx::gpu::target{});
// Run network and then verify with kernel
auto args = p.eval({{"x", ginput}, {"output", goutput}}, {pstream.get(), true});
k1.launch(pstream.get(), m * m, 1024)(goutput.cast<float>());
output = migraphx::gpu::from_gpu(goutput);
EXPECT(output != input);
}
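The second argument to p.eval is an execution environment: the stream to enqueue on and an async flag. Because the verify kernel is then launched on the same stream, HIP's in-order stream semantics sequence it after the network with no explicit synchronization before from_gpu. The pattern, restated as a sketch with the names used in this test:

    auto results = p.eval({{"x", ginput}, {"output", goutput}},
                          {pstream.get(), true});       // enqueue async on pstream
    k1.launch(pstream.get(), m * m, 1024)(goutput.cast<float>()); // same stream: runs after eval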
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -112,12 +112,12 @@ struct mod_pass_op
migraphx::shape compute_shape(std::vector<migraphx::shape> inputs,
std::vector<migraphx::module_ref> mods) const
{
if(!mods.empty())
if(not mods.empty())
{
auto out_shapes = mods[0]->get_output_shapes();
return out_shapes[0];
}
if(!inputs.empty())
if(not inputs.empty())
{
return inputs.front();
}
@@ -186,9 +186,10 @@ struct nop
migraphx::shape compute_shape(const std::vector<migraphx::shape>&) const { return {}; }
};
inline migraphx::literal get_2x2()
inline migraphx::literal get_2x2(int base = 0)
{
return migraphx::literal{{migraphx::shape::float_type, {2, 2}}, {1, 2, 3, 4}};
return migraphx::literal{{migraphx::shape::float_type, {2, 2}},
{base + 1, base + 2, base + 3, base + 4}};
}
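The new base parameter lets one helper produce distinct literals, e.g. for tests that need two different 2x2 constants of the same shape:

    auto a = get_2x2();  // {1, 2, 3, 4}
    auto b = get_2x2(4); // {5, 6, 7, 8}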
inline migraphx::literal get_2x2_transposed()
......
@@ -108,15 +108,7 @@ struct function
};
template <class Stream, class Iterator>
inline Stream& stream_range(Stream& s, Iterator start, Iterator last)
{
if(start != last)
{
s << *start;
std::for_each(std::next(start), last, [&](auto&& x) { s << ", " << x; });
}
return s;
}
Stream& stream_range(Stream& s, Iterator start, Iterator last);
template <class Stream>
inline Stream& operator<<(Stream& s, std::nullptr_t)
@@ -136,6 +128,17 @@ inline auto operator<<(Stream& s, const Range& v) -> decltype(stream_range(s, v.
return s;
}
template <class Stream, class Iterator>
inline Stream& stream_range(Stream& s, Iterator start, Iterator last)
{
if(start != last)
{
s << *start;
std::for_each(std::next(start), last, [&](auto&& x) { s << ", " << x; });
}
return s;
}
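Splitting stream_range into a declaration before operator<< and a definition after it lets the two templates call each other: the range operator<< dispatches to stream_range, and stream_range's s << *start can in turn resolve the range overload, so nested containers print. An illustrative sketch, assuming these operators are in scope:

    std::vector<std::vector<int>> vv = {{1, 2}, {3, 4}};
    std::stringstream ss;
    ss << vv; // each inner vector streams through operator<< -> stream_range,
              // yielding "1, 2, 3, 4" with the ", " separator above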
template <class T>
const T& get_value(const T& x)
{
@@ -342,7 +345,7 @@ inline std::ostream& operator<<(std::ostream& os, const color& c)
template <class T, class F>
void failed(T x, const char* msg, const char* func, const char* file, int line, F f)
{
if(!bool(x.value()))
if(not bool(x.value()))
{
std::cout << func << std::endl;
std::cout << file << ":" << line << ":" << std::endl;
......
@@ -39,8 +39,8 @@ TEST_CASE(literal_test)
migraphx::literal l2 = l1; // NOLINT
EXPECT(l1 == l2);
EXPECT(l1.at<int>(0) == 1);
EXPECT(!l1.empty());
EXPECT(!l2.empty());
EXPECT(not l1.empty());
EXPECT(not l2.empty());
migraphx::literal l3{};
migraphx::literal l4{};
......
@@ -724,7 +724,7 @@ TEST_CASE(test39)
auto sub_modules = p.get_modules();
std::reverse(sub_modules.begin(), sub_modules.end());
for(auto& smod : sub_modules)
for(const auto& smod : sub_modules)
{
run_pass(*smod);
}
......
[Binary ONNX protobuf test files — content not renderable as text. Per the gen_onnx.py changes below, this commit adds batch_norm_1d_test.onnx, batch_norm_2d_test.onnx, batch_norm_3d_test.onnx, batch_norm_flat_test.onnx, batch_norm_invalid_bias_rank_test.onnx and batch_norm_rank_2_test.onnx, and removes batchnorm_1d_test.onnx and batchnorm_3d_test.onnx.]
@@ -314,42 +314,163 @@ def averagepool_same_upper_test():
@onnx_test
def batchnorm_1d_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5])
scale = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
bias = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3])
mean = helper.make_tensor_value_info('3', TensorProto.FLOAT, [3])
var = helper.make_tensor_value_info('4', TensorProto.FLOAT, [3])
out = helper.make_tensor_value_info('5', TensorProto.FLOAT, [1, 3, 5])
node = onnx.helper.make_node('BatchNormalization',
inputs=['0', '1', '2', '3', '4'],
outputs=['5'],
epsilon=1e-6,
momentum=0.9)
def batch_norm_flat_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [1])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [1])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [1])
var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [1])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 'scale', 'bias', 'mean', 'variance'],
outputs=['y'],
epsilon=1e-6)
return ([node], [x, scale, bias, mean, var], [out])
@onnx_test
def batchnorm_3d_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5, 5, 5])
scale = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
bias = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3])
mean = helper.make_tensor_value_info('3', TensorProto.FLOAT, [3])
var = helper.make_tensor_value_info('4', TensorProto.FLOAT, [3])
out = helper.make_tensor_value_info('5', TensorProto.FLOAT,
[1, 3, 5, 5, 5])
node = onnx.helper.make_node('BatchNormalization',
inputs=['0', '1', '2', '3', '4'],
outputs=['5'],
epsilon=1e-6,
momentum=0.9)
def batch_norm_rank_2_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 5])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [5])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [5])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [5])
var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [5])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 5])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 'scale', 'bias', 'mean', 'variance'],
outputs=['y'],
epsilon=1e-6)
return ([node], [x, scale, bias, mean, var], [out])
@onnx_test
def batch_norm_1d_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [2, 3, 4])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [3])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [3])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [3])
var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [3])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [2, 3, 4])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 'scale', 'bias', 'mean', 'variance'],
outputs=['y'])
return ([node], [x, scale, bias, mean, var], [out])
@onnx_test
def batch_norm_2d_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3, 4, 4])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [3])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [3])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [3])
var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [3])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4, 4])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 'scale', 'bias', 'mean', 'variance'],
outputs=['y'])
return ([node], [x, scale, bias, mean, var], [out])
@onnx_test
def batch_norm_3d_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT16,
[2, 2, 2, 2, 2])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT16, [2])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT16, [2])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT16, [2])
var = helper.make_tensor_value_info('variance', TensorProto.FLOAT16, [2])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT16,
[2, 2, 2, 2, 2])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 'scale', 'bias', 'mean', 'variance'],
outputs=['y'],
epsilon=1e-6)
return ([node], [x, scale, bias, mean, var], [out])
@onnx_test
def batch_norm_invalid_bias_rank_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3, 4, 4])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [3])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [3, 1])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [3])
var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [3])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4, 4])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 'scale', 'bias', 'mean', 'variance'],
outputs=['y'])
return ([node], [x, scale, bias, mean, var], [out])
@onnx_test
def binary_dyn_brcst_prelu_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[None, 3, 4, 5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 5])
arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
[None, 3, 4, 5])
node = onnx.helper.make_node(
'PRelu',
inputs=['0', '1'],
outputs=['out'],
)
return ([node], [arg0, arg1], [arg_out])
@onnx_test
def binary_dyn_brcst_add_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [4, 5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT,
[None, 3, 4, 5])
arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
[None, 3, 4, 5])
node = onnx.helper.make_node(
'Add',
inputs=['0', '1'],
outputs=['out'],
)
return ([node], [arg0, arg1], [arg_out])
@onnx_test
def binary_dyn_brcst_mul_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[None, 3, 4, 5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 1])
arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
[None, 3, 4, 5])
node = onnx.helper.make_node(
'Mul',
inputs=['0', '1'],
outputs=['out'],
)
return ([node], [arg0, arg1], [arg_out])
@onnx_test
def cast_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [10])
@@ -3578,6 +3699,16 @@ def neg_test():
return ([node], [x], [y])
@onnx_test
def neg_dynamic_test():
x = helper.make_tensor_value_info('0', TensorProto.INT64, [None, 3])
y = helper.make_tensor_value_info('1', TensorProto.INT64, [None, 3])
node = onnx.helper.make_node('Neg', inputs=['0'], outputs=['1'])
return ([node], [x], [y])
@onnx_test
def nms_test():
b = helper.make_tensor_value_info('boxes', TensorProto.FLOAT, [1, 6, 4])
@@ -3589,7 +3720,7 @@ def nms_test():
st = helper.make_tensor_value_info('score_threshold', TensorProto.FLOAT,
[1])
out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
[6, 3])
[None, 3])
node = onnx.helper.make_node('NonMaxSuppression',
inputs=[
@@ -3603,6 +3734,108 @@ def nms_test():
return ([node], [b, s, mo, iou, st], [out])
@onnx_test
def nms_use_dyn_output_false_test():
b = helper.make_tensor_value_info('boxes', TensorProto.FLOAT, [1, 6, 4])
s = helper.make_tensor_value_info('scores', TensorProto.FLOAT, [1, 1, 6])
mo = helper.make_tensor_value_info('max_output_boxes_per_class',
TensorProto.INT64, [1])
iou = helper.make_tensor_value_info('iou_threshold', TensorProto.FLOAT,
[1])
st = helper.make_tensor_value_info('score_threshold', TensorProto.FLOAT,
[1])
out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
[None, 3])
node = onnx.helper.make_node('NonMaxSuppression',
inputs=[
'boxes', 'scores',
'max_output_boxes_per_class',
'iou_threshold', 'score_threshold'
],
outputs=['selected_indices'],
use_dyn_output=0)
return ([node], [b, s, mo, iou, st], [out])
@onnx_test
def nms_dynamic_batch_test():
b = helper.make_tensor_value_info('boxes', TensorProto.FLOAT, [None, 6, 4])
s = helper.make_tensor_value_info('scores', TensorProto.FLOAT,
[None, 1, 6])
mo = helper.make_tensor_value_info('max_output_boxes_per_class',
TensorProto.INT64, [1])
iou = helper.make_tensor_value_info('iou_threshold', TensorProto.FLOAT,
[1])
st = helper.make_tensor_value_info('score_threshold', TensorProto.FLOAT,
[1])
out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
[None, 3])
node = onnx.helper.make_node('NonMaxSuppression',
inputs=[
'boxes', 'scores',
'max_output_boxes_per_class',
'iou_threshold', 'score_threshold'
],
outputs=['selected_indices'],
center_point_box=1,
use_dyn_output=1)
return ([node], [b, s, mo, iou, st], [out])
@onnx_test
def nms_dynamic_boxes_test():
b = helper.make_tensor_value_info('boxes', TensorProto.FLOAT, [1, None, 4])
s = helper.make_tensor_value_info('scores', TensorProto.FLOAT,
[1, 1, None])
mo = helper.make_tensor_value_info('max_output_boxes_per_class',
TensorProto.INT64, [1])
iou = helper.make_tensor_value_info('iou_threshold', TensorProto.FLOAT,
[1])
st = helper.make_tensor_value_info('score_threshold', TensorProto.FLOAT,
[1])
out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
[None, 3])
node = onnx.helper.make_node('NonMaxSuppression',
inputs=[
'boxes', 'scores',
'max_output_boxes_per_class',
'iou_threshold', 'score_threshold'
],
outputs=['selected_indices'])
return ([node], [b, s, mo, iou, st], [out])
@onnx_test
def nms_dynamic_classes_test():
b = helper.make_tensor_value_info('boxes', TensorProto.FLOAT, [1, 6, 4])
s = helper.make_tensor_value_info('scores', TensorProto.FLOAT,
[1, None, 6])
mo = helper.make_tensor_value_info('max_output_boxes_per_class',
TensorProto.INT64, [1])
iou = helper.make_tensor_value_info('iou_threshold', TensorProto.FLOAT,
[1])
st = helper.make_tensor_value_info('score_threshold', TensorProto.FLOAT,
[1])
out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
[None, 3])
node = onnx.helper.make_node('NonMaxSuppression',
inputs=[
'boxes', 'scores',
'max_output_boxes_per_class',
'iou_threshold', 'score_threshold'
],
outputs=['selected_indices'])
return ([node], [b, s, mo, iou, st], [out])
@onnx_test
def not_test():
x = helper.make_tensor_value_info('0', TensorProto.INT32, [4])
@@ -5109,6 +5342,20 @@ def sinh_test():
return ([node], [x], [y])
@onnx_test
def sinh_dynamic_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None])
node = onnx.helper.make_node(
'Sinh',
inputs=['x'],
outputs=['y'],
)
return ([node], [x], [y])
@onnx_test
def size_float_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3, 4])
......