Unverified Commit 66483df6 authored by Chris Austen, committed by GitHub

Merge branch 'develop' into simplify_1_mul_div_ops

parents 9310bff0 40118191
......@@ -35,6 +35,12 @@ namespace {
template <class Derived, std::size_t N>
struct layernorm_base
{
float epsilon = 1e-12f;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(f(self.epsilon, "epsilon"));
}
shape compute_shape(std::vector<shape> inputs, std::vector<module_ref> mods) const
{
std::size_t nargs = 1;
......@@ -62,6 +68,7 @@ struct layernorm_base
struct layernorm : layernorm_base<layernorm, 0>
{
std::string name() const { return "gpu::prelayernorm"; }
};
MIGRAPHX_REGISTER_OP(layernorm);
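For context (not part of the diff): because epsilon is now reflected, it participates in the op's generic value interface and can be set and read back by name. A minimal sketch, assuming the op is registered as above; the function name here is made up:

#include <migraphx/make_op.hpp>
#include <migraphx/operation.hpp>

void epsilon_roundtrip_sketch()
{
    // Construct gpu::prelayernorm with a non-default epsilon; the value is
    // routed into layernorm_base::epsilon through reflect().
    auto op  = migraphx::make_op("gpu::prelayernorm", {{"epsilon", 1e-5f}});
    // Read it back out of the reflected state.
    auto eps = op.to_value()["epsilon"].to<float>(); // eps == 1e-5f
}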
......@@ -80,8 +87,9 @@ struct find_layernorm
{
auto ins = r.result;
auto x_ins = r.instructions["x"];
auto eps = r.instructions["eps"]->eval().at<float>();
m.replace_instruction(ins, layernorm{}, x_ins);
m.replace_instruction(ins, layernorm{eps}, x_ins);
}
};
......@@ -96,8 +104,9 @@ struct find_add_layernorm
{
auto ins = r.result;
auto add_ins = r.instructions["add"];
auto eps = r.instructions["eps"]->eval().at<float>();
m.replace_instruction(ins, add_layernorm{}, add_ins->inputs());
m.replace_instruction(ins, add_layernorm{eps}, add_ins->inputs());
}
};
} // namespace
......
......@@ -22,7 +22,6 @@
* THE SOFTWARE.
*/
#include <migraphx/gpu/quant_convolution.hpp>
#include <migraphx/gpu/device/convert.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/generate.hpp>
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/gpu/softmax.hpp>
#include <migraphx/gpu/device/softmax.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/tune_axis.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
shape hip_softmax::compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{inputs, *this}.has(2).standard();
return op.normalize_compute_shape({inputs.at(0)});
}
argument hip_softmax::compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
auto n_dim = args.front().get_shape().lens().size();
auto tuned_axis = tune_axis(n_dim, op.axis, op.name());
device::softmax(ctx.get_stream().get(), args.back(), args.front(), tuned_axis);
return args.back();
}
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
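tune_axis is what makes the negative-axis convention work in compute above: it maps a possibly negative axis into [0, n_dim) and rejects out-of-range values. A rough host-side sketch of the contract assumed here (the real helper lives in <migraphx/tune_axis.hpp> and includes the op name in its error message):

#include <stdexcept>

int tune_axis_sketch(int n_dim, int axis)
{
    // e.g. n_dim = 4, axis = -1 -> 3; axis = 2 -> 2; axis = 4 -> throws
    if(axis < -n_dim or axis >= n_dim)
        throw std::out_of_range("axis out of range");
    return axis < 0 ? axis + n_dim : axis;
}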
......@@ -347,7 +347,7 @@ void tf_parser::parse_node(const std::string& name)
// input was from a node with multiple outputs
if(contains(input_name, ':'))
{
input_name = input_name.substr(0, input.find(':'));
input_name.resize(input.find(':'));
}
else
{
......
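The substr-to-resize change in tf_parser above is behavior-preserving: both drop everything from the first ':' onward, but resize() truncates the string in place instead of building a temporary copy (note the truncation length still comes from searching input, as in the old line). A standalone illustration with a hypothetical node name:

#include <string>
#include <cassert>

void truncate_output_suffix()
{
    std::string input_name = "conv2d:1";     // hypothetical "node:output" name
    input_name.resize(input_name.find(':')); // in-place, no temporary string
    assert(input_name == "conv2d");
}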
......@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "migraphx/dead_code_elimination.hpp"
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/fuse_pointwise.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/pass_manager.hpp>
......
......@@ -40,6 +40,10 @@
#include <migraphx/make_op.hpp>
#include <basic_ops.hpp>
#include <test.hpp>
#include "make_precompile_op.hpp"
// Treat some operators as compilable to enable lowering
MIGRAPHX_GPU_TEST_PRECOMPILE("add", "mul", "convert")
void run_lowering(migraphx::program& p, bool offload_copy = false)
{
......@@ -118,7 +122,7 @@ TEST_CASE(no_copy_dead_param)
auto xb = mm->add_instruction(migraphx::make_op("hip::allocate", {{"shape", to_value(s)}}));
auto gx = mm->add_instruction(migraphx::make_op("hip::copy_to_gpu"), x, xb);
auto ab = mm->add_instruction(migraphx::make_op("hip::allocate", {{"shape", to_value(s)}}));
auto sum = mm->add_instruction(migraphx::make_op("gpu::add"), gx, gx, ab);
auto sum = mm->add_instruction(make_precompile_op("add"), gx, gx, ab);
auto r = mm->add_instruction(migraphx::make_op("hip::copy_from_gpu"), sum);
mm->add_return({r});
......
......@@ -307,12 +307,14 @@ TEST_CASE(compile_math)
"erf(x)",
"exp(x)",
"floor(x)",
"fmod(x, x)",
"isnan(x)",
"log(x)",
"max(x, x)",
"min(x, x)",
"pow(x, 0)",
"pow(x, x)",
"remainder(x,x)",
"round(x)",
"rsqrt(x)",
"sin(x)",
......
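fmod and remainder are easy to conflate, which is presumably why the test now covers both: fmod truncates the quotient toward zero while remainder rounds it to nearest, so the results can differ in sign and magnitude. A quick host-side illustration using the standard C++ functions (not MIGraphX device code):

#include <cmath>
#include <cstdio>

int main()
{
    std::printf("%g\n", std::fmod(5.0, 3.0));      //  2: 5 - trunc(5/3)*3
    std::printf("%g\n", std::remainder(5.0, 3.0)); // -1: 5 - round(5/3)*3
    return 0;
}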
......@@ -21,63 +21,46 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/

Removed (the old device GELU implementation, gelu.cpp):

#include <migraphx/gpu/device/gelu.hpp>
#include <migraphx/gpu/device/nary.hpp>
#include <migraphx/gpu/device/types.hpp>
#include <cmath>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

// x * 0.5 * (1.0 + erf(x / sqrt(2.0)))
template <class T>
auto gelu_fn(T x) __device__
{
    return x * 0.5 * (1 + ::erf(x * M_SQRT1_2));
}

// 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * pow(x, 3))))
template <class T>
auto gelu_fn_new(T x) __device__
{
    return 0.5 * x * (1 + tanh(sqrt(M_2_PI) * (x + 0.044715 * x * x * x)));
}

void gelu(hipStream_t stream, const argument& result, const argument& arg)
{
    nary(stream, result, arg)([](auto x) __device__ { return gelu_fn(to_hip_type(x)); });
}

void gelu_new(hipStream_t stream, const argument& result, const argument& arg)
{
    nary(stream, result, arg)([](auto x) __device__ { return gelu_fn_new(to_hip_type(x)); });
}

void add_gelu(hipStream_t stream,
              const argument& result,
              const argument& arg1,
              const argument& arg2)
{
    nary(stream, result, arg1, arg2)([](auto x, auto y) __device__ {
        auto sum = to_hip_type(x + y);
        return gelu_fn(sum);
    });
}

void add_gelu_new(hipStream_t stream,
                  const argument& result,
                  const argument& arg1,
                  const argument& arg2)
{
    nary(stream, result, arg1, arg2)([](auto x, auto y) __device__ {
        auto sum = to_hip_type(x + y);
        return gelu_fn_new(sum);
    });
}

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

Added (the new test helper, make_precompile_op.hpp):

#ifndef MIGRAPHX_GUARD_TEST_GPU_MAKE_PRECOMPILE_OP_HPP
#define MIGRAPHX_GUARD_TEST_GPU_MAKE_PRECOMPILE_OP_HPP

#include <migraphx/operation.hpp>
#include <migraphx/gpu/compiler.hpp>
#include <migraphx/make_op.hpp>

// NOLINTNEXTLINE
#define MIGRAPHX_GPU_TEST_PRECOMPILE(...)                                 \
    struct test_compiler : migraphx::gpu::compiler<test_compiler>        \
    {                                                                     \
        std::vector<std::string> names() const { return {__VA_ARGS__}; } \
                                                                          \
        template <class... Ts>                                            \
        migraphx::operation compile_op(Ts&&...) const                     \
        {                                                                 \
            MIGRAPHX_THROW("Not compilable");                             \
        }                                                                 \
                                                                          \
        template <class... Ts>                                            \
        migraphx::gpu::compiler_replace compile(Ts&&...) const            \
        {                                                                 \
            MIGRAPHX_THROW("Not compilable");                             \
        }                                                                 \
    };

inline migraphx::operation make_precompile_op(migraphx::rank<0>, const migraphx::operation& op)
{
    return migraphx::make_op("gpu::precompile_op", {{"op", migraphx::to_value(op)}});
}

inline migraphx::operation make_precompile_op(migraphx::rank<1>, const std::string& name)
{
    return make_precompile_op(migraphx::rank<0>{}, migraphx::make_op(name));
}

template <class T>
auto make_precompile_op(const T& x)
{
    return make_precompile_op(migraphx::rank<1>{}, x);
}

#endif // MIGRAPHX_GUARD_TEST_GPU_MAKE_PRECOMPILE_OP_HPP
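The three make_precompile_op overloads rely on the rank<N> tag-dispatch idiom: rank<1> derives from rank<0>, so overload resolution prefers the string overload and falls back to the generic operation overload via the derived-to-base conversion. A self-contained illustration of the idiom (independent of MIGraphX; all names here are made up):

#include <cstddef>
#include <iostream>
#include <string>

template <std::size_t N>
struct rank : rank<N - 1> {};
template <>
struct rank<0> {};

void dispatch(rank<0>, double) { std::cout << "double\n"; }
void dispatch(rank<1>, const std::string& s) { std::cout << "string: " << s << "\n"; }

template <class T>
void dispatch(const T& x)
{
    dispatch(rank<1>{}, x); // tries the string overload first, then decays to rank<0>
}

int main()
{
    dispatch(std::string("add")); // exact match on the rank<1> overload
    dispatch(3.14);               // falls back to the rank<0> overload
    return 0;
}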
......@@ -144,7 +144,7 @@ TEST_CASE(conv)
{
const std::string mlir_output = R"__migraphx__(
module {
func @main(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
func.func @main(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
%0 = migraphx.convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1], use_dynamic_same_auto_pad = 0 : i64} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
return %0 : tensor<1x2x2x2xf32>
}
......@@ -167,7 +167,7 @@ TEST_CASE(conv_add_relu)
{
const std::string mlir_output = R"__migraphx__(
module {
func @main(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
func.func @main(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
%0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1], use_dynamic_same_auto_pad = 0 : i64} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
%1 = migraphx.add(%0, %arg0) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
%2 = migraphx.relu(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
......
......@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "migraphx/instruction_ref.hpp"
#include <migraphx/instruction_ref.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/gpu/target.hpp>
......@@ -38,6 +38,10 @@
#include <migraphx/pass_manager.hpp>
#include <migraphx/make_op.hpp>
#include <test.hpp>
#include "make_precompile_op.hpp"
// Treat some operators as compilable to enable lowering
MIGRAPHX_GPU_TEST_PRECOMPILE("add", "mul", "convert")
void run_passes(migraphx::module& m)
{
......@@ -116,9 +120,8 @@ TEST_CASE(quant_dot)
m.add_instruction(migraphx::make_op("gpu::contiguous"), beta_broadcast, beta_alloc);
auto mul_alloc = m.add_instruction(
migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(m3_shape)}}));
auto m3_beta =
m.add_instruction(migraphx::make_op("gpu::mul"), l3, beta_contiguous, mul_alloc);
auto gemm_add = m.add_instruction(migraphx::make_op("gpu::add"), gemm, m3_beta, output);
auto m3_beta = m.add_instruction(make_precompile_op("mul"), l3, beta_contiguous, mul_alloc);
auto gemm_add = m.add_instruction(make_precompile_op("add"), gemm, m3_beta, output);
m.add_return({gemm_add});
return m;
......@@ -187,21 +190,23 @@ TEST_CASE(quant_dot_trans)
// back result to int8
auto tl1_convert_alloc = m.add_instruction(migraphx::make_op(
"hip::allocate", {{"shape", migraphx::to_value(alpha_contiguous->get_shape())}}));
auto tl1_convert = m.add_instruction(
migraphx::make_op("gpu::convert", {{"target_type", alpha->get_shape().type()}}),
conta,
tl1_convert_alloc);
auto mul_alloc = m.add_instruction(migraphx::make_op(
auto tl1_convert =
m.add_instruction(make_precompile_op(migraphx::make_op(
"convert", {{"target_type", alpha->get_shape().type()}})),
conta,
tl1_convert_alloc);
auto mul_alloc = m.add_instruction(migraphx::make_op(
"hip::allocate", {{"shape", migraphx::to_value(tl1_convert->get_shape())}}));
auto tl1_alpha_int32 = m.add_instruction(
migraphx::make_op("gpu::mul"), alpha_contiguous, tl1_convert, mul_alloc);
auto tl1_alpha_int32 =
m.add_instruction(make_precompile_op("mul"), alpha_contiguous, tl1_convert, mul_alloc);
// convert mul_res to int8
auto tl1_alpha_int8_alloc = m.add_instruction(migraphx::make_op(
"hip::allocate", {{"shape", migraphx::to_value(conta->get_shape())}}));
auto tl1_alpha_int8 = m.add_instruction(
migraphx::make_op("gpu::convert", {{"target_type", conta->get_shape().type()}}),
tl1_alpha_int32,
tl1_alpha_int8_alloc);
auto tl1_alpha_int8 =
m.add_instruction(make_precompile_op(migraphx::make_op(
"convert", {{"target_type", conta->get_shape().type()}})),
tl1_alpha_int32,
tl1_alpha_int8_alloc);
auto packb = contb;
if(int8_x4)
......@@ -306,9 +311,8 @@ TEST_CASE(quant_dot_pad)
m.add_instruction(migraphx::make_op("gpu::contiguous"), beta_broadcast, beta_alloc);
auto mul_alloc = m.add_instruction(
migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(s3)}}));
auto m3_beta =
m.add_instruction(migraphx::make_op("gpu::mul"), l3, beta_contiguous, mul_alloc);
auto gemm_add = m.add_instruction(migraphx::make_op("gpu::add"), gemm, m3_beta, output);
auto m3_beta = m.add_instruction(make_precompile_op("mul"), l3, beta_contiguous, mul_alloc);
auto gemm_add = m.add_instruction(make_precompile_op("add"), gemm, m3_beta, output);
m.add_return({gemm_add});
return m;
};
......@@ -396,14 +400,15 @@ TEST_CASE(quant_dot_trans_pad)
// back result to int8
auto tl1_convert_alloc = m.add_instruction(migraphx::make_op(
"hip::allocate", {{"shape", migraphx::to_value(alpha_contiguous->get_shape())}}));
auto tl1_convert = m.add_instruction(
migraphx::make_op("gpu::convert", {{"target_type", alpha->get_shape().type()}}),
conta,
tl1_convert_alloc);
auto mul_alloc = m.add_instruction(migraphx::make_op(
auto tl1_convert =
m.add_instruction(make_precompile_op(migraphx::make_op(
"convert", {{"target_type", alpha->get_shape().type()}})),
conta,
tl1_convert_alloc);
auto mul_alloc = m.add_instruction(migraphx::make_op(
"hip::allocate", {{"shape", migraphx::to_value(tl1_convert->get_shape())}}));
auto tl1_alpha_int32 = m.add_instruction(
migraphx::make_op("gpu::mul"), alpha_contiguous, tl1_convert, mul_alloc);
auto tl1_alpha_int32 =
m.add_instruction(make_precompile_op("mul"), alpha_contiguous, tl1_convert, mul_alloc);
// convert mul_res to int8
auto tl1_alpha_int8_alloc = m.add_instruction(migraphx::make_op(
"hip::allocate", {{"shape", migraphx::to_value(conta->get_shape())}}));
......@@ -415,10 +420,11 @@ TEST_CASE(quant_dot_trans_pad)
migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(ps1)}}));
}
auto tl1_alpha_int8 = m.add_instruction(
migraphx::make_op("gpu::convert", {{"target_type", conta->get_shape().type()}}),
tl1_alpha_int32,
tl1_alpha_int8_alloc);
auto tl1_alpha_int8 =
m.add_instruction(make_precompile_op(migraphx::make_op(
"convert", {{"target_type", conta->get_shape().type()}})),
tl1_alpha_int32,
tl1_alpha_int8_alloc);
auto pa = tl1_alpha_int8;
if(int8_x4)
......
......@@ -724,7 +724,7 @@ TEST_CASE(test39)
auto sub_modules = p.get_modules();
std::reverse(sub_modules.begin(), sub_modules.end());
for(auto& smod : sub_modules)
for(const auto& smod : sub_modules)
{
run_pass(*smod);
}
......
(Binary files elided: serialized ONNX BatchNormalization test graphs, not human-readable as text. Per the gen_onnx.py diff below, this commit adds batch_norm_1d_test, batch_norm_2d_test, batch_norm_3d_test, batch_norm_flat_test, batch_norm_invalid_bias_rank_test, and batch_norm_invalid_rank_test, and removes the old batchnorm_1d_test and batchnorm_3d_test.)
......@@ -314,38 +314,107 @@ def averagepool_same_upper_test():
@onnx_test
def batchnorm_1d_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5])
scale = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
bias = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3])
mean = helper.make_tensor_value_info('3', TensorProto.FLOAT, [3])
var = helper.make_tensor_value_info('4', TensorProto.FLOAT, [3])
out = helper.make_tensor_value_info('5', TensorProto.FLOAT, [1, 3, 5])
node = onnx.helper.make_node('BatchNormalization',
inputs=['0', '1', '2', '3', '4'],
outputs=['5'],
epsilon=1e-6,
momentum=0.9)
def batch_norm_flat_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [1])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [1])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [1])
var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [1])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 'scale', 'bias', 'mean', 'variance'],
outputs=['y'],
epsilon=1e-6)
return ([node], [x, scale, bias, mean, var], [out])
@onnx_test
def batchnorm_3d_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5, 5, 5])
scale = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
bias = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3])
mean = helper.make_tensor_value_info('3', TensorProto.FLOAT, [3])
var = helper.make_tensor_value_info('4', TensorProto.FLOAT, [3])
out = helper.make_tensor_value_info('5', TensorProto.FLOAT,
[1, 3, 5, 5, 5])
node = onnx.helper.make_node('BatchNormalization',
inputs=['0', '1', '2', '3', '4'],
outputs=['5'],
epsilon=1e-6,
momentum=0.9)
def batch_norm_1d_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [2, 3, 4])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [3])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [3])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [3])
var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [3])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [2, 3, 4])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 'scale', 'bias', 'mean', 'variance'],
outputs=['y'])
return ([node], [x, scale, bias, mean, var], [out])
@onnx_test
def batch_norm_2d_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3, 4, 4])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [3])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [3])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [3])
var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [3])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4, 4])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 'scale', 'bias', 'mean', 'variance'],
outputs=['y'])
return ([node], [x, scale, bias, mean, var], [out])
@onnx_test
def batch_norm_3d_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT16,
[2, 2, 2, 2, 2])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT16, [2])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT16, [2])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT16, [2])
var = helper.make_tensor_value_info('variance', TensorProto.FLOAT16, [2])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT16,
[2, 2, 2, 2, 2])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 'scale', 'bias', 'mean', 'variance'],
outputs=['y'],
epsilon=1e-6)
return ([node], [x, scale, bias, mean, var], [out])
@onnx_test
def batch_norm_invalid_rank_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [8, 8])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [8])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [8])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [8])
var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [8])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [8, 8])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 'scale', 'bias', 'mean', 'variance'],
outputs=['y'])
return ([node], [x, scale, bias, mean, var], [out])
@onnx_test
def batch_norm_invalid_bias_rank_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3, 4, 4])
scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [3])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [3, 1])
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [3])
var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [3])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4, 4])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['x', 'scale', 'bias', 'mean', 'variance'],
outputs=['y'])
return ([node], [x, scale, bias, mean, var], [out])
......