"vscode:/vscode.git/clone" did not exist on "238922e98dd0e8254b5c0921b241a1f5a151782f"
Commit 12ff54a7 authored by kahmed10, committed by mvermeulen

Instance norm op (#428)



* fix pad calc

* formatting

* initial testing

* formatting

* add tests

* formatting

* add verify onnx test

* formatting

* python3 test

* fix test case and impl bug

* fix test case and impl bug

* fix test case and impl bug

* formatting

* change parse_test

* formatting

* fix syntax error
Co-authored-by: mvermeulen <5479696+mvermeulen@users.noreply.github.com>
parent e320f89f
@@ -35,8 +35,11 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-
     libpthread-stubs0-dev \
     libssl-dev \
     python \
+    python3 \
     python-dev \
+    python3-dev \
     python-pip \
+    python3-pip \
     rocm-device-libs \
     rocm-opencl \
     rocm-opencl-dev \
@@ -53,7 +56,7 @@ RUN pip install cget
 RUN pip install https://github.com/pfultz2/rclone/archive/master.tar.gz
 # Install yapf
-RUN pip install yapf==0.28.0
+RUN pip3 install yapf==0.28.0
 # Install hcc
 RUN rclone -b roc-2.6.x -c 0f4c96b7851af2663a7f3ac16ecfb76c7c78a5bf https://github.com/RadeonOpenCompute/hcc.git /hcc
@@ -91,6 +91,7 @@ struct onnx_parser
         add_mem_op("Gemm", &onnx_parser::parse_gemm);
         add_mem_op("MatMul", &onnx_parser::parse_matmul);
         add_mem_op("BatchNormalization", &onnx_parser::parse_batchnorm);
+        add_mem_op("InstanceNormalization", &onnx_parser::parse_instancenorm);
         add_mem_op("Softmax", &onnx_parser::parse_softmax<op::softmax>);
         add_mem_op("LogSoftmax", &onnx_parser::parse_softmax<op::logsoftmax>);
         add_mem_op("Squeeze", &onnx_parser::parse_squeeze);
@@ -795,6 +796,42 @@ struct onnx_parser
         return prog.add_instruction(op, std::move(args));
     }
+    instruction_ref parse_instancenorm(const std::string&,
+                                       attribute_map attributes,
+                                       std::vector<instruction_ref> args)
+    {
+        // y = scale * ( x - mean ) / sqrt ( variance + epsilon ) + bias
+        // mean     = reduce_mean({H, W}, x)
+        // variance = reduce_mean({H, W}, (x - mean)^2)
+        float epsilon = 1e-5f;
+        if(contains(attributes, "epsilon"))
+        {
+            epsilon = parse_value(attributes.at("epsilon")).at<float>();
+        }
+        auto x     = args[0];
+        auto scale = args[1];
+        auto bias  = args[2];
+        auto dims  = x->get_shape().lens();
+
+        auto mean       = prog.add_instruction(op::reduce_mean{{2, 3}}, x);
+        auto mean_bcast = prog.add_instruction(op::multibroadcast{dims}, mean);
+        auto l0         = prog.add_instruction(op::sqdiff{}, x, mean_bcast);
+        auto variance   = prog.add_instruction(op::reduce_mean{{2, 3}}, l0);
+        auto l1         = prog.add_instruction(op::sub{}, x, mean_bcast);
+
+        auto epsilon_literal = prog.add_literal(epsilon);
+        auto epsilon_bcast   = prog.add_instruction(op::multibroadcast{dims}, epsilon_literal);
+        auto variance_bcast  = prog.add_instruction(op::multibroadcast{dims}, variance);
+        auto l2              = prog.add_instruction(op::add{}, variance_bcast, epsilon_bcast);
+        auto l3              = prog.add_instruction(op::rsqrt{}, l2);
+        auto l4              = prog.add_instruction(op::mul{}, l1, l3);
+
+        // scale and bias are per-channel values, broadcast along axis 1
+        auto scale_bcast = prog.add_instruction(op::broadcast{1, dims}, scale);
+        auto bias_bcast  = prog.add_instruction(op::broadcast{1, dims}, bias);
+        auto l5          = prog.add_instruction(op::mul{}, l4, scale_bcast);
+        return prog.add_instruction(op::add{}, l5, bias_bcast);
+    }
+
     instruction_ref parse_leaky_relu(const std::string&,
                                      attribute_map attributes,
                                      std::vector<instruction_ref> args)
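For reference, parse_instancenorm above builds the standard instance-normalization formula as a graph: each (batch, channel) slice is normalized over its spatial axes {H, W}, then scaled and shifted per channel. A minimal NumPy sketch of the same computation (the helper name instance_norm_ref is illustrative, not part of MIGraphX):

import numpy as np

def instance_norm_ref(x, scale, bias, epsilon=1e-5):
    # x: (N, C, H, W); scale, bias: (C,)
    mean = x.mean(axis=(2, 3), keepdims=True)                      # reduce_mean over {H, W}
    variance = ((x - mean) ** 2).mean(axis=(2, 3), keepdims=True)  # reduce_mean of sqdiff
    normed = (x - mean) / np.sqrt(variance + epsilon)              # mirrors the rsqrt/mul pair
    return scale[None, :, None, None] * normed + bias[None, :, None, None]

Note the two broadcast flavors in the parser: mean, variance, and epsilon are multibroadcast to the full shape, while scale and bias use broadcast{1, dims} because they are per-channel (axis 1) values.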
@@ -902,6 +902,50 @@ def initializer_not_an_input():
     return ([node], [x], [y], [w])
+@onnx_test
+def instance_norm_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 2, 3, 3])
+    scale = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2])
+    bias = helper.make_tensor_value_info('2', TensorProto.FLOAT, [2])
+    y = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 2, 3, 3])
+
+    node = onnx.helper.make_node('InstanceNormalization',
+                                 inputs=['0', '1', '2'],
+                                 outputs=['3'])
+
+    return ([node], [x, scale, bias], [y])
+@onnx_test
+def instance_norm_val_test():
+    x = np.array([[[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
+                   [[0, 1, 2], [3, 4, 5], [6, 7, 8]]]])
+    scale = np.array([1, 2])
+    bias = np.array([0, 1])
+
+    x_tensor = helper.make_tensor(name='x_tensor',
+                                  data_type=TensorProto.FLOAT,
+                                  dims=x.shape,
+                                  vals=x.flatten().astype(np.float32))
+    scale_tensor = helper.make_tensor(name='scale_tensor',
+                                      data_type=TensorProto.FLOAT,
+                                      dims=scale.shape,
+                                      vals=scale.flatten().astype(np.float32))
+    bias_tensor = helper.make_tensor(name='bias_tensor',
+                                     data_type=TensorProto.FLOAT,
+                                     dims=bias.shape,
+                                     vals=bias.flatten().astype(np.float32))
+
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 3, 3])
+
+    node = onnx.helper.make_node(
+        'InstanceNormalization',
+        inputs=['x_tensor', 'scale_tensor', 'bias_tensor'],
+        outputs=['y'])
+
+    return ([node], [], [y], [x_tensor, scale_tensor, bias_tensor])
 @onnx_test
 def leaky_relu_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
(Binary file added: instance_norm_test.onnx — serialized protobuf for the InstanceNormalization model with inputs "0" [1, 2, 3, 3], "1" [2], "2" [2] and output "3" [1, 2, 3, 3]; contents not human-readable.)
@@ -659,6 +659,38 @@ TEST_CASE(initializer_not_an_input)
     EXPECT(p == prog);
 }
+TEST_CASE(instance_norm_test)
+{
+    std::vector<size_t> dims{1, 2, 3, 3};
+    migraphx::shape s1{migraphx::shape::float_type, dims};
+    migraphx::shape s2{migraphx::shape::float_type, {2}};
+
+    migraphx::program p;
+    auto x     = p.add_parameter("0", s1);
+    auto scale = p.add_parameter("1", s2);
+    auto bias  = p.add_parameter("2", s2);
+
+    auto mean       = p.add_instruction(migraphx::op::reduce_mean{{2, 3}}, x);
+    auto mean_bcast = p.add_instruction(migraphx::op::multibroadcast{dims}, mean);
+    auto l0         = p.add_instruction(migraphx::op::sqdiff{}, x, mean_bcast);
+    auto variance   = p.add_instruction(migraphx::op::reduce_mean{{2, 3}}, l0);
+    auto l1         = p.add_instruction(migraphx::op::sub{}, x, mean_bcast);
+
+    auto epsilon_literal = p.add_literal(1e-5f);
+    auto epsilon_bcast   = p.add_instruction(migraphx::op::multibroadcast{dims}, epsilon_literal);
+    auto variance_bcast  = p.add_instruction(migraphx::op::multibroadcast{dims}, variance);
+    auto l2              = p.add_instruction(migraphx::op::add{}, variance_bcast, epsilon_bcast);
+    auto l3              = p.add_instruction(migraphx::op::rsqrt{}, l2);
+    auto l4              = p.add_instruction(migraphx::op::mul{}, l1, l3);
+
+    auto scale_bcast = p.add_instruction(migraphx::op::broadcast{1, dims}, scale);
+    auto bias_bcast  = p.add_instruction(migraphx::op::broadcast{1, dims}, bias);
+    auto l5          = p.add_instruction(migraphx::op::mul{}, l4, scale_bcast);
+    p.add_instruction(migraphx::op::add{}, l5, bias_bcast);
+
+    auto prog = optimize_onnx("instance_norm_test.onnx");
+    EXPECT(p == prog);
+}
+
 TEST_CASE(leaky_relu_test)
 {
     migraphx::program p;
#include <iostream>
#include <vector>
#include <migraphx/literal.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/program.hpp>
#include <migraphx/cpu/target.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/verify.hpp>
#include <migraphx/onnx.hpp>
#include "test.hpp"

TEST_CASE(instance_norm_test)
{
    migraphx::program p = migraphx::parse_onnx("instance_norm_val_test.onnx");
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});

    std::vector<float> result_vector; // sized by assign below (1x2x3x3 = 18 values)
    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });

    // channel 0: scale = 1, bias = 0; channel 1: scale = 2, bias = 1
    std::vector<float> gold = {-1.54919,  -1.16189, -0.774596, -0.387298, 0,        0.387298,
                               0.774596,  1.16189,  1.54919,   -2.09838,  -1.32379, -0.549192,
                               0.225404,  1,        1.7746,    2.54919,   3.32379,  4.09838};
    EXPECT(migraphx::verify_range(result_vector, gold));
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
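The gold values can be checked by hand: each 3x3 channel of the input holds 0 through 8, so the per-channel mean is 4 and the population variance is 60/9 ≈ 6.6667. A quick NumPy sanity check that reproduces both channels of the expected output:

import numpy as np

x = np.arange(9, dtype=np.float32).reshape(1, 1, 3, 3)
normed = (x - x.mean()) / np.sqrt(x.var() + 1e-5)  # mean = 4, var = 60/9
print(normed.flatten())                            # channel 0 (scale=1, bias=0): -1.54919 ... 1.54919
print((2 * normed + 1).flatten())                  # channel 1 (scale=2, bias=1): -2.09838 ... 4.09838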
@@ -379,7 +379,7 @@ def template_eval(template, **kwargs):
     escaped = (re.escape(start), re.escape(end))
     mark = re.compile('%s(.*?)%s' % escaped, re.DOTALL)
     for key in kwargs:
-        exec ('%s = %s' % (key, kwargs[key]))
+        exec('%s = %s' % (key, kwargs[key]))
     for item in mark.findall(template):
         template = template.replace(start + item + end,
                                     str(eval(item.strip())))
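The only change in this hunk is whitespace: exec is a statement in Python 2 but a builtin function in Python 3, and the call form without the space is the Python 3 idiom while remaining valid in both, which keeps this script portable. A small illustration of the portable form:

# exec as a function call works on both Python 2 and 3
code = 'answer = 40 + 2'
namespace = {}
exec(code, namespace)
print(namespace['answer'])  # 42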