gaoqiong / MIGraphX

Commit 6598252e (unverified)
Authored Jul 26, 2018 by Paul Fultz II, committed by GitHub on Jul 26, 2018

Merge pull request #24 from ROCmSoftwarePlatform/fix-bn-cpu

Fix bn cpu

Parents: 68d69739, 58253f85

Showing 3 changed files with 53 additions and 22 deletions (+53 -22)
.gitignore                          +1  -0
src/targets/cpu/cpu_lowering.cpp    +21 -12
test/cpu_ops_test.cpp               +31 -10
.gitignore (new file, 0 → 100644)

+*.swp
src/targets/cpu/cpu_lowering.cpp

@@ -24,11 +24,11 @@ T zero(const T&)
 // args[1] -> mini batch mean
 // args[2] -> mini batch variance
 // args[3] -> gamma
-// args[4] -> beta
+// args[4] -> bias
 //
 // The equation to compute batch norm for inference is:
 //
-// output[i] = beta + gamma * (input[i] + mean) / sqrt(variance + epsilon)
+// output[i] = bias + gamma * (input[i] + mean) / sqrt(variance + epsilon)
 //
 // the input data format should be nchw
 //
@@ -46,17 +46,26 @@ struct cpu_batch_norm_inference
         double epsilon           = op.epsilon;
         auto input               = args[0];
-        auto mini_batch_mean     = args[1].at<float>();
-        auto mini_batch_variance = args[2].at<float>();
-        auto gamma               = args[3].at<float>();
-        auto beta                = args[4].at<float>();
-        visit_all(output, input)([&](auto result, auto buffer) {
-            std::transform(buffer.begin(), buffer.end(), result.begin(), [&](auto x) {
-                return gamma * (x - mini_batch_mean) / std::sqrt(mini_batch_variance + epsilon) +
-                       beta;
-            });
-        });
+        auto mini_batch_mean     = args[1];
+        auto mini_batch_variance = args[2];
+        auto arg_gamma           = args[3];
+        auto arg_bias            = args[4];
+
+        auto num_batch    = output_shape.lens()[0];
+        auto num_channels = output_shape.lens()[1];
+        auto image_height = output_shape.lens()[2];
+        auto image_width  = output_shape.lens()[3];
+
+        visit_all(output, input, mini_batch_mean, mini_batch_variance, arg_gamma, arg_bias)(
+            [&](auto result, auto buffer, auto mean, auto variance, auto gamma, auto bias) {
+                dfor(num_batch, num_channels, image_height, image_width)(
+                    [&](std::size_t n, std::size_t c, std::size_t h, std::size_t w) {
+                        result(n, c, h, w) = gamma(c) * (buffer(n, c, h, w) - mean(c)) /
+                                                 std::sqrt(variance(c) + epsilon) +
+                                             bias(c);
+                    });
+            });

         return output;
     }
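The substance of the cpu_lowering.cpp change: the old code read the batch-norm statistics with `.at<float>()`, collapsing each of mean, variance, gamma, and bias to a single scalar, whereas the new code visits them as whole tensors and indexes them per channel of the NCHW input. The following is a minimal standalone sketch of that per-channel computation, written with plain `std::vector` and illustrative parameter names rather than MIGraphX's `argument`/`visit_all`/`dfor` machinery; the default epsilon value is only an example.

#include <cmath>
#include <cstddef>
#include <vector>

// Standalone sketch of batch-norm inference over NCHW data, mirroring the
// per-channel indexing this commit introduces. Types and names here are
// illustrative, not MIGraphX's own API.
std::vector<float> batch_norm_inference_nchw(const std::vector<float>& input,
                                             const std::vector<float>& mean,     // one per channel
                                             const std::vector<float>& variance, // one per channel
                                             const std::vector<float>& gamma,    // one per channel
                                             const std::vector<float>& bias,     // one per channel
                                             std::size_t n, std::size_t c,
                                             std::size_t h, std::size_t w,
                                             double epsilon = 1.0e-6) // illustrative default
{
    std::vector<float> output(input.size());
    for(std::size_t ni = 0; ni < n; ni++)
        for(std::size_t ci = 0; ci < c; ci++)
            for(std::size_t hi = 0; hi < h; hi++)
                for(std::size_t wi = 0; wi < w; wi++)
                {
                    // Linear offset for element (ni, ci, hi, wi) in NCHW layout.
                    std::size_t idx = ((ni * c + ci) * h + hi) * w + wi;
                    // Each element is normalized with the statistics of its own channel.
                    output[idx] = gamma[ci] * (input[idx] - mean[ci]) /
                                      std::sqrt(variance[ci] + epsilon) +
                                  bias[ci];
                }
    return output;
}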
test/cpu_ops_test.cpp

@@ -9,19 +9,40 @@
 void batch_norm_inference_test()
 {
     migraph::program p;
-    migraph::shape s{migraph::shape::float_type, {4}};
-    auto x        = p.add_literal(migraph::literal{s, {1, 2, 3, 4}});
-    auto gamma    = p.add_literal(migraph::literal{s, {1}});
-    auto beta     = p.add_literal(migraph::literal{s, {0}});
-    auto mean     = p.add_literal(migraph::literal{s, {0}});
-    auto variance = p.add_literal(migraph::literal{s, {1}});
-    p.add_instruction(migraph::batch_norm_inference{}, x, mean, variance, gamma, beta);
+    const size_t width = 2, height = 2, channels = 4, batches = 2;
+    const float x_val = 8.0f, mean_val = 2.0f, variance_val = 4.0f, scale_val = 2.0f,
+                bias_val = 1.0f;
+    const float output_val = scale_val * (x_val - mean_val) / (std::sqrt(variance_val)) + bias_val;
+
+    migraph::shape s{migraph::shape::float_type, {batches, channels, height, width}};
+    migraph::shape vars{migraph::shape::float_type, {channels}};
+    std::vector<float> x_data(width * height * channels * batches);
+    std::vector<float> scale_data(channels);
+    std::vector<float> bias_data(channels);
+    std::vector<float> mean_data(channels);
+    std::vector<float> variance_data(channels);
+    std::fill(x_data.begin(), x_data.end(), x_val);
+    std::fill(mean_data.begin(), mean_data.end(), mean_val);
+    std::fill(variance_data.begin(), variance_data.end(), variance_val);
+    std::fill(scale_data.begin(), scale_data.end(), scale_val);
+    std::fill(bias_data.begin(), bias_data.end(), bias_val);
+
+    auto x        = p.add_literal(migraph::literal{s, x_data});
+    auto scale    = p.add_literal(migraph::literal{vars, scale_data});
+    auto bias     = p.add_literal(migraph::literal{vars, bias_data});
+    auto mean     = p.add_literal(migraph::literal{vars, mean_data});
+    auto variance = p.add_literal(migraph::literal{vars, variance_data});
+
+    p.add_instruction(migraph::batch_norm_inference{}, x, mean, variance, scale, bias);
     p.compile(migraph::cpu::cpu_target{});
     auto result = p.eval({});
-    std::vector<float> result_vector(4);
+    std::vector<float> result_vector(width * height * channels * batches);
+    std::vector<float> gold(width * height * channels * batches);
+    std::fill(gold.begin(), gold.end(), output_val);
     result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
-    std::vector<float> gold = {1 / (1 + 1.0e-6), 2 / (1 + 1.0e-6), 3 / (1 + 1.0e-6), 4 / (1 + 1.0e-6)};
     EXPECT(test::verify_range(result_vector, gold));
 }
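For the constants the updated test fills in (x_val = 8, mean_val = 2, variance_val = 4, scale_val = 2, bias_val = 1), the gold value works out to 2 * (8 - 2) / sqrt(4) + 1 = 7, so every element of the 2x4x2x2 output is expected to equal 7. Assuming the operator's default epsilon of 1.0e-6 implied by the old gold values, the epsilon added inside the square root by the CPU lowering only perturbs this in roughly the seventh decimal place.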