gaoqiong / MIGraphX · Commit 41d4e92b

Commit 41d4e92b, authored Oct 14, 2022 by Khalique Ahmed
Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into develop
Parents: a8eb886b, 32f6388c
Changes (51) — showing 20 changed files with 255 additions and 921 deletions (+255 −921).
src/targets/gpu/include/migraphx/gpu/batch_norm_inference.hpp  +0 −61
src/targets/gpu/lowering.cpp                                   +0 −39
src/targets/gpu/target.cpp                                     +0 −3
src/targets/ref/lowering.cpp                                   +8 −116
src/tf/parse_batchnorm.cpp                                     +27 −5
src/tf/parse_conv.cpp                                          +0 −5
src/tf/parse_depthwiseconv.cpp                                 +0 −5
test/gpu/mlir.cpp                                              +2 −2
test/onnx/onnx_test.cpp                                        +8 −15
test/op_shape_test.cpp                                         +3 −16
test/ref_ops_test.cpp                                          +81 −206
test/rewrite_batchnorm_test.cpp                                +0 −340
test/tf/batchnorm_half_test.pb                                 +0 −0
test/tf/batchnorm_test.pb                                      +0 −0
test/tf/batchnormv3_test.pb                                    +0 −0
test/tf/gen_tf_pb.py                                           +40 −12
test/tf/tf_test.cpp                                            +84 −42
test/verify/quant_conv_default_mode.cpp                        +1 −4
test/verify/quant_conv_int8x4_default.cpp                      +1 −4
test/verify/quant_conv_valid_mode.cpp                          +0 −46
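The unifying change in this merge is the removal of MIGraphX's dedicated `batch_norm_inference` operator: the TF frontend now decomposes batch norm into elementwise primitives (`src/tf/parse_batchnorm.cpp`), the GPU and ref targets drop their implementations, and the `rewrite_batchnorm` pass and its tests go away. As a reference point, here is a minimal standalone sketch of the arithmetic the decomposed graph computes for a per-channel (spatial) batch norm over NCHW data; this is plain C++ written for this note, not the MIGraphX API:

```cpp
// y[i] = gamma[c] * (x[i] - mean[c]) / sqrt(variance[c] + epsilon) + bias[c],
// where c is the channel index of element i in NCHW layout.
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<float> batch_norm_inference(const std::vector<float>& x,
                                        const std::vector<float>& gamma,
                                        const std::vector<float>& bias,
                                        const std::vector<float>& mean,
                                        const std::vector<float>& variance,
                                        std::size_t c, std::size_t h, std::size_t w,
                                        float epsilon = 1e-5f)
{
    std::vector<float> y(x.size());
    for(std::size_t i = 0; i < x.size(); i++)
    {
        std::size_t ch = (i / (h * w)) % c; // channel of element i
        y[i] = gamma[ch] * (x[i] - mean[ch]) / std::sqrt(variance[ch] + epsilon) + bias[ch];
    }
    return y;
}
```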
src/targets/gpu/include/migraphx/gpu/batch_norm_inference.hpp (deleted, 100644 → 0)
```cpp
/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#ifndef MIGRAPHX_GUARD_RTGLIB_BATCHNORM_HPP
#define MIGRAPHX_GUARD_RTGLIB_BATCHNORM_HPP

#include <migraphx/argument.hpp>
#include <migraphx/op/batch_norm_inference.hpp>
#include <migraphx/reflect.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

struct context;

struct miopen_batch_norm_inference
{
    op::batch_norm_inference op;

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return migraphx::reflect(self.op, f);
    }

    std::string name() const { return "gpu::batch_norm_inference"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument compute(context& ctx,
                     const shape& output_shape,
                     const std::vector<argument>& args) const;
    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
    {
        return shapes.size() - 1;
    }
};

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
```
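A note on the `output_alias` above: returning `shapes.size() - 1` marks the op's last argument as the buffer its result aliases, which matches the lowering below, where `insert_allocation` creates an output buffer passed as the final input. A tiny sketch of that convention (names hypothetical, inferred from this diff rather than separate documentation):

```cpp
// The op is invoked as (x, scale, bias, mean, variance, output_buffer);
// output_alias reports that the result lives in the last argument.
#include <cstddef>

std::ptrdiff_t output_alias(std::size_t num_arg_shapes) // e.g. 6 shapes
{
    return static_cast<std::ptrdiff_t>(num_arg_shapes) - 1; // -> index 5
}
```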
src/targets/gpu/lowering.cpp
```diff
@@ -37,7 +37,6 @@
 #include <migraphx/op/quant_convolution.hpp>
 #include <migraphx/op/quant_dot.hpp>
-#include <migraphx/gpu/batch_norm_inference.hpp>
 #include <migraphx/gpu/context.hpp>
 #include <migraphx/gpu/convolution.hpp>
 #include <migraphx/gpu/deconvolution.hpp>
@@ -115,7 +114,6 @@ struct miopen_apply
         add_extend_op("scatter_none");
         add_extend_op("topk");
-        add_batch_norm_inference_op();
         add_convolution_op();
         add_deconvolution_op();
         add_gemm_op<op::dot>("dot");
@@ -336,43 +334,6 @@ struct miopen_apply
         });
     }
-
-    void add_batch_norm_inference_op()
-    {
-        apply_map.emplace("batch_norm_inference", [=](instruction_ref ins) {
-            auto&& op       = any_cast<op::batch_norm_inference>(ins->get_operator());
-            auto output     = insert_allocation(ins, ins->get_shape());
-            shape old_shape = ins->inputs().at(1)->get_shape();
-            auto input      = ins->inputs()[0];
-            auto input_lens = input->get_shape().lens();
-            std::vector<int64_t> rsp_lens(input_lens.size(), 1);
-            // for per_activation case, also need to reshape input
-            if(op.bn_mode == op::batch_norm_inference::per_activation)
-            {
-                std::copy(input_lens.begin() + 1, input_lens.end(), rsp_lens.begin() + 1);
-            }
-            else
-            {
-                rsp_lens[1] = static_cast<int64_t>(old_shape.elements());
-            }
-
-            auto reshape_op = op::reshape{rsp_lens};
-            std::vector<instruction_ref> reshapes;
-            std::transform(ins->inputs().begin() + 1,
-                           ins->inputs().end(),
-                           std::back_inserter(reshapes),
-                           [&](auto i) { return mod->insert_instruction(ins, reshape_op, i); });
-
-            return mod->replace_instruction(ins,
-                                            miopen_batch_norm_inference{op},
-                                            input,
-                                            reshapes[0],
-                                            reshapes[1],
-                                            reshapes[2],
-                                            reshapes[3],
-                                            output);
-        });
-    }
-
     // use 0 - input to represent neg
     void add_neg_op()
     {
```
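The removed lowering's only nontrivial logic is the reshape of the four parameter tensors before handing them to MIOpen. A small sketch of that target-shape computation with concrete numbers (the example shapes are hypothetical, chosen for illustration):

```cpp
// spatial mode: params of shape {C} become {1, C, 1, ...} (rank of the input);
// per_activation mode: params take the input's trailing dims, {1, C, H, W, ...}.
#include <algorithm>
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

std::vector<int64_t> reshape_lens(const std::vector<int64_t>& input_lens,
                                  const std::vector<int64_t>& param_lens,
                                  bool per_activation)
{
    std::vector<int64_t> rsp_lens(input_lens.size(), 1);
    if(per_activation)
        std::copy(input_lens.begin() + 1, input_lens.end(), rsp_lens.begin() + 1);
    else
        rsp_lens[1] = std::accumulate(
            param_lens.begin(), param_lens.end(), int64_t{1}, std::multiplies<>{});
    return rsp_lens;
}
// e.g. reshape_lens({2, 3, 4, 5}, {3}, false)      -> {1, 3, 1, 1}
//      reshape_lens({2, 3, 4, 5}, {3, 4, 5}, true) -> {1, 3, 4, 5}
```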
src/targets/gpu/target.cpp
```diff
@@ -41,7 +41,6 @@
 #include <migraphx/propagate_constant.hpp>
 #include <migraphx/register_target.hpp>
 #include <migraphx/replace_allocate.hpp>
-#include <migraphx/rewrite_batchnorm.hpp>
 #include <migraphx/rewrite_gelu.hpp>
 #include <migraphx/rewrite_pooling.hpp>
 #include <migraphx/rewrite_quantization.hpp>
@@ -110,8 +109,6 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
         dead_code_elimination{},
         insert_pad{},
         dead_code_elimination{},
-        rewrite_batchnorm{},
-        dead_code_elimination{},
         rewrite_rnn{},
         dead_code_elimination{},
         inline_module{},
```
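With the operator gone, the `rewrite_batchnorm` pass is dropped from the GPU pass pipeline (its tests are deleted further below). For context, at inference time that pass folded a batch norm following a convolution into the convolution's literals. The sketch below restates the standard folding identity in plain C++; it is not code from the deleted pass:

```cpp
// gamma * (conv(x, w) - mean) / sqrt(var + eps) + bias == conv(x, w') + b'
// with w'[k,...] = w[k,...] * s_k and b'[k] = bias[k] - mean[k] * s_k,
// where s_k = gamma[k] / sqrt(var[k] + eps).
#include <cmath>
#include <cstddef>
#include <vector>

void fold_batchnorm_into_conv(std::vector<float>& w, // [K, C, R, S] flattened, updated in place
                              std::vector<float>& b, // per-output-channel bias, assumed 0 initially
                              const std::vector<float>& gamma,
                              const std::vector<float>& bias,
                              const std::vector<float>& mean,
                              const std::vector<float>& var,
                              float eps)
{
    const std::size_t k        = gamma.size();
    const std::size_t per_kern = w.size() / k;
    for(std::size_t i = 0; i < k; i++)
    {
        const float s = gamma[i] / std::sqrt(var[i] + eps);
        for(std::size_t j = 0; j < per_kern; j++)
            w[i * per_kern + j] *= s;
        b[i] = bias[i] - mean[i] * s;
    }
}
```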
src/targets/ref/lowering.cpp
```diff
@@ -26,7 +26,6 @@
 #include <migraphx/instruction.hpp>
 #include <migraphx/dfor.hpp>
 #include <migraphx/op/identity.hpp>
-#include <migraphx/op/batch_norm_inference.hpp>
 #include <migraphx/op/convolution.hpp>
 #include <migraphx/op/deconvolution.hpp>
 #include <migraphx/op/quant_convolution.hpp>
@@ -75,84 +74,6 @@ typename std::conditional_t<std::is_integral<T>{}, std::make_signed<T>, std::ena
     return x;
 }
-
-//
-// ref implementation of batch norm for inference
-//
-// inputs are:
-// args[0] -> input data buffer
-// args[1] -> gamma
-// args[2] -> bias
-// args[3] -> mini batch mean
-// args[4] -> mini batch variance
-//
-// The equation to compute batch norm for inference is:
-//
-// output[i] = bias + gamma * (input[i] - mean) / sqrt(variance + epsilon)
-//
-// the input data format should be nchw
-//
-struct ref_batch_norm_inference
-{
-    op::batch_norm_inference op;
-
-    template <class Self, class F>
-    static auto reflect(Self& self, F f)
-    {
-        return migraphx::reflect(self.op, f);
-    }
-
-    std::string name() const { return "ref::batch_norm_inference"; }
-
-    shape compute_shape(const std::vector<shape>& inputs) const
-    {
-        return op.compute_shape(inputs);
-    }
-
-    argument compute(context&, const shape& output_shape, std::vector<argument> args) const
-    {
-        argument output{output_shape};
-        double epsilon           = op.epsilon;
-        auto input               = args[0];
-        auto arg_gamma           = args[1];
-        auto arg_bias            = args[2];
-        auto mini_batch_mean     = args[3];
-        auto mini_batch_variance = args[4];
-
-        if(op.bn_mode == op::batch_norm_inference::spatial)
-        {
-            visit_all(output, input, mini_batch_mean, mini_batch_variance, arg_gamma, arg_bias)(
-                [&](auto result, auto buffer, auto mean, auto variance, auto gamma, auto bias) {
-                    par_for(output_shape.elements(), [&](auto i) {
-                        auto idx = output_shape.multi(i);
-                        auto c   = idx[1];
-                        assert((variance[c] + epsilon) > 0);
-                        result[i] = gamma[c] * (buffer[i] - mean[c]) /
-                                        std::sqrt(variance[c] + epsilon) +
-                                    bias[c];
-                    });
-                });
-        }
-
-        if(op.bn_mode == op::batch_norm_inference::per_activation)
-        {
-            visit_all(output, input, mini_batch_mean, mini_batch_variance, arg_gamma, arg_bias)(
-                [&](auto result, auto buffer, auto mean, auto variance, auto gamma, auto bias) {
-                    par_for(output_shape.elements(), [&](auto i) {
-                        auto idx   = output_shape.multi(i);
-                        idx[0]     = 0;
-                        auto index = output_shape.index(idx);
-                        assert((variance[index] + epsilon) > 0);
-                        result[i] = gamma[index] * (buffer[i] - mean[index]) /
-                                        std::sqrt(variance[index] + epsilon) +
-                                    bias[index];
-                    });
-                });
-        }
-        return output;
-    }
-};
-MIGRAPHX_REGISTER_OP(ref_batch_norm_inference)
-
 struct ref_lrn
 {
     op::lrn op;
@@ -237,15 +158,16 @@ struct ref_convolution : auto_register_op<ref_convolution<Op>>
     argument compute(context&, shape output_shape, std::vector<argument> args) const
     {
         std::vector<std::size_t> padding;
-        if(op.use_dynamic_same_auto_pad)
+        if(op.padding_mode != op::padding_mode_t::default_)
         {
             auto input_lens = args[0].get_shape().lens();
-            std::vector<std::size_t> img_lens{input_lens.begin() + 2, input_lens.end()};
             auto weights_lens = args[1].get_shape().lens();
-            std::vector<std::size_t> k_lens{weights_lens.begin() + 2, weights_lens.end()};
-            padding      = calc_dyn_auto_pad(img_lens, k_lens, op.stride, op.dilation);
-            output_shape =
-                compute_padded_shape({args.at(0).get_shape(), args.at(1).get_shape()}, padding);
+            padding =
+                op.padding_mode == op::same_upper
+                    ? calc_dyn_auto_pad(input_lens, weights_lens, op.stride, op.dilation, true)
+                    : calc_dyn_auto_pad(input_lens, weights_lens, op.stride, op.dilation, false);
+            output_shape = compute_padded_shape(
+                args[0].get_shape(), args[1].get_shape(), padding, op.stride, op.dilation);
         }
         else
         {
@@ -313,34 +235,6 @@ struct ref_convolution : auto_register_op<ref_convolution<Op>>
             });
         return result;
     }
-
-    private:
-    /*!
-     * Used for dynamic auto padding since padding needs to be computed at evaluation time.
-     * \param inputs two fixed shape inputs [input_tensor, weights]
-     * \param padding from auto_pad calculation
-     */
-    shape compute_padded_shape(const std::vector<shape>& inputs,
-                               const std::vector<std::size_t>& padding) const
-    {
-        const shape& input            = inputs.at(0);
-        const shape& weights          = inputs.at(1);
-        const size_t num_spatial_dims = input.lens().size() - 2;
-        std::vector<size_t> output_lens{input.lens()[0], weights.lens()[0]};
-        // calculate the output shape of the convolution: ((W - K + 2P) / S) + 1
-        for(size_t i = 0; i < num_spatial_dims; i++)
-        {
-            auto padding_factor = padding[i] + padding[i + num_spatial_dims];
-            output_lens.push_back(std::size_t(std::max<std::ptrdiff_t>(
-                1,
-                (input.lens()[i + 2] - (1 + op.dilation[i] * (weights.lens()[i + 2] - 1)) +
-                 padding_factor) /
-                        op.stride[i] +
-                    1)));
-        }
-        return inputs[0].with_lens(output_lens);
-    }
 };

 struct ref_im2col
@@ -731,8 +625,6 @@ struct ref_apply
     void init()
     {
-        apply_map["batch_norm_inference"] =
-            extend_op<ref_batch_norm_inference, op::batch_norm_inference>();
         apply_map["convolution"] = extend_op<ref_convolution<op::convolution>, op::convolution>();
         apply_map["dot"]         = extend_op<ref_gemm, op::dot>();
         apply_map["quant_dot"]   = extend_op<ref_quant_gemm, op::quant_dot>();
```
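The removed `compute_padded_shape` helper encodes the usual convolution output-extent formula, `out = (W - K_eff + P_total) / S + 1` with the dilated kernel `K_eff = 1 + d * (K - 1)`, clamped to at least 1. A compact restatement with a worked number (inputs hypothetical):

```cpp
// e.g. in = 4, kernel = 3, pad_total = 2, stride = 1, dilation = 1 -> out = 4.
#include <algorithm>
#include <cstddef>

std::size_t conv_out_dim(std::size_t in, std::size_t kernel, std::size_t pad_total,
                         std::size_t stride, std::size_t dilation)
{
    const std::ptrdiff_t dilated = 1 + static_cast<std::ptrdiff_t>(dilation) * (kernel - 1);
    const std::ptrdiff_t out =
        (static_cast<std::ptrdiff_t>(in + pad_total) - dilated) /
            static_cast<std::ptrdiff_t>(stride) +
        1;
    return static_cast<std::size_t>(std::max<std::ptrdiff_t>(1, out));
}
```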
src/tf/parse_batchnorm.cpp
```diff
@@ -23,6 +23,7 @@
 */
 #include <migraphx/tf/op_parser.hpp>
 #include <migraphx/tf/tf_parser.hpp>
+#include <migraphx/instruction.hpp>
 #include <migraphx/ranges.hpp>
 #include <migraphx/make_op.hpp>
@@ -38,16 +39,37 @@ struct parse_batchnorm : op_parser<parse_batchnorm>
     instruction_ref parse(const op_desc& /*opd*/,
                           const tf_parser& /*parser*/,
                           tf_parser::node_info info,
-                          const std::vector<instruction_ref>& args) const
+                          std::vector<instruction_ref> args) const
     {
-        float epsilon  = 1e-5f;
-        float momentum = 0.9f;
+        // different default epsilon than from ONNX
+        float epsilon = 1e-4f;
         if(contains(info.attributes, "epsilon"))
         {
             epsilon = info.attributes.at("epsilon").f();
         }
-        auto op = make_op("batch_norm_inference", {{"epsilon", epsilon}, {"momentum", momentum}});
-        return info.add_instruction(op, args);
+        auto x_lens = args[0]->get_shape().lens();
+        auto x_type = args[0]->get_shape().type();
+        // unsqueeze tensors of shape (C) to broadcast correctly
+        auto rt  = info.add_literal(migraphx::literal{migraphx::shape{x_type}, {0.5}});
+        auto eps = info.add_literal(migraphx::literal{migraphx::shape{x_type}, {epsilon}});
+        auto scale_unsqueeze =
+            info.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), args[1]);
+        auto bias_unsqueeze =
+            info.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), args[2]);
+        auto mean_unsqueeze =
+            info.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), args[3]);
+        auto var_unsqueeze =
+            info.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), args[4]);
+        auto numer   = info.add_broadcastable_binary_op("sub", args[0], mean_unsqueeze);
+        auto var_eps = info.add_broadcastable_binary_op("add", var_unsqueeze, eps);
+        auto denom   = info.add_broadcastable_binary_op("pow", var_eps, rt);
+        auto div0    = info.add_broadcastable_binary_op("div", numer, denom);
+        auto r0      = info.add_broadcastable_binary_op("mul", div0, scale_unsqueeze);
+        return info.add_broadcastable_binary_op("add", r0, bias_unsqueeze);
     }
 };
```
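Why `unsqueeze` with axes {1, 2}: it maps each (C) parameter to shape (C, 1, 1), and broadcasting aligns trailing dimensions, so (C, 1, 1) lines up with the C, H, W dims of an NCHW input (the NHWC-to-NCHW transposition happens elsewhere in the tf parser and is assumed here, not shown in this diff). A shape-only check:

```cpp
// unsqueeze axes {1, 2}: (C) -> (C, 1, 1); against (N, C, H, W), trailing
// alignment pairs (C, 1, 1) with (C, H, W), repeating each value over H and W.
#include <cassert>
#include <cstddef>
#include <vector>

std::vector<std::size_t> unsqueeze_axes_1_2(std::size_t c) { return {c, 1, 1}; }

int main()
{
    auto p = unsqueeze_axes_1_2(32);           // (32, 1, 1)
    std::vector<std::size_t> x{1, 32, 16, 16}; // NCHW
    // every trailing dim of p either matches x or is 1, so broadcast is legal
    assert(p[0] == x[1] && p[1] == 1 && p[2] == 1);
}
```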
src/tf/parse_conv.cpp
```diff
@@ -75,7 +75,6 @@ struct parse_conv : op_parser<parse_conv>
         const std::string& pad_mode = info.attributes.at("padding").s();
         if(pad_mode.find("SAME") != std::string::npos)
         {
-            op.padding_mode                 = op::padding_mode_t::same;
             std::vector<size_t> weight_dims = weights->get_shape().lens();
             size_t weight_h                 = weight_dims[2];
             size_t weight_w                 = weight_dims[3];
@@ -87,10 +86,6 @@ struct parse_conv : op_parser<parse_conv>
             op.padding = std::vector<size_t>(pads.begin(), pads.end());
         }
-        else if(pad_mode.find("VALID") != std::string::npos)
-        {
-            op.padding_mode = op::padding_mode_t::valid;
-        }
         else if(pad_mode.find("EXPLICIT") != std::string::npos)
         {
             std::vector<size_t> padding;
```
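For reference, TensorFlow's SAME padding chooses pads so that `out = ceil(in / stride)`. The sketch below shows the conventional computation; it mirrors the general rule rather than the exact helper this parser calls, which is outside this diff:

```cpp
// total_pad = max(0, (ceil(in / stride) - 1) * stride + dilated_kernel - in),
// split as floor(total / 2) before and the remainder after (SAME_UPPER style).
#include <algorithm>
#include <cstddef>
#include <utility>

std::pair<std::size_t, std::size_t>
same_padding(std::size_t in, std::size_t kernel, std::size_t stride, std::size_t dilation)
{
    const std::size_t dilated = 1 + dilation * (kernel - 1);
    const std::size_t out     = (in + stride - 1) / stride; // ceil(in / stride)
    const std::ptrdiff_t total =
        static_cast<std::ptrdiff_t>((out - 1) * stride + dilated) -
        static_cast<std::ptrdiff_t>(in);
    const std::size_t t = static_cast<std::size_t>(std::max<std::ptrdiff_t>(0, total));
    return {t / 2, t - t / 2};
}
```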
src/tf/parse_depthwiseconv.cpp
```diff
@@ -80,7 +80,6 @@ struct parse_depthwiseconv : op_parser<parse_depthwiseconv>
         if(pad_mode.find("SAME") != std::string::npos)
         {
-            op.padding_mode                 = op::padding_mode_t::same;
             std::vector<size_t> weight_dims = weights->get_shape().lens();
             size_t weight_h                 = weight_dims[2];
             size_t weight_w                 = weight_dims[3];
@@ -101,10 +100,6 @@ struct parse_depthwiseconv : op_parser<parse_depthwiseconv>
                 op.padding[1] = pads[1];
             }
         }
-        else if(pad_mode.find("VALID") != std::string::npos)
-        {
-            op.padding_mode = op::padding_mode_t::valid;
-        }
     }
     std::vector<int64_t> new_weights_shape;
```
test/gpu/mlir.cpp
```diff
@@ -141,7 +141,7 @@ TEST_CASE(conv)
     const std::string mlir_output = R"__migraphx__(
 module {
   func.func @main(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
-    %0 = migraphx.convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1], use_dynamic_same_auto_pad = 0 : i64} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
+    %0 = migraphx.convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
     return %0 : tensor<1x2x2x2xf32>
   }
 }
@@ -164,7 +164,7 @@ TEST_CASE(conv_add_relu)
     const std::string mlir_output = R"__migraphx__(
 module {
   func.func @main(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
-    %0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1], use_dynamic_same_auto_pad = 0 : i64} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
+    %0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
     %1 = migraphx.add(%0, %arg0) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
     %2 = migraphx.relu(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
     return %2 : tensor<1x2x2x2xf32>
```
test/onnx/onnx_test.cpp
```diff
@@ -856,8 +856,7 @@ TEST_CASE(conv_autopad_same_test)
     auto l0 = mm->add_parameter("0", {migraphx::shape::float_type, {1, 3, 32, 32}});
     auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
     migraphx::op::convolution op;
     op.padding = {1, 1, 1, 1};
-    op.padding_mode = migraphx::op::padding_mode_t::same;
     mm->add_instruction(op, l0, l1);
     auto prog = optimize_onnx("conv_autopad_same_test.onnx");
@@ -1034,15 +1033,11 @@ TEST_CASE(conv_dynamic_batch_same_upper)
     auto l0 = mm->add_parameter(
         "0", {migraphx::shape::float_type, {{1, 10, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}}});
     auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1, 3, 3, 3}});
     auto c0 =
         mm->add_instruction(
             migraphx::make_op("convolution",
                               {{"padding", {1, 1, 1, 1}},
                                {"stride", {1, 1}},
-                               {"dilation", {1, 1}},
-                               {"padding_mode", migraphx::op::padding_mode_t::same},
-                               {"use_dynamic_same_auto_pad", false}}),
+                               {"dilation", {1, 1}}}),
             l0,
             l1);
     mm->add_return({c0});
     migraphx::onnx_options options;
@@ -1064,8 +1059,7 @@ TEST_CASE(conv_dynamic_img_same_upper)
                               {{"padding", {0, 0}},
                                {"stride", {1, 1}},
                                {"dilation", {1, 1}},
-                               {"padding_mode", migraphx::op::padding_mode_t::same_upper},
-                               {"use_dynamic_same_auto_pad", true}}),
+                               {"padding_mode", migraphx::op::padding_mode_t::same_upper}}),
         l0,
         l1);
     mm->add_return({c0});
@@ -1089,8 +1083,7 @@ TEST_CASE(conv_dynamic_kernel_same_lower)
                               {{"padding", {0, 0}},
                                {"stride", {1, 1}},
                                {"dilation", {1, 1}},
-                               {"padding_mode", migraphx::op::padding_mode_t::same_lower},
-                               {"use_dynamic_same_auto_pad", true}}),
+                               {"padding_mode", migraphx::op::padding_mode_t::same_lower}}),
         l0,
         l1);
     mm->add_return({c0});
```
test/op_shape_test.cpp
```diff
@@ -81,16 +81,6 @@ void throws_shape(const migraphx::shape&, Ts...)
         "An expected shape should not be passed to throws_shape function");
 }
-
-TEST_CASE(batch_norm_inference_shape)
-{
-    const size_t channels = 3;
-    migraphx::shape s{migraphx::shape::float_type, {4, channels, 3, 3}};
-    migraphx::shape vars{migraphx::shape::float_type, {channels}};
-    expect_shape(s, migraphx::make_op("batch_norm_inference"), s, vars, vars, vars, vars);
-    throws_shape(migraphx::make_op("batch_norm_inference"), s);
-    throws_shape(migraphx::make_op("batch_norm_inference"), s, vars, vars, vars, vars, vars);
-}
-
 TEST_CASE(broadcast)
 {
     {
@@ -261,8 +251,7 @@ TEST_CASE(convolution_shape)
         migraphx::make_op("convolution",
                           {{"stride", {1, 1}},
                            {"dilation", {1, 1}},
-                           {"padding_mode", migraphx::op::padding_mode_t::same_upper},
-                           {"use_dynamic_same_auto_pad", true}}),
+                           {"padding_mode", migraphx::op::padding_mode_t::same_upper}}),
         input_dyn_shape,
         weights_shape);
@@ -275,8 +264,7 @@ TEST_CASE(convolution_shape)
         migraphx::make_op("convolution",
                           {{"stride", {1, 1}},
                            {"dilation", {1, 1}},
-                           {"padding_mode", migraphx::op::padding_mode_t::same_upper},
-                           {"use_dynamic_same_auto_pad", true}}),
+                           {"padding_mode", migraphx::op::padding_mode_t::same_upper}}),
         input_dyn_shape,
         weights_shape);
@@ -290,8 +278,7 @@ TEST_CASE(convolution_shape)
         migraphx::make_op("convolution",
                           {{"stride", {1, 1}},
                            {"dilation", {1, 1}},
-                           {"padding_mode", migraphx::op::padding_mode_t::same_lower},
-                           {"use_dynamic_same_auto_pad", true}}),
+                           {"padding_mode", migraphx::op::padding_mode_t::same_lower}}),
         input_dyn_shape,
         weights_shape);
     }
```
test/ref_ops_test.cpp
```diff
@@ -28,7 +28,6 @@
 #include <limits>
 #include <migraphx/literal.hpp>
 #include <migraphx/op/pooling.hpp>
-#include <migraphx/op/batch_norm_inference.hpp>
 #include <migraphx/instruction.hpp>
 #include <migraphx/quantization.hpp>
 #include <migraphx/ref/target.hpp>
@@ -493,202 +492,6 @@ TEST_CASE(avgpool_test)
     }
 }
-
-TEST_CASE(batch_norm_1d_per_actv_test)
-{
-    migraphx::program p;
-    auto* mm = p.get_main_module();
-    migraphx::shape x_shape{migraphx::shape::float_type, {2, 2, 4}};
-    migraphx::shape c_shape(migraphx::shape::float_type, {2, 4});
-    std::vector<float> x_data = {0.3547,  0.477,   -1.8575, 0.663,   -0.1881, -0.5113,
-                                 -0.1803, -0.5915, -0.1552, 0.9821,  1.827,   0.0558,
-                                 -0.0417, -1.0693, 1.9948,  -0.7448};
-    std::vector<float> scale_data = {-0.3181, -0.3885, 1.655, 0.0704, -0.2565, -1.1761, -0.3751, 0.1057};
-    std::vector<float> bias_data  = {-1.2118, -2.1156, 0.0046, -0.1341, -0.2724, -1.0718, 0.5535, -0.889};
-    std::vector<float> mean_data  = {0.0997, 0.7295, -0.0153, 0.3594, -0.1149, -0.7903, 0.9073, -0.6681};
-    std::vector<float> variance_data = {0.13, 0.1276, 6.7878, 0.1843, 0.0107, 0.1556, 2.3655, 0.0117};
-    auto x        = mm->add_literal(migraphx::literal{x_shape, x_data});
-    auto scale    = mm->add_literal(migraphx::literal{c_shape, scale_data});
-    auto bias     = mm->add_literal(migraphx::literal{c_shape, bias_data});
-    auto mean     = mm->add_literal(migraphx::literal{c_shape, mean_data});
-    auto variance = mm->add_literal(migraphx::literal{c_shape, variance_data});
-    mm->add_instruction(
-        migraphx::make_op(
-            "batch_norm_inference",
-            {{"epsilon", 1e-6},
-             {"momentum", 0.9},
-             {"bn_mode", migraphx::to_value(migraphx::op::batch_norm_inference::per_activation)}}),
-        x, scale, bias, mean, variance);
-    p.compile(migraphx::ref::target{});
-    auto result = p.eval({}).back();
-    std::vector<float> result_vector;
-    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
-    std::vector<float> gold = {-1.43677,  -1.84098,  -1.16563, -0.0843136, -0.090896, -1.90364,
-                               0.81875,   -0.81415,  -0.986915, -2.39032,  1.17489,   -0.183886,
-                               -0.453904, -0.239955, 0.288275,  -0.963948};
-    EXPECT(migraphx::verify_range(result_vector, gold));
-}
-
-TEST_CASE(batch_norm_1d_test)
-{
-    migraphx::program p;
-    auto* mm = p.get_main_module();
-    migraphx::shape x_shape{migraphx::shape::float_type, {2, 3, 4}};
-    migraphx::shape c_shape(migraphx::shape::float_type, {3});
-    std::vector<float> x_data = {0.7253,  -0.6356, 0.4606,  -0.8689, -1.1932, 0.4538,
-                                 -1.0018, -0.365,  -0.214,  -0.9553, -0.7672, 0.2331,
-                                 -0.8416, -0.6142, 0.0814,  0.2498,  -0.6706, 1.4872,
-                                 0.5112,  -1.5212, -0.9126, 0.0735,  1.085,   -0.3417};
-    std::vector<float> scale_data    = {1.1, 1.2, 1.3};
-    std::vector<float> bias_data     = {0.1, 0.2, 0.3};
-    std::vector<float> mean_data     = {-0.1804, -0.2875, -0.2249};
-    std::vector<float> variance_data = {2.7914, 7.3424, 3.3287};
-    auto x        = mm->add_literal(migraphx::literal{x_shape, x_data});
-    auto scale    = mm->add_literal(migraphx::literal{c_shape, scale_data});
-    auto bias     = mm->add_literal(migraphx::literal{c_shape, bias_data});
-    auto mean     = mm->add_literal(migraphx::literal{c_shape, mean_data});
-    auto variance = mm->add_literal(migraphx::literal{c_shape, variance_data});
-    mm->add_instruction(migraphx::make_op("batch_norm_inference", {{"epsilon", 1e-5}}),
-                        x, scale, bias, mean, variance);
-    p.compile(migraphx::ref::target{});
-    auto result = p.eval({}).back();
-    std::vector<float> result_vector;
-    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
-    std::vector<float> gold = {0.696301,  -0.199697, 0.522026,  -0.353299, -0.201094, 0.528289,
-                               -0.116332, 0.165679,  0.307767,  -0.220435, -0.086407, 0.62634,
-                               -0.335325, -0.185608, 0.272366,  0.383238,  0.0303421, 0.985936,
-                               0.553709,  -0.346351, -0.190009, 0.51262,   1.23335,   0.216776};
-    EXPECT(migraphx::verify_range(result_vector, gold));
-}
-
-TEST_CASE(batch_norm_3d_test)
-{
-    migraphx::program p;
-    auto* mm = p.get_main_module();
-    migraphx::shape x_shape{migraphx::shape::float_type, {2, 2, 2, 2, 2}};
-    migraphx::shape c_shape(migraphx::shape::float_type, {2});
-    std::vector<float> x_data = {-1.0833, 1.9681,  1.2075,  -0.723,  -0.4076, -0.8738, 0.5853,
-                                 -0.5357, 1.734,   0.7904,  0.6953,  -0.468,  -0.425,  0.6895,
-                                 0.0096,  0.4205,  -0.1749, 1.2821,  2.1453,  -0.8538, 1.0687,
-                                 0.0906,  0.0714,  -1.3079, -0.6376, 1.3023,  0.945,   0.0927,
-                                 -0.7421, -1.4341, -1.0309, 1.5153};
-    std::vector<float> scale_data    = {1.1, 1.3};
-    std::vector<float> bias_data     = {0.1, 0.2};
-    std::vector<float> mean_data     = {0.1537, 0.2161};
-    std::vector<float> variance_data = {18.0805, 13.3906};
-    auto x        = mm->add_literal(migraphx::literal{x_shape, x_data});
-    auto scale    = mm->add_literal(migraphx::literal{c_shape, scale_data});
-    auto bias     = mm->add_literal(migraphx::literal{c_shape, bias_data});
-    auto mean     = mm->add_literal(migraphx::literal{c_shape, mean_data});
-    auto variance = mm->add_literal(migraphx::literal{c_shape, variance_data});
-    mm->add_instruction(
-        migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
-    p.compile(migraphx::ref::target{});
-    auto result = p.eval({}).back();
-    std::vector<float> result_vector;
-    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
-    std::vector<float> gold = {-0.220005,  0.569376,   0.372612,  -0.126798, -0.0452053,
-                               -0.165809,  0.211653,   -0.0783441, 0.739245,  0.404024,
-                               0.370239,   -0.0430317, -0.0277556, 0.368179,  0.126639,
-                               0.272615,   0.0149929,  0.391911,   0.615216,  -0.160635,
-                               0.336706,   0.0836764,  0.0787094,  -0.278108, -0.103283,
-                               0.585881,   0.458947,   0.156161,   -0.140408, -0.386246,
-                               -0.243006,  0.661551};
-    EXPECT(migraphx::verify_range(result_vector, gold));
-}
-
-TEST_CASE(batch_norm_inference_test)
-{
-    migraphx::program p;
-    auto* mm = p.get_main_module();
-    const size_t width        = 2;
-    const size_t height       = 2;
-    const size_t channels     = 4;
-    const size_t batches      = 2;
-    const float x_val         = 8.0;
-    const float mean_val      = 2.0;
-    const float variance_val  = 4.0;
-    const float scale_val     = 2.0f;
-    const float bias_val      = 1.0f;
-    const float output_val =
-        scale_val * (x_val - mean_val) / (std::sqrt(variance_val)) + bias_val;
-
-    migraphx::shape s{migraphx::shape::float_type, {batches, channels, height, width}};
-    migraphx::shape vars{migraphx::shape::float_type, {channels}};
-    std::vector<float> x_data(width * height * channels * batches);
-    std::vector<float> scale_data(channels);
-    std::vector<float> bias_data(channels);
-    std::vector<float> mean_data(channels);
-    std::vector<float> variance_data(channels);
-
-    std::fill(x_data.begin(), x_data.end(), x_val);
-    std::fill(mean_data.begin(), mean_data.end(), mean_val);
-    std::fill(variance_data.begin(), variance_data.end(), variance_val);
-    std::fill(scale_data.begin(), scale_data.end(), scale_val);
-    std::fill(bias_data.begin(), bias_data.end(), bias_val);
-
-    auto x        = mm->add_literal(migraphx::literal{s, x_data});
-    auto scale    = mm->add_literal(migraphx::literal{vars, scale_data});
-    auto bias     = mm->add_literal(migraphx::literal{vars, bias_data});
-    auto mean     = mm->add_literal(migraphx::literal{vars, mean_data});
-    auto variance = mm->add_literal(migraphx::literal{vars, variance_data});
-
-    mm->add_instruction(
-        migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
-    p.compile(migraphx::ref::target{});
-    auto result = p.eval({}).back();
-    std::vector<float> result_vector(width * height * channels * batches);
-    std::vector<float> gold(width * height * channels * batches);
-    std::fill(gold.begin(), gold.end(), output_val);
-    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
-    EXPECT(migraphx::verify_range(result_vector, gold));
-}
-
 TEST_CASE(broadcast_test)
 {
     migraphx::program p;
@@ -1168,10 +971,9 @@ TEST_CASE(conv_dynamic_img_same_upper_test)
     auto input   = mm->add_parameter("X", input_dyn_shape);
     auto weights = mm->add_parameter("W", weights_shape);
     mm->add_instruction(
-        migraphx::make_op("convolution",
-                          {{"stride", {1, 1}},
-                           {"padding_mode", migraphx::op::padding_mode_t::same_upper},
-                           {"use_dynamic_same_auto_pad", true}}),
+        migraphx::make_op(
+            "convolution",
+            {{"stride", {1, 1}}, {"padding_mode", migraphx::op::padding_mode_t::same_upper}}),
         input,
         weights);
@@ -1228,7 +1030,7 @@ TEST_CASE(conv_dynamic_img_same_upper_test)
     EXPECT(migraphx::verify_range(results_vector, sol));
 }

-TEST_CASE(conv_dynamic_kernel_same_lower_test)
+TEST_CASE(conv_dynamic_kernel_same_upper_test)
 {
     migraphx::program p;
     auto* mm = p.get_main_module();
@@ -1240,10 +1042,9 @@ TEST_CASE(conv_dynamic_kernel_same_lower_test)
     auto input   = mm->add_parameter("X", input_shape);
     auto weights = mm->add_parameter("W", weights_shape);
     mm->add_instruction(
-        migraphx::make_op("convolution",
-                          {{"stride", {1, 1}},
-                           {"padding_mode", migraphx::op::padding_mode_t::same_lower},
-                           {"use_dynamic_same_auto_pad", true}}),
+        migraphx::make_op(
+            "convolution",
+            {{"stride", {1, 1}}, {"padding_mode", migraphx::op::padding_mode_t::same_upper}}),
         input,
         weights);
@@ -1303,6 +1104,80 @@ TEST_CASE(conv_dynamic_kernel_same_lower_test)
     EXPECT(migraphx::verify_range(results_vector, sol));
 }

+TEST_CASE(conv_dynamic_kernel_same_lower_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    migraphx::shape input_shape{migraphx::shape::float_type, {1, 3, 4, 4}};
+    migraphx::shape weights_shape{migraphx::shape::float_type,
+                                  {{1, 1, 0}, {3, 3, 0}, {2, 3, 0}, {2, 3, 0}}};
+    auto input   = mm->add_parameter("X", input_shape);
+    auto weights = mm->add_parameter("W", weights_shape);
+    mm->add_instruction(
+        migraphx::make_op(
+            "convolution",
+            {{"stride", {1, 1}}, {"padding_mode", migraphx::op::padding_mode_t::same_lower}}),
+        input,
+        weights);
+    p.compile(migraphx::ref::target{});
+
+    std::vector<float> a = {0.63321185, 0.6466339,  0.8515352,  0.44240063, 0.5018913,
+                            0.5068494,  0.75330657, 0.7383877,  0.15870683, 0.8171611,
+                            0.56118083, 0.87004256, 0.24401724, 0.8815178,  0.4222333,
+                            0.27191755, 0.41633207, 0.2460619,  0.32004243, 0.6962248,
+                            0.12284133, 0.2620491,  0.96931046, 0.6030955,  0.7623861,
+                            0.2395751,  0.61440414, 0.577285,   0.80087787, 0.12776066,
+                            0.26566318, 0.46569306, 0.96701574, 0.3850145,  0.14165345,
+                            0.5887347,  0.7152134,  0.5295342,  0.6303507,  0.4037548,
+                            0.18556239, 0.79416305, 0.29107493, 0.18770285, 0.6870904,
+                            0.30701008, 0.314684,   0.91075855};
+    std::vector<float> c = {2.8150102e-01, 3.3198616e-01, 9.5149356e-01, 7.4039467e-02,
+                            9.6555042e-01, 2.8815505e-01, 2.5100240e-01, 5.2186239e-01,
+                            2.3850012e-01, 8.2963020e-01, 3.0763101e-04, 6.7026985e-01};
+    std::vector<float> sol = {0.91231215, 1.1416453, 1.00216,   1.6813052, 1.7131033,
+                              2.453681,   2.536207,  3.0187201, 1.3293691, 2.1738236,
+                              2.9695358,  3.2319589, 1.3228729, 2.5953722, 2.50734,
+                              2.7736917};
+    migraphx::shape weight_fixed_shape0{migraphx::shape::float_type, {1, 3, 2, 2}};
+    migraphx::parameter_map params0;
+    params0["X"] = migraphx::argument(input_shape, a.data());
+    params0["W"] = migraphx::argument(weight_fixed_shape0, c.data());
+    auto result = p.eval(params0).back();
+    std::vector<float> results_vector(16);
+    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    EXPECT(migraphx::verify_range(results_vector, sol));
+}
+
 TEST_CASE(conv2d_padding_stride_test)
 {
     migraphx::program p;
```
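A quick sanity check on the deleted `batch_norm_inference_test`: every tensor is constant-filled, so its gold output reduces to one scalar identity, which is easy to verify by hand:

```cpp
// From the deleted test: x = 8, mean = 2, variance = 4, scale = 2, bias = 1,
// epsilon omitted, so output = 2 * (8 - 2) / sqrt(4) + 1 = 7 for every element.
#include <cassert>
#include <cmath>

int main()
{
    const float x = 8.0f, mean = 2.0f, variance = 4.0f, scale = 2.0f, bias = 1.0f;
    const float output = scale * (x - mean) / std::sqrt(variance) + bias;
    assert(output == 7.0f);
}
```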
test/rewrite_batchnorm_test.cpp (deleted, 100644 → 0)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/rewrite_batchnorm.hpp>
#include <migraphx/program.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/op/reshape.hpp>
#include <migraphx/op/batch_norm_inference.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/ranges.hpp>
#include <test.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/verify.hpp>
bool
is_batch_norm
(
migraphx
::
instruction
&
ins
)
{
return
ins
.
name
()
==
"batch_norm_inference"
;
}
TEST_CASE
(
fwd_conv_batchnorm_rewrite_test
)
{
std
::
vector
<
float
>
xdata
=
{
0.26485917
,
0.61703885
,
0.32762103
,
0.2503367
,
0.6552712
,
0.07947932
,
0.95442678
,
0.70892651
,
0.890563
,
0.80808088
,
0.89540492
,
0.52657048
,
0.94614791
,
0.64371508
,
0.0971229
,
0.2475562
,
0.47405955
,
0.85538928
,
0.05428386
,
0.993078
,
0.72771973
,
0.18312255
,
0.3091522
,
0.51396558
,
0.35158192
,
0.2419852
,
0.83691474
,
0.36355352
,
0.04769134
,
0.08312604
,
0.61804092
,
0.0508887
,
0.30987137
,
0.81307629
,
0.16398955
,
0.69886166
,
0.02415926
,
0.60608918
,
0.81907569
,
0.13208211
,
0.48303735
,
0.87533734
,
0.92998813
,
0.65553674
,
0.73223327
,
0.99401001
,
0.09850688
,
0.76972609
,
0.11118327
,
0.04392097
,
0.39252306
,
0.91129653
,
0.89078693
,
0.60571206
,
0.98410397
,
0.15290698
,
0.86992609
,
0.7575111
,
0.80583525
,
0.23649562
,
0.7478029
,
0.62888878
,
0.39886601
,
0.37066793
,
0.72627947
,
0.8745595
,
0.13568234
,
0.7413787
,
0.5039495
,
0.18945697
,
0.87046838
,
0.63970494
,
0.01124038
,
0.27459063
,
0.65745586
,
0.69182619
,
0.80470603
,
0.58039348
,
0.36950583
,
0.43634225
,
0.01694425
,
0.14099377
,
0.77015849
,
0.35809292
,
0.40547674
,
0.46538817
,
0.65835358
,
0.2266954
,
0.39057646
,
0.64642207
,
0.84491134
,
0.20998067
,
0.41074121
,
0.73055221
,
0.26424874
,
0.10612507
,
0.24478521
,
0.24091282
,
0.52536754
,
0.57292341
,
0.82190903
,
0.51858515
,
0.17162996
,
0.52048114
,
0.96624787
,
0.17527163
,
0.56384485
,
0.91991603
};
std
::
vector
<
float
>
wdata
=
{
-
1.12125056
,
0.50228441
,
1.12719446
,
-
2.61705068
,
-
0.2027315
,
-
0.82199441
,
0.05337102
,
-
0.62146691
,
-
2.40572931
,
-
1.47175612
,
1.49654601
,
-
1.07070376
,
-
0.65908074
,
-
0.28457694
,
1.60046717
,
0.20677642
,
-
1.51844486
,
0.41203847
,
-
0.01285751
,
0.07948031
,
-
0.91507006
,
-
1.59481079
,
-
0.12856238
,
0.39970482
,
-
1.89015158
,
0.66969754
,
0.10312618
};
migraphx
::
shape
xs
{
migraphx
::
shape
::
float_type
,
{
1
,
3
,
6
,
6
}};
migraphx
::
shape
ws
{
migraphx
::
shape
::
float_type
,
{
1
,
3
,
3
,
3
}};
migraphx
::
shape
vars
{
migraphx
::
shape
::
float_type
,
{
1
}};
auto
create_program
=
[
&
]()
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
x
=
mm
->
add_literal
(
xs
,
xdata
);
auto
w
=
mm
->
add_literal
(
ws
,
wdata
);
auto
conv
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"convolution"
,
{{
"padding"
,
{
0
,
0
}},
{
"stride"
,
{
1
,
1
}},
{
"dilation"
,
{
1
,
1
}}}),
x
,
w
);
auto
scale
=
mm
->
add_literal
(
migraphx
::
literal
{
vars
,
{
3.0
f
}});
auto
bias
=
mm
->
add_literal
(
migraphx
::
literal
{
vars
,
{
8.1
f
}});
auto
mean
=
mm
->
add_literal
(
migraphx
::
literal
{
vars
,
{
4.0
f
}});
auto
variance
=
mm
->
add_literal
(
migraphx
::
literal
{
vars
,
{
37.11
f
}});
mm
->
add_instruction
(
migraphx
::
make_op
(
"batch_norm_inference"
),
conv
,
scale
,
bias
,
mean
,
variance
);
return
p
;
};
migraphx
::
program
p1
=
create_program
();
migraphx
::
program
p2
=
create_program
();
migraphx
::
rewrite_batchnorm
opt
;
opt
.
apply
(
*
p2
.
get_main_module
());
p1
.
compile
(
migraphx
::
ref
::
target
{});
p2
.
compile
(
migraphx
::
ref
::
target
{});
auto
result1
=
p1
.
eval
({}).
back
();
auto
result2
=
p2
.
eval
({}).
back
();
std
::
vector
<
float
>
results_vector1
;
std
::
vector
<
float
>
results_vector2
;
result1
.
visit
([
&
](
auto
output
)
{
results_vector1
.
assign
(
output
.
begin
(),
output
.
end
());
});
result2
.
visit
([
&
](
auto
output
)
{
results_vector2
.
assign
(
output
.
begin
(),
output
.
end
());
});
EXPECT
(
migraphx
::
verify_range
(
results_vector1
,
results_vector2
));
}
TEST_CASE
(
non_literal
)
{
migraphx
::
shape
xs
{
migraphx
::
shape
::
float_type
,
{
1
,
3
,
8
,
8
}};
migraphx
::
shape
ws
{
migraphx
::
shape
::
float_type
,
{
4
,
3
,
1
,
1
}};
migraphx
::
shape
vars
{
migraphx
::
shape
::
float_type
,
{
4
}};
auto
create_program
=
[
&
]()
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
x
=
mm
->
add_parameter
(
"x"
,
xs
);
auto
w
=
mm
->
add_parameter
(
"w"
,
ws
);
auto
conv
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"convolution"
),
x
,
w
);
auto
scale
=
mm
->
add_literal
(
migraphx
::
abs
(
migraphx
::
generate_literal
(
vars
,
1
)));
auto
bias
=
mm
->
add_literal
(
migraphx
::
abs
(
migraphx
::
generate_literal
(
vars
,
2
)));
auto
mean
=
mm
->
add_literal
(
migraphx
::
abs
(
migraphx
::
generate_literal
(
vars
,
3
)));
auto
variance
=
mm
->
add_literal
(
migraphx
::
abs
(
migraphx
::
generate_literal
(
vars
,
4
)));
mm
->
add_instruction
(
migraphx
::
make_op
(
"batch_norm_inference"
),
conv
,
scale
,
bias
,
mean
,
variance
);
return
p
;
};
migraphx
::
program
p1
=
create_program
();
migraphx
::
program
p2
=
create_program
();
migraphx
::
rewrite_batchnorm
opt
;
opt
.
apply
(
*
p2
.
get_main_module
());
EXPECT
(
any_of
(
*
p1
.
get_main_module
(),
&
is_batch_norm
));
EXPECT
(
none_of
(
*
p2
.
get_main_module
(),
&
is_batch_norm
));
}
TEST_CASE
(
as_literal
)
{
migraphx
::
shape
xs
{
migraphx
::
shape
::
float_type
,
{
1
,
3
,
8
,
8
}};
migraphx
::
shape
ws
{
migraphx
::
shape
::
float_type
,
{
4
,
3
,
1
,
1
}};
migraphx
::
shape
vars
{
migraphx
::
shape
::
float_type
,
{
4
}};
auto
create_program
=
[
&
]()
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
x
=
mm
->
add_literal
(
migraphx
::
generate_literal
(
xs
,
1
));
auto
w
=
mm
->
add_literal
(
migraphx
::
generate_literal
(
ws
,
1
));
auto
conv
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"convolution"
),
x
,
w
);
auto
scale
=
mm
->
add_literal
(
migraphx
::
abs
(
migraphx
::
generate_literal
(
vars
,
1
)));
auto
bias
=
mm
->
add_literal
(
migraphx
::
abs
(
migraphx
::
generate_literal
(
vars
,
2
)));
auto
mean
=
mm
->
add_literal
(
migraphx
::
abs
(
migraphx
::
generate_literal
(
vars
,
3
)));
auto
variance
=
mm
->
add_literal
(
migraphx
::
abs
(
migraphx
::
generate_literal
(
vars
,
4
)));
mm
->
add_instruction
(
migraphx
::
make_op
(
"batch_norm_inference"
),
conv
,
scale
,
bias
,
mean
,
variance
);
return
p
;
};
migraphx
::
program
p1
=
create_program
();
migraphx
::
program
p2
=
create_program
();
migraphx
::
rewrite_batchnorm
opt
;
opt
.
apply
(
*
p2
.
get_main_module
());
EXPECT
(
any_of
(
*
p1
.
get_main_module
(),
&
is_batch_norm
));
EXPECT
(
none_of
(
*
p2
.
get_main_module
(),
&
is_batch_norm
));
p1
.
compile
(
migraphx
::
ref
::
target
{});
p2
.
compile
(
migraphx
::
ref
::
target
{});
auto
result1
=
p1
.
eval
({}).
back
();
auto
result2
=
p2
.
eval
({}).
back
();
visit_all
(
result1
,
result2
)([
&
](
auto
r1
,
auto
r2
)
{
EXPECT
(
migraphx
::
verify_range
(
r1
,
r2
));
});
}
TEST_CASE
(
as_literal_1d
)
{
migraphx
::
shape
xs
{
migraphx
::
shape
::
float_type
,
{
1
,
3
,
8
}};
migraphx
::
shape
ws
{
migraphx
::
shape
::
float_type
,
{
4
,
3
,
1
}};
migraphx
::
shape
vars
{
migraphx
::
shape
::
float_type
,
{
4
}};
auto
create_program
=
[
&
]()
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
x
=
mm
->
add_literal
(
migraphx
::
generate_literal
(
xs
,
1
));
auto
w
=
mm
->
add_literal
(
migraphx
::
generate_literal
(
ws
,
1
));
auto
conv
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"convolution"
,
{{
"padding"
,
{
0
}},
{
"stride"
,
{
1
}},
{
"dilation"
,
{
1
}}}),
x
,
w
);
auto
scale
=
mm
->
add_literal
(
migraphx
::
abs
(
migraphx
::
generate_literal
(
vars
,
1
)));
auto
bias
=
mm
->
add_literal
(
migraphx
::
abs
(
migraphx
::
generate_literal
(
vars
,
2
)));
auto
mean
=
mm
->
add_literal
(
migraphx
::
abs
(
migraphx
::
generate_literal
(
vars
,
3
)));
auto
variance
=
mm
->
add_literal
(
migraphx
::
abs
(
migraphx
::
generate_literal
(
vars
,
4
)));
mm
->
add_instruction
(
migraphx
::
make_op
(
"batch_norm_inference"
),
conv
,
scale
,
bias
,
mean
,
variance
);
return
p
;
};
migraphx
::
program
p1
=
create_program
();
migraphx
::
program
p2
=
create_program
();
migraphx
::
rewrite_batchnorm
opt
;
opt
.
apply
(
*
p2
.
get_main_module
());
EXPECT
(
any_of
(
*
p1
.
get_main_module
(),
&
is_batch_norm
));
EXPECT
(
none_of
(
*
p2
.
get_main_module
(),
&
is_batch_norm
));
p1
.
compile
(
migraphx
::
ref
::
target
{});
p2
.
compile
(
migraphx
::
ref
::
target
{});
auto
result1
=
p1
.
eval
({}).
back
();
auto
result2
=
p2
.
eval
({}).
back
();
visit_all
(
result1
,
result2
)([
&
](
auto
r1
,
auto
r2
)
{
EXPECT
(
migraphx
::
verify_range
(
r1
,
r2
));
});
}
TEST_CASE
(
as_literal_3d
)
{
migraphx
::
shape
xs
{
migraphx
::
shape
::
float_type
,
{
1
,
3
,
2
,
4
,
8
}};
migraphx
::
shape
ws
{
migraphx
::
shape
::
float_type
,
{
4
,
3
,
1
,
1
,
1
}};
migraphx
::
shape
vars
{
migraphx
::
shape
::
float_type
,
{
4
}};
auto
create_program
=
[
&
]()
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
migraphx
::
op
::
convolution
conv_op
;
conv_op
.
padding
=
{
0
,
0
,
0
};
conv_op
.
stride
=
{
1
,
1
,
1
};
conv_op
.
dilation
=
{
1
,
1
,
1
};
auto
x
=
mm
->
add_literal
(
migraphx
::
generate_literal
(
xs
,
1
));
auto
w
=
mm
->
add_literal
(
migraphx
::
generate_literal
(
ws
,
1
));
auto
conv
=
mm
->
add_instruction
(
conv_op
,
x
,
w
);
auto
scale
=
mm
->
add_literal
(
migraphx
::
abs
(
migraphx
::
generate_literal
(
vars
,
1
)));
auto
bias
=
mm
->
add_literal
(
migraphx
::
abs
(
migraphx
::
generate_literal
(
vars
,
2
)));
auto
mean
=
mm
->
add_literal
(
migraphx
::
abs
(
migraphx
::
generate_literal
(
vars
,
3
)));
auto
variance
=
mm
->
add_literal
(
migraphx
::
abs
(
migraphx
::
generate_literal
(
vars
,
4
)));
mm
->
add_instruction
(
migraphx
::
make_op
(
"batch_norm_inference"
),
conv
,
scale
,
bias
,
mean
,
variance
);
return
p
;
};
migraphx
::
program
p1
=
create_program
();
migraphx
::
program
p2
=
create_program
();
migraphx
::
rewrite_batchnorm
opt
;
opt
.
apply
(
*
p2
.
get_main_module
());
EXPECT
(
any_of
(
*
p1
.
get_main_module
(),
&
is_batch_norm
));
EXPECT
(
none_of
(
*
p2
.
get_main_module
(),
&
is_batch_norm
));
p1
.
compile
(
migraphx
::
ref
::
target
{});
p2
.
compile
(
migraphx
::
ref
::
target
{});
auto
result1
=
p1
.
eval
({}).
back
();
auto
result2
=
p2
.
eval
({}).
back
();
visit_all
(
result1
,
result2
)([
&
](
auto
r1
,
auto
r2
)
{
EXPECT
(
migraphx
::
verify_range
(
r1
,
r2
));
});
}
TEST_CASE
(
literal_reshape
)
{
migraphx
::
shape
xs
{
migraphx
::
shape
::
float_type
,
{
1
,
3
,
8
,
8
}};
migraphx
::
shape
ws
{
migraphx
::
shape
::
float_type
,
{
4
,
3
,
1
,
1
}};
migraphx
::
shape
vars
{
migraphx
::
shape
::
float_type
,
{
4
}};
auto
create_program
=
[
&
]()
{
migraphx
::
program
p
;
auto
*
mm
=
p
.
get_main_module
();
auto
x
=
mm
->
add_literal
(
migraphx
::
generate_literal
(
xs
,
1
));
auto
w
=
mm
->
add_literal
(
migraphx
::
generate_literal
(
ws
,
1
));
auto
conv
=
mm
->
add_instruction
(
migraphx
::
make_op
(
"convolution"
),
x
,
w
);
auto
scale
=
mm
->
add_literal
(
migraphx
::
abs
(
migraphx
::
generate_literal
(
vars
,
1
)));
auto
bias
=
mm
->
add_literal
(
migraphx
::
abs
(
migraphx
::
generate_literal
(
vars
,
2
)));
auto
mean
=
mm
->
add_literal
(
migraphx
::
abs
(
migraphx
::
generate_literal
(
vars
,
3
)));
auto
variance
=
mm
->
add_literal
(
migraphx
::
abs
(
migraphx
::
generate_literal
(
vars
,
4
)));
mm
->
add_instruction
(
migraphx
::
make_op
(
"batch_norm_inference"
),
conv
,
scale
,
bias
,
mean
,
variance
);
return
p
;
};
migraphx
::
program
p1
=
create_program
();
migraphx
::
program
p2
=
create_program
();
migraphx
::
rewrite_batchnorm
opt
;
opt
.
apply
(
*
p2
.
get_main_module
());
EXPECT
(
any_of
(
*
p1
.
get_main_module
(),
&
is_batch_norm
));
EXPECT
(
none_of
(
*
p2
.
get_main_module
(),
&
is_batch_norm
));
p1
.
compile
(
migraphx
::
ref
::
target
{});
p2
.
compile
(
migraphx
::
ref
::
target
{});
auto
result1
=
p1
.
eval
({}).
back
();
auto
result2
=
p2
.
eval
({}).
back
();
visit_all
(
result1
,
result2
)([
&
](
auto
r1
,
auto
r2
)
{
EXPECT
(
migraphx
::
verify_range
(
r1
,
r2
));
});
}
TEST_CASE(literal_reshape_per_actv)
{
    migraphx::shape xs{migraphx::shape::float_type, {1, 3, 8, 7, 4}};
    migraphx::shape ws{migraphx::shape::float_type, {4, 3, 1, 1, 1}};
    migraphx::shape vars{migraphx::shape::float_type, {4, 8, 7, 4}};
    auto create_program = [&]() {
        migraphx::program p;
        auto* mm  = p.get_main_module();
        auto x    = mm->add_literal(migraphx::generate_literal(xs, 1));
        auto w    = mm->add_literal(migraphx::generate_literal(ws, 1));
        auto conv = mm->add_instruction(
            migraphx::make_op(
                "convolution",
                {{"padding", {0, 0, 0}}, {"stride", {1, 1, 1}}, {"dilation", {1, 1, 1}}}),
            x,
            w);
        auto scale    = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
        auto bias     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
        auto mean     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
        auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
        mm->add_instruction(
            migraphx::make_op("batch_norm_inference",
                              {{"epsilon", 1.0e-5},
                               {"momentum", 0.88},
                               {"bn_mode",
                                migraphx::to_value(
                                    migraphx::op::batch_norm_inference::per_activation)}}),
            conv,
            scale,
            bias,
            mean,
            variance);
        return p;
    };
    migraphx::program p1 = create_program();
    migraphx::program p2 = create_program();
    migraphx::rewrite_batchnorm opt;
    opt.apply(*p2.get_main_module());
    EXPECT(any_of(*p1.get_main_module(), &is_batch_norm));
    EXPECT(none_of(*p2.get_main_module(), &is_batch_norm));
    p1.compile(migraphx::ref::target{});
    p2.compile(migraphx::ref::target{});
    auto result1 = p1.eval({}).back();
    auto result2 = p2.eval({}).back();
    visit_all(result1, result2)([&](auto r1, auto r2) {
        EXPECT(migraphx::verify_range(r1, r2));
    });
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
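A side note on what the deleted rewrite_batchnorm tests above were checking (an illustration added here, not part of the commit): at inference time, batchnorm is a per-channel affine transform, so the program using the fused batch_norm_inference op and its rewritten form must agree numerically. A minimal standalone C++ sketch of that identity, with made-up constants:

#include <cassert>
#include <cmath>

// Inference batchnorm: y = gamma * (x - mu) / sqrt(var + eps) + beta.
// Folding the constants gives y = a*x + b, the form a rewrite pass can
// substitute for the fused op (all names here are illustrative).
int main()
{
    const float gamma = 2.0f, beta = 0.5f, mu = 1.0f, var = 4.0f, eps = 1e-5f;
    const float x = 3.0f;

    const float fused = gamma * (x - mu) / std::sqrt(var + eps) + beta;

    const float a = gamma / std::sqrt(var + eps); // folded scale
    const float b = beta - a * mu;                // folded bias
    const float rewritten = a * x + b;

    assert(std::fabs(fused - rewritten) < 1e-5f);
    return 0;
}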
test/tf/batchnorm_half_test.pb
new file 0 → 100644
File added
test/tf/batchnorm_test.pb
No preview for this file type
test/tf/batchnormv3_test.pb
No preview for this file type
test/tf/gen_tf_pb.py
...
@@ -120,19 +120,45 @@ def batchnorm_test(g1):
     with g1.as_default():
         g1_input = tf.compat.v1.placeholder(tf.float32,
                                             shape=(1, 16, 16, 32),
-                                            name='0')
+                                            name='x')
-        g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='1')
+        g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='scale')
-        g1_offset = tf.compat.v1.placeholder(tf.float32, shape=(32), name='2')
+        g1_offset = tf.compat.v1.placeholder(tf.float32,
+                                             shape=(32),
+                                             name='bias')
-        g1_mean = tf.compat.v1.placeholder(tf.float32,
-                                           shape=(32), name='3')
+        g1_mean = tf.compat.v1.placeholder(tf.float32, shape=(32), name='mean')
         g1_variance = tf.compat.v1.placeholder(tf.float32,
                                                shape=(32),
-                                               name='4')
+                                               name='variance')
         tf.compat.v1.nn.fused_batch_norm(x=g1_input,
                                          scale=g1_scale,
                                          offset=g1_offset,
                                          mean=g1_mean,
                                          variance=g1_variance,
-                                         epsilon=0.00001,
+                                         epsilon=1e-4,
                                          is_training=False,
                                          name='batchnorm1')
+
+
+@tf_test
+def batchnorm_half_test(g1):
+    with g1.as_default():
+        g1_input = tf.compat.v1.placeholder(tf.float16,
+                                            shape=(1, 16, 16, 32),
+                                            name='x')
+        g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='scale')
+        g1_offset = tf.compat.v1.placeholder(tf.float32,
+                                             shape=(32),
+                                             name='bias')
+        g1_mean = tf.compat.v1.placeholder(tf.float32, shape=(32), name='mean')
+        g1_variance = tf.compat.v1.placeholder(tf.float32,
+                                               shape=(32),
+                                               name='variance')
+        tf.compat.v1.nn.fused_batch_norm(x=g1_input,
+                                         scale=g1_scale,
+                                         offset=g1_offset,
+                                         mean=g1_mean,
+                                         variance=g1_variance,
+                                         epsilon=1e-4,
+                                         is_training=False,
+                                         name='batchnorm1')
@@ -142,19 +168,21 @@ def batchnormv3_test(g1):
     with g1.as_default():
         g1_input = tf.compat.v1.placeholder(tf.float32,
                                             shape=(1, 16, 16, 32),
-                                            name='0')
+                                            name='x')
-        g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='1')
+        g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='scale')
-        g1_offset = tf.compat.v1.placeholder(tf.float32, shape=(32), name='2')
+        g1_offset = tf.compat.v1.placeholder(tf.float32,
+                                             shape=(32),
+                                             name='bias')
-        g1_mean = tf.compat.v1.placeholder(tf.float32,
-                                           shape=(32), name='3')
+        g1_mean = tf.compat.v1.placeholder(tf.float32, shape=(32), name='mean')
         g1_variance = tf.compat.v1.placeholder(tf.float32,
                                                shape=(32),
-                                               name='4')
+                                               name='variance')
         tf.raw_ops.FusedBatchNormV3(x=g1_input,
                                     scale=g1_scale,
                                     offset=g1_offset,
                                     mean=g1_mean,
                                     variance=g1_variance,
-                                    epsilon=0.00001,
+                                    epsilon=1e-6,
                                     is_training=False,
                                     name='batchnorm1')
...
test/tf/tf_test.cpp
...
@@ -24,6 +24,7 @@
 #include <iostream>
 #include <vector>
 #include <unordered_map>
+#include <migraphx/common.hpp>
 #include <migraphx/literal.hpp>
 #include <migraphx/pass_manager.hpp>
 #include <migraphx/simplify_reshapes.hpp>
...
@@ -33,7 +34,6 @@
 #include <migraphx/instruction.hpp>
 #include <migraphx/tf.hpp>
 #include <migraphx/make_op.hpp>
-#include <migraphx/op/batch_norm_inference.hpp>
 #include <migraphx/op/convolution.hpp>
 #include <migraphx/op/reduce_mean.hpp>
 #include <migraphx/op/pooling.hpp>
...
@@ -186,50 +186,94 @@ TEST_CASE(batchmatmul_test)
 TEST_CASE(batchnorm_test)
 {
-    float epsilon  = 1.001e-5f;
-    float momentum = 0.9f;
     migraphx::program p;
     auto* mm = p.get_main_module();
-    migraphx::op::batch_norm_inference op{
-        epsilon, momentum, migraphx::op::batch_norm_inference::spatial};
-    migraphx::shape s0{migraphx::shape::float_type, {32}};
-    auto l0 =
-        mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 32, 16, 16}});
-    std::vector<float> const_vals(32);
-    std::fill(const_vals.begin(), const_vals.end(), 1.0f);
-    auto l2 = mm->add_parameter("2", s0);
-    auto l3 = mm->add_parameter("3", s0);
-    auto l4 = mm->add_parameter("4", s0);
-    auto l1 = mm->add_literal(migraphx::literal{s0, const_vals});
-    mm->add_instruction(op, l0, l1, l2, l3, l4);
+    auto x    = mm->add_parameter("x", {migraphx::shape::float_type, {1, 32, 16, 16}});
+    auto bias = mm->add_parameter("bias", {migraphx::shape::float_type, {32}});
+    auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {32}});
+    auto var  = mm->add_parameter("variance", {migraphx::shape::float_type, {32}});
+    std::vector<float> scale_data(32, 1.0);
+    auto scale = mm->add_literal(migraphx::shape{migraphx::shape::float_type, {32}}, scale_data);
+    auto rt  = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
+    auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-4f}});
+    auto usq_scale =
+        mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
+    auto usq_bias = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
+    auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
+    auto usq_var  = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), var);
+    auto numer   = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
+    auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
+    auto denom   = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
+    auto div0    = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
+    auto r0      = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
+    add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
     auto prog = optimize_tf("batchnorm_test.pb", true);
     EXPECT(p == prog);
 }
+
+TEST_CASE(batchnorm_half_test)
+{
+    migraphx::program p;
+    auto* mm  = p.get_main_module();
+    auto x    = mm->add_parameter("x", {migraphx::shape::half_type, {1, 32, 16, 16}});
+    auto bias = mm->add_parameter("bias", {migraphx::shape::float_type, {32}});
+    auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {32}});
+    auto var  = mm->add_parameter("variance", {migraphx::shape::float_type, {32}});
+    std::vector<float> scale_data(32, 1.0);
+    auto scale = mm->add_literal(migraphx::shape{migraphx::shape::float_type, {32}}, scale_data);
+    auto rt  = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {0.5}});
+    auto eps = mm->add_literal(migraphx::literal{migraphx::shape::half_type, {1e-4f}});
+    auto usq_scale =
+        mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
+    auto usq_bias = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
+    auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
+    auto usq_var  = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), var);
+    auto numer   = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
+    auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
+    auto denom   = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
+    auto div0    = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
+    auto r0      = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
+    add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
+    auto prog = optimize_tf("batchnorm_half_test.pb", true);
+    EXPECT(p == prog);
+}

 TEST_CASE(batchnormv3_test)
 {
-    float epsilon  = 1.0e-5f;
-    float momentum = 0.9f;
     migraphx::program p;
     auto* mm = p.get_main_module();
-    migraphx::op::batch_norm_inference op{
-        epsilon, momentum, migraphx::op::batch_norm_inference::spatial};
-    migraphx::shape s0{migraphx::shape::float_type, {32}};
-    auto l0 =
-        mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 32, 16, 16}});
-    std::vector<float> const_vals(32);
-    std::fill(const_vals.begin(), const_vals.end(), 1.0f);
-    auto l2 = mm->add_parameter("2", s0);
-    auto l3 = mm->add_parameter("3", s0);
-    auto l4 = mm->add_parameter("4", s0);
-    auto l1 = mm->add_literal(migraphx::literal{s0, const_vals});
-    mm->add_instruction(op, l0, l1, l2, l3, l4);
+    auto x    = mm->add_parameter("x", {migraphx::shape::float_type, {1, 32, 16, 16}});
+    auto bias = mm->add_parameter("bias", {migraphx::shape::float_type, {32}});
+    auto mean = mm->add_parameter("mean", {migraphx::shape::float_type, {32}});
+    auto var  = mm->add_parameter("variance", {migraphx::shape::float_type, {32}});
+    std::vector<float> scale_data(32, 1.0);
+    auto scale = mm->add_literal(migraphx::shape{migraphx::shape::float_type, {32}}, scale_data);
+    auto rt  = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
+    auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-6f}});
+    auto usq_scale =
+        mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
+    auto usq_bias = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
+    auto usq_mean = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
+    auto usq_var  = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), var);
+    auto numer   = add_common_op(*mm, migraphx::make_op("sub"), {x, usq_mean});
+    auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
+    auto denom   = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
+    auto div0    = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
+    auto r0      = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
+    add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
     auto prog = optimize_tf("batchnormv3_test.pb", true);
     EXPECT(p == prog);
 }
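For readers following the change above: the new expected programs expand TF's fused_batch_norm into explicit sub/add/pow/div/mul instructions, with the per-channel {32} parameters unsqueezed to {32, 1, 1} so they broadcast over the H and W axes of the NCHW input. A small standalone C++ sketch of the same arithmetic (illustrative only; the helper below is not a MIGraphX API):

#include <cassert>
#include <cmath>
#include <cstddef>
#include <vector>

// Decomposed batchnorm over an NCHW tensor with N == 1:
// y = (x - mean) / pow(var + eps, 0.5) * scale + bias, per channel.
std::vector<float> batchnorm_nchw(const std::vector<float>& x,
                                  const std::vector<float>& scale,
                                  const std::vector<float>& bias,
                                  const std::vector<float>& mean,
                                  const std::vector<float>& var,
                                  std::size_t channels,
                                  std::size_t hw,
                                  float eps)
{
    std::vector<float> y(x.size());
    for(std::size_t c = 0; c < channels; ++c) // params indexed by channel only,
        for(std::size_t i = 0; i < hw; ++i)   // i.e. broadcast over H*W
        {
            const float denom = std::pow(var[c] + eps, 0.5f);
            y[c * hw + i] = (x[c * hw + i] - mean[c]) / denom * scale[c] + bias[c];
        }
    return y;
}

int main()
{
    const std::vector<float> x{1, 2, 3, 4}; // C = 2, H*W = 2
    const auto y = batchnorm_nchw(x, {1, 1}, {0, 0}, {0, 0}, {1, 1}, 2, 2, 1e-4f);
    assert(std::fabs(y[0] - 1.0f / std::sqrt(1.0001f)) < 1e-6f);
    return 0;
}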
...
@@ -327,10 +371,9 @@ migraphx::program create_conv()
     mm->add_literal(migraphx::shape{migraphx::shape::float_type, {3, 3, 3, 32}}, weight_data);
     migraphx::op::convolution op;
-    op.padding_mode = migraphx::op::padding_mode_t::same;
     op.padding  = {1, 1, 1, 1};
     op.stride   = {1, 1};
     op.dilation = {1, 1};
     auto l2 =
         mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {3, 2, 0, 1}}}), l1);
     mm->add_instruction(op, l0, l2);
...
@@ -406,11 +449,10 @@ TEST_CASE(depthwiseconv_test)
     mm->add_literal(migraphx::shape{migraphx::shape::float_type, {3, 3, 3, 1}}, weight_data);
     migraphx::op::convolution op;
-    op.padding_mode = migraphx::op::padding_mode_t::same;
     op.padding  = {1, 1};
     op.stride   = {1, 1};
     op.dilation = {1, 1};
     op.group    = 3;
     auto l3 =
         mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {3, 2, 0, 1}}}), l1);
     auto l4 = mm->add_instruction(migraphx::make_op("contiguous"), l3);
...
test/verify/quant_conv_default_mode.cpp
...
@@ -37,10 +37,7 @@ struct quant_conv_default_mode : verify_program<quant_conv_default_mode>
         auto pa = mm->add_parameter("a", a_shape);
         migraphx::shape c_shape{migraphx::shape::int8_type, {2, 3, 3, 3}};
         auto pc = mm->add_parameter("c", c_shape);
         mm->add_instruction(
-            migraphx::op::quant_convolution{
-                {{0, 0}}, {{1, 1}}, {{1, 1}}, migraphx::op::same},
-            pa,
-            pc);
+            migraphx::op::quant_convolution{{{0, 0}}, {{1, 1}}, {{1, 1}}}, pa, pc);
         return p;
     }
 };
test/verify/quant_conv_int8x4_default.cpp
...
@@ -37,10 +37,7 @@ struct quant_conv_int8x4_default : verify_program<quant_conv_int8x4_default>
         auto pa = mm->add_parameter("a", a_shape);
         migraphx::shape c_shape{migraphx::shape::int8_type, {16, 16, 3, 3}};
         auto pc = mm->add_parameter("c", c_shape);
         mm->add_instruction(
-            migraphx::op::quant_convolution{
-                {{0, 0}}, {{1, 1}}, {{1, 1}}, migraphx::op::same},
-            pa,
-            pc);
+            migraphx::op::quant_convolution{{{0, 0}}, {{1, 1}}, {{1, 1}}}, pa, pc);
         return p;
     }
 };
test/verify/quant_conv_valid_mode.cpp
deleted 100644 → 0
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/op/quant_convolution.hpp>
struct quant_conv_valid_mode : verify_program<quant_conv_valid_mode>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape a_shape{migraphx::shape::int8_type, {2, 3, 4, 4}};
        auto pa = mm->add_parameter("a", a_shape);
        migraphx::shape c_shape{migraphx::shape::int8_type, {2, 3, 3, 3}};
        auto pc = mm->add_parameter("c", c_shape);
        mm->add_instruction(
            migraphx::op::quant_convolution{
                {{0, 0}}, {{1, 1}}, {{1, 1}}, migraphx::op::valid},
            pa,
            pc);
        return p;
    }
};