gaoqiong / MIGraphX / Commits

Commit 9db8a28d, authored Oct 27, 2022 by Paul
Merge of parents 1f8aa24f and 4b1c1c41
Changes: 110. Showing 10 changed files with 175 additions and 228 deletions (+175 -228).
test/verify/test_batchnorm_3d_per_actv.cpp   +0  -68
test/verify/test_batchnorm_inference.cpp     +0  -53
test/verify/test_batchnorm_inference_2.cpp   +0  -53
test/verify/test_conv_bn.cpp                 +25 -4
test/verify/test_conv_bn_add.cpp             +30 -13
test/verify/test_conv_bn_relu_pooling.cpp    +22 -2
test/verify/test_conv_bn_relu_pooling2.cpp   +31 -13
test/verify/test_pad_large.cpp               +5  -3
test/verify/test_shape_alloc.cpp             +25 -4
tools/include/operation.hpp                  +37 -15
test/verify/test_batchnorm_3d_per_actv.cpp (deleted, 100644 → 0)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/batch_norm_inference.hpp>
struct test_batchnorm_3d_per_actv : verify_program<test_batchnorm_3d_per_actv>
{
    const size_t d1       = 2;
    const size_t d2       = 4;
    const size_t d3       = 5;
    const size_t channels = 2;
    const size_t batches  = 3;

    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape s{migraphx::shape::float_type, {batches, channels, d1, d2, d3}};
        migraphx::shape vars{migraphx::shape::float_type, {channels, d1, d2, d3}};
        auto x        = mm->add_parameter("x", s);
        auto scale    = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
        auto bias     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
        auto mean     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
        auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
        mm->add_instruction(
            migraphx::make_op(
                "batch_norm_inference",
                {{"epsilon", 1.0e-6},
                 {"momentum", 0.8f},
                 {"bn_mode",
                  migraphx::to_value(migraphx::op::batch_norm_inference::per_activation)}}),
            x,
            scale,
            bias,
            mean,
            variance);
        return p;
    }
};
test/verify/test_batchnorm_inference.cpp (deleted, 100644 → 0)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_batchnorm_inference : verify_program<test_batchnorm_inference>
{
    const size_t width    = 3;
    const size_t height   = 3;
    const size_t channels = 3;
    const size_t batches  = 4;

    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape s{migraphx::shape::float_type, {batches, channels, height, width}};
        migraphx::shape vars{migraphx::shape::float_type, {channels}};
        auto x        = mm->add_parameter("x", s);
        auto scale    = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
        auto bias     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
        auto mean     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
        auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
        mm->add_instruction(
            migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
        return p;
    }
};
test/verify/test_batchnorm_inference_2.cpp (deleted, 100644 → 0)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_batchnorm_inference_2 : verify_program<test_batchnorm_inference_2>
{
    const size_t width    = 14;
    const size_t height   = 14;
    const size_t channels = 256;
    const size_t batches  = 1;

    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape s{migraphx::shape::float_type, {batches, channels, height, width}};
        migraphx::shape vars{migraphx::shape::float_type, {channels}};
        auto x        = mm->add_parameter("x", s);
        auto scale    = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
        auto bias     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
        auto mean     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
        auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
        mm->add_instruction(
            migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
        return p;
    }
};
test/verify/test_conv_bn.cpp
...
...
@@ -26,6 +26,8 @@
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/common.hpp>
struct test_conv_bn : verify_program<test_conv_bn>
{
...
...
@@ -37,19 +39,38 @@ struct test_conv_bn : verify_program<test_conv_bn>
        migraphx::shape xs{migraphx::shape::float_type, {1, 3, 224, 224}};
        migraphx::shape ws{migraphx::shape::float_type, {64, 3, 7, 7}};
        migraphx::shape vars{migraphx::shape::float_type, {64}};
        auto x = mm->add_parameter("x", xs);
        auto w = mm->add_parameter("w", ws);
+        // non-symmetrical tiling
        auto conv = mm->add_instruction(
            migraphx::make_op("convolution",
                              {{"padding", {3, 3}}, {"stride", {2, 2}}, {"dilation", {1, 1}}}),
            x,
            w);
        auto scale    = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
        auto bias     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
        auto mean     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
        auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
-        mm->add_instruction(
-            migraphx::make_op("batch_norm_inference"), conv, scale, bias, mean, variance);
+        auto rt  = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
+        auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
+        auto usq_scale = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
+        auto usq_bias  = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
+        auto usq_mean  = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
+        auto usq_var   = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), variance);
+        auto numer   = add_common_op(*mm, migraphx::make_op("sub"), {conv, usq_mean});
+        auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
+        auto denom   = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
+        auto div0    = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
+        auto r0      = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
+        add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
        return p;
    }
};
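
This and the following tests replace the single batch_norm_inference instruction with an explicit chain of unsqueeze, sub, add, pow, div, mul, and add operations. As a sanity check that the chain evaluates the usual inference-time formula y = scale * (x - mean) / sqrt(variance + eps) + bias, here is a minimal standalone sketch (not part of the commit; plain scalars instead of MIGraphX shapes) that mirrors the intermediate names used above:

#include <cassert>
#include <cmath>

int main()
{
    const float x = 2.0f, scale = 1.5f, bias = 0.25f, mean = 0.5f, variance = 4.0f;
    const float eps = 1e-5f, rt = 0.5f;

    // Reference inference-time batch norm: y = scale * (x - mean) / sqrt(variance + eps) + bias
    const float reference = scale * (x - mean) / std::sqrt(variance + eps) + bias;

    // The sub / add / pow / div / mul / add chain from the rewritten tests above.
    const float numer   = x - mean;              // sub
    const float var_eps = variance + eps;        // add
    const float denom   = std::pow(var_eps, rt); // pow with exponent 0.5, i.e. sqrt
    const float div0    = numer / denom;         // div
    const float r0      = div0 * scale;          // mul
    const float result  = r0 + bias;             // add

    assert(std::fabs(result - reference) < 1e-6f);
    return 0;
}
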
test/verify/test_conv_bn_add.cpp
...
...
@@ -26,21 +26,38 @@
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/common.hpp>
struct test_conv_bn_add : verify_program<test_conv_bn_add>
{
-    static migraphx::instruction_ref
-    add_bn(migraphx::module& m, migraphx::instruction_ref x, std::size_t channels, std::size_t seed = 1)
+    static migraphx::instruction_ref add_bn(migraphx::module& m, migraphx::instruction_ref x)
    {
-        migraphx::shape vars{migraphx::shape::float_type, {channels}};
-        auto scale    = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 1 + seed)));
-        auto bias     = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 2 + seed)));
-        auto mean     = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 3 + seed)));
-        auto variance = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 4 + seed)));
-        return m.add_instruction(
-            migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
+        auto bn_lens = x->get_shape().lens();
+        auto c_len   = bn_lens.at(1);
+        migraphx::shape vars{migraphx::shape::float_type, {c_len}};
+        auto scale    = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 1 + c_len)));
+        auto bias     = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 2 + c_len)));
+        auto mean     = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 3 + c_len)));
+        auto variance = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 4 + c_len)));
+        auto rt  = m.add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
+        auto eps = m.add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
+        auto usq_scale = m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
+        auto usq_bias  = m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
+        auto usq_mean  = m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
+        auto usq_var   = m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), variance);
+        auto numer   = add_common_op(m, migraphx::make_op("sub"), {x, usq_mean});
+        auto var_eps = add_common_op(m, migraphx::make_op("add"), {usq_var, eps});
+        auto denom   = add_common_op(m, migraphx::make_op("pow"), {var_eps, rt});
+        auto div0    = add_common_op(m, migraphx::make_op("div"), {numer, denom});
+        auto r0      = add_common_op(m, migraphx::make_op("mul"), {div0, usq_scale});
+        return add_common_op(m, migraphx::make_op("add"), {r0, usq_bias});
    }

    migraphx::program create_program() const
...
...
@@ -57,10 +74,10 @@ struct test_conv_bn_add : verify_program<test_conv_bn_add>
            {migraphx::shape::float_type, {ochannels, ichannels, 1, 1}}, 2));
        auto relu1 = mm->add_instruction(migraphx::make_op("relu"), x);
        auto conv1 = mm->add_instruction(migraphx::make_op("convolution"), relu1, w);
-        auto bn1   = add_bn(*mm, conv1, ochannels, 1);
+        auto bn1   = add_bn(*mm, conv1);
        auto relu2 = mm->add_instruction(migraphx::make_op("relu"), y);
        auto conv2 = mm->add_instruction(migraphx::make_op("convolution"), relu2, v);
-        auto bn2   = add_bn(*mm, conv2, ochannels, 1);
+        auto bn2   = add_bn(*mm, conv2);
        auto sum   = mm->add_instruction(migraphx::make_op("add"), bn1, bn2);
        mm->add_instruction(migraphx::make_op("relu"), sum);
        return p;
...
...
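
The new add_bn signature drops the channels and seed arguments because the channel count can be read from the second entry of the input's NCHW lens, which is what bn_lens.at(1) does above. A minimal sketch of that lookup (plain C++, with a hypothetical helper name channels_from_nchw; not MIGraphX code):

#include <cassert>
#include <cstddef>
#include <vector>

std::size_t channels_from_nchw(const std::vector<std::size_t>& lens)
{
    // lens = {N, C, H, W}; the channel count is the second entry, as in bn_lens.at(1) above.
    return lens.at(1);
}

int main()
{
    std::vector<std::size_t> conv_output{1, 64, 112, 112};
    assert(channels_from_nchw(conv_output) == 64);
    return 0;
}
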
test/verify/test_conv_bn_relu_pooling.cpp
...
...
@@ -27,6 +27,8 @@
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/common.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/common.hpp>
struct test_conv_bn_relu_pooling : verify_program<test_conv_bn_relu_pooling>
{
...
...
@@ -49,8 +51,26 @@ struct test_conv_bn_relu_pooling : verify_program<test_conv_bn_relu_pooling>
        auto bias     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
        auto mean     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
        auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
-        auto bn = mm->add_instruction(
-            migraphx::make_op("batch_norm_inference"), conv, scale, bias, mean, variance);
+        auto rt  = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
+        auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
+        auto usq_scale = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
+        auto usq_bias  = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
+        auto usq_mean  = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
+        auto usq_var   = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), variance);
+        auto numer   = add_common_op(*mm, migraphx::make_op("sub"), {conv, usq_mean});
+        auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
+        auto denom   = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
+        auto div0    = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
+        auto r0      = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
+        auto bn      = add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
        auto relu = mm->add_instruction(migraphx::make_op("relu"), bn);
        mm->add_instruction(migraphx::make_op("pooling",
                                              {{"mode", migraphx::op::pooling_mode::average},
...
...
test/verify/test_conv_bn_relu_pooling2.cpp
...
...
@@ -27,22 +27,40 @@
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/common.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/common.hpp>
struct test_conv_bn_relu_pooling2 : verify_program<test_conv_bn_relu_pooling2>
{
-    static migraphx::instruction_ref
-    add_bn(migraphx::program& p, migraphx::instruction_ref x, std::size_t channels)
+    static migraphx::instruction_ref add_bn(migraphx::module& m, migraphx::instruction_ref x)
    {
-        auto* mm = p.get_main_module();
-        migraphx::shape vars{migraphx::shape::float_type, {channels}};
-        auto scale    = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1 + channels)));
-        auto bias     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2 + channels)));
-        auto mean     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3 + channels)));
-        auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4 + channels)));
-        return mm->add_instruction(
-            migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
+        auto bn_lens = x->get_shape().lens();
+        auto c_len   = bn_lens.at(1);
+        migraphx::shape vars{migraphx::shape::float_type, {c_len}};
+        auto scale    = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 1 + c_len)));
+        auto bias     = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 2 + c_len)));
+        auto mean     = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 3 + c_len)));
+        auto variance = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 4 + c_len)));
+        auto rt  = m.add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
+        auto eps = m.add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
+        auto usq_scale = m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
+        auto usq_bias  = m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
+        auto usq_mean  = m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
+        auto usq_var   = m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), variance);
+        auto numer   = add_common_op(m, migraphx::make_op("sub"), {x, usq_mean});
+        auto var_eps = add_common_op(m, migraphx::make_op("add"), {usq_var, eps});
+        auto denom   = add_common_op(m, migraphx::make_op("pow"), {var_eps, rt});
+        auto div0    = add_common_op(m, migraphx::make_op("div"), {numer, denom});
+        auto r0      = add_common_op(m, migraphx::make_op("mul"), {div0, usq_scale});
+        return add_common_op(m, migraphx::make_op("add"), {r0, usq_bias});
    }

    migraphx::program create_program() const
    {
        migraphx::program p;
...
...
@@ -59,7 +77,7 @@ struct test_conv_bn_relu_pooling2 : verify_program<test_conv_bn_relu_pooling2>
            {{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
            x1,
            w1);
-        auto bn1 = add_bn(p, conv1, 2048);
+        auto bn1   = add_bn(*mm, conv1);
        auto x2    = mm->add_parameter("x2", xs2);
        auto w2    = mm->add_parameter("w2", ws2);
        auto conv2 = mm->add_instruction(
...
...
@@ -67,7 +85,7 @@ struct test_conv_bn_relu_pooling2 : verify_program<test_conv_bn_relu_pooling2>
            {{"padding", {0, 0}}, {"stride", {2, 2}}, {"dilation", {1, 1}}}),
            x2,
            w2);
-        auto bn2 = add_bn(p, conv2, 2048);
+        auto bn2   = add_bn(*mm, conv2);
        auto add  = mm->add_instruction(migraphx::make_op("add"), bn1, bn2);
        auto relu = mm->add_instruction(migraphx::make_op("relu"), add);
        mm->add_instruction(migraphx::make_op("pooling",
...
...
test/verify/test_elu.cpp → test/verify/test_pad_large.cpp (renamed)
...
...
@@ -27,14 +27,16 @@
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
-struct test_elu : verify_program<test_elu>
+struct test_pad_large : verify_program<test_pad_large>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
-        auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
-        mm->add_instruction(migraphx::make_op("leaky_relu", {{"alpha", 1.0}}), x);
+        migraphx::shape s0{migraphx::shape::float_type, {586, 3, 224, 224}};
+        std::vector<int64_t> pads0 = {0, 0, 1, 1, 0, 0, 1, 1};
+        auto l0 = mm->add_parameter("x", s0);
+        mm->add_instruction(migraphx::make_op("pad", {{"pads", pads0}}), l0);
        return p;
    }
};
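
A small sketch (not MIGraphX code) of the output dimensions produced by the pad above, assuming the ONNX-style convention where the first half of "pads" holds the leading pads per dimension and the second half the trailing pads:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<std::size_t> padded_lens(std::vector<std::size_t> lens, const std::vector<int64_t>& pads)
{
    for(std::size_t i = 0; i < lens.size(); i++)
        lens[i] += static_cast<std::size_t>(pads[i] + pads[i + lens.size()]);
    return lens;
}

int main()
{
    // Input {586, 3, 224, 224} with pads {0, 0, 1, 1, 0, 0, 1, 1} -> {586, 3, 226, 226},
    // i.e. one element of padding on each side of the two spatial dimensions.
    for(auto d : padded_lens({586, 3, 224, 224}, {0, 0, 1, 1, 0, 0, 1, 1}))
        std::cout << d << " ";
    std::cout << "\n";
    return 0;
}
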
test/verify/test_leaky_relu.cpp → test/verify/test_shape_alloc.cpp (renamed)
...
...
@@ -21,20 +21,41 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/reduce_mean.hpp>
-struct test_leaky_relu : verify_program<test_leaky_relu>
+/**
+ * @brief test_shape_alloc sets up a situation that could lead to an exception "convolution: Shapes
+ * are not in standard layout" if a "replace_allocate" compiler pass is not followed with
+ * "adjust_allocation".  The last transpose instruction generates a shape with a stride of 1 in
+ * the 2nd index, a non-standard layout that should be reallocated by adjust_allocation.
+ */
+struct test_shape_alloc : verify_program<test_shape_alloc>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
-        auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
-        mm->add_instruction(migraphx::make_op("leaky_relu", {{"alpha", 0.01}}), x);
+        auto weights = mm->add_literal(migraphx::generate_literal(
+            migraphx::shape{migraphx::shape::float_type, {11, 8, 1, 1}, {8, 1, 1, 1}}));
+        auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 8, 7, 7}});
+        auto transpose1 = mm->add_instruction(
+            migraphx::make_op("transpose", {{"permutation", {0, 2, 3, 1}}}), x);
+        // -> float_type, {1, 7, 7, 8}, {392, 7, 1, 49}
+        auto reduce_ins = mm->add_instruction(
+            migraphx::make_op("reduce_mean", {{"axes", {1, 2}}}), transpose1);
+        // -> float_type, {1, 1, 1, 8}, {8, 8, 8, 1}
+        auto transpose2 = mm->add_instruction(
+            migraphx::make_op("transpose", {{"permutation", {0, 3, 1, 2}}}), reduce_ins);
+        // -> float_type, {1, 8, 1, 1}, {8, 1, 8, 8}
+        auto conv_op = migraphx::make_op("convolution");
+        mm->add_instruction(conv_op, transpose2, weights);
        return p;
    }
};
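
The doc comment on test_shape_alloc reasons about strides and standard layout. Here is a minimal sketch, independent of MIGraphX, of why the final transpose produces a non-standard layout; standard_strides is an illustrative helper for row-major strides, not a MIGraphX function:

#include <cstddef>
#include <iostream>
#include <vector>

// Row-major ("standard") strides for a given set of lengths.
std::vector<std::size_t> standard_strides(const std::vector<std::size_t>& lens)
{
    std::vector<std::size_t> strides(lens.size(), 1);
    for(int i = static_cast<int>(lens.size()) - 2; i >= 0; i--)
        strides[i] = strides[i + 1] * lens[i + 1];
    return strides;
}

int main()
{
    // Output of reduce_mean above: lens {1, 1, 1, 8} with standard strides {8, 8, 8, 1}.
    std::vector<std::size_t> lens    = {1, 1, 1, 8};
    std::vector<std::size_t> strides = standard_strides(lens);
    std::vector<std::size_t> perm    = {0, 3, 1, 2};

    // A transpose only permutes lens and strides; it does not move data.
    std::vector<std::size_t> new_lens, new_strides;
    for(auto p : perm)
    {
        new_lens.push_back(lens[p]);
        new_strides.push_back(strides[p]);
    }
    // new_lens = {1, 8, 1, 1}, new_strides = {8, 1, 8, 8}, while standard strides for
    // {1, 8, 1, 1} would be {8, 1, 1, 1}: the layout is non-standard, so adjust_allocation
    // has to reallocate the buffer before it is fed to the convolution.
    std::cout << (new_strides == standard_strides(new_lens) ? "standard" : "non-standard") << "\n";
    return 0;
}
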
tools/include/operation.hpp
...
...
@@ -32,6 +32,8 @@
#include <utility>
#include <unordered_map>
#include <migraphx/reflect.hpp>
#include <migraphx/dyn_output.hpp>
#include <migraphx/functional.hpp>
#include <migraphx/streamutils.hpp>
#include <migraphx/normalize_attributes.hpp>
#include <migraphx/argument.hpp>
...
...
@@ -199,9 +201,12 @@ auto compute_op(rank<1>,
                context& ctx,
                const shape& output_shape,
                const std::vector<argument>& input)
-    -> decltype(x.compute(auto_any_cast(ctx), output_shape, input))
+    -> decltype(x.compute(
+        auto_any_cast(ctx), make_compute_output_shape(pack(x, output_shape, input)), input))
{
-    return x.compute(auto_any_cast(ctx), output_shape, input);
+    return x.compute(
+        auto_any_cast(ctx), make_compute_output_shape(pack(x, output_shape, input)), input);
}

template <class T>
...
...
@@ -220,9 +225,9 @@ compute_op(const T& x, context& ctx, const shape& output_shape, const std::vecto
template <class T>
auto compute_op(rank<1>, const T& x, const shape& output_shape, const std::vector<argument>& input)
-    -> decltype(x.compute(output_shape, input))
+    -> decltype(x.compute(make_compute_output_shape(pack(x, output_shape, input)), input))
{
-    return x.compute(output_shape, input);
+    return x.compute(make_compute_output_shape(pack(x, output_shape, input)), input);
}

template <class T>
...
...
@@ -244,9 +249,11 @@ auto compute_op(rank<1>,
                const shape& output,
                const std::vector<argument>& inputs,
                const std::vector<module_ref>& module_args,
-                F f) -> decltype(x.compute(output, inputs, module_args, f))
+                F f) -> decltype(x.compute(
+                    make_compute_output_shape(pack(x, output, inputs)), inputs, module_args, f))
{
-    return x.compute(output, inputs, module_args, f);
+    return x.compute(make_compute_output_shape(pack(x, output, inputs)), inputs, module_args, f);
}

template <class T, class F>
...
...
@@ -278,9 +285,17 @@ auto compute_op(rank<4>,
                const shape& output,
                const std::vector<argument>& inputs,
                const std::vector<module_ref>& module_args,
-                F f) -> decltype(x.compute(auto_any_cast(ctx), output, inputs, module_args, f))
+                F f) -> decltype(x.compute(auto_any_cast(ctx),
+                                           make_compute_output_shape(pack(x, output, inputs)),
+                                           inputs,
+                                           module_args,
+                                           f))
{
-    return x.compute(auto_any_cast(ctx), output, inputs, module_args, f);
+    return x.compute(auto_any_cast(ctx),
+                     make_compute_output_shape(pack(x, output, inputs)),
+                     inputs,
+                     module_args,
+                     f);
}

template <class T, class F>
...
...
@@ -290,9 +305,11 @@ auto compute_op(rank<3>,
                const shape& output,
                const std::vector<argument>& inputs,
                const std::vector<module_ref>& module_args,
-                F f) -> decltype(x.compute(output, inputs, module_args, f))
+                F f) -> decltype(x.compute(
+                    make_compute_output_shape(pack(x, output, inputs)), inputs, module_args, f))
{
-    return x.compute(output, inputs, module_args, f);
+    return x.compute(make_compute_output_shape(pack(x, output, inputs)), inputs, module_args, f);
}

template <class T, class F>
...
...
@@ -302,9 +319,10 @@ auto compute_op(rank<2>,
                const shape& output,
                const std::vector<argument>& inputs,
                const std::vector<module_ref>&,
-                F) -> decltype(x.compute(output, inputs))
+                F) -> decltype(x.compute(make_compute_output_shape(pack(x, output, inputs)), inputs))
{
-    return x.compute(output, inputs);
+    return x.compute(make_compute_output_shape(pack(x, output, inputs)), inputs);
}

template <class T, class F>
...
...
@@ -314,9 +332,12 @@ auto compute_op(rank<1>,
                const shape& output,
                const std::vector<argument>& inputs,
                const std::vector<module_ref>&,
-                F) -> decltype(x.compute(auto_any_cast(ctx), output, inputs))
+                F) -> decltype(x.compute(
+                    auto_any_cast(ctx), make_compute_output_shape(pack(x, output, inputs)), inputs))
{
-    return x.compute(auto_any_cast(ctx), output, inputs);
+    return x.compute(
+        auto_any_cast(ctx), make_compute_output_shape(pack(x, output, inputs)), inputs);
}

template <class T, class F>
...
...
@@ -348,7 +369,8 @@ auto is_context_free_op(rank<1>,
                        const T& x,
                        const shape& output_shape,
                        const std::vector<argument>& input)
-    -> decltype(x.compute(output_shape, input), std::true_type{});
+    -> decltype(x.compute(make_compute_output_shape(pack(x, output_shape, input)), input),
+                std::true_type{});

template <class T>
auto is_context_free_op(rank<0>, const T&, const shape&, const std::vector<argument>&)
...
...
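
The compute_op overloads above rely on tag dispatch: rank<N> converts implicitly to rank<N-1>, so the highest-ranked overload whose trailing decltype expression is well formed wins, and this commit routes every viable compute call through make_compute_output_shape(pack(...)) before dispatching. A minimal self-contained sketch of the dispatch idiom follows (simplified types and names, not the MIGraphX definitions):

#include <iostream>
#include <vector>

// rank<N> inherits from rank<N-1>, ending at rank<0>, so higher ranks are "more derived"
// and are preferred during overload resolution when several overloads are viable.
template <int N>
struct rank : rank<N - 1>
{
};
template <>
struct rank<0>
{
};

// Preferred overload: only participates if the operator type has x.compute(shape, inputs).
template <class T>
auto compute_op(rank<1>, const T& x, int output_shape, const std::vector<int>& inputs)
    -> decltype(x.compute(output_shape, inputs))
{
    return x.compute(output_shape, inputs);
}

// Fallback when no suitable compute() exists; picked via the rank<1> -> rank<0> conversion.
template <class T>
int compute_op(rank<0>, const T&, int, const std::vector<int>&)
{
    return -1;
}

struct has_compute
{
    int compute(int output_shape, const std::vector<int>& inputs) const
    {
        return output_shape + static_cast<int>(inputs.size());
    }
};
struct no_compute
{
};

int main()
{
    std::vector<int> inputs{1, 2, 3};
    std::cout << compute_op(rank<1>{}, has_compute{}, 10, inputs) << "\n"; // 13
    std::cout << compute_op(rank<1>{}, no_compute{}, 10, inputs) << "\n";  // -1 (fallback)
    return 0;
}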