gaoqiong / MIGraphX / Commits / 23851d62

Commit 23851d62
Authored Oct 19, 2022 by Khalique Ahmed

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into develop

Parents: 41d4e92b, 5fa42993

Changes: 26
Showing 6 changed files with 160 additions and 18 deletions (+160, -18)
test/gpu/mlir.cpp                        +1   -1
test/onnx/batch_norm_rank_2_test.onnx    +32  -0
test/onnx/gen_onnx.py                    +18  -17
test/onnx/onnx_test.cpp                  +25  -0
test/onnx/verify_onnx.cpp                +37  -0
test/verify/quant_conv_1d.cpp            +47  -0
test/gpu/mlir.cpp  View file @ 23851d62

@@ -84,7 +84,7 @@ migraphx::program create_program_from_mlir(const migraphx::module& mmlir)
     inputs.push_back(mm->add_parameter("output", mmlir.get_output_shapes().front()));
     migraphx::gpu::context ctx;
-    migraphx::gpu::insert_mlir(*mm, mm->end(), compile_mlir(ctx, mmlir), inputs);
+    migraphx::gpu::insert_mlir(*mm, mm->end(), compile_mlir(ctx, mmlir, inputs), inputs);
     return p;
 }
...
test/onnx/batch_norm_invalid_rank_test.onnx → test/onnx/batch_norm_rank_2_test.onnx  View file @ 23851d62

(Binary ONNX protobuf; only a summary is recoverable from the diff: the graph is renamed from batch_norm_invalid_rank_test to batch_norm_rank_2_test, the BatchNormalization node gains an epsilon attribute, and the model keeps inputs x, scale, bias, mean, variance and output y. No newline at end of file.)
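Because the renamed file is binary protobuf and unreadable in the diff view, one way to confirm what changed is to load it with the onnx Python package and print the graph metadata. This is a small sketch, not part of the commit; it assumes the file sits at test/onnx/ and uses only standard onnx APIs.

    # Minimal sketch: inspect the regenerated test model to confirm the rename
    # and the new epsilon attribute. Assumes the .onnx file lives in test/onnx/.
    import onnx

    model = onnx.load("test/onnx/batch_norm_rank_2_test.onnx")
    print(model.graph.name)                      # expected: batch_norm_rank_2_test
    print([i.name for i in model.graph.input])   # x, scale, bias, mean, variance
    for node in model.graph.node:
        attrs = {a.name: onnx.helper.get_attribute_value(a) for a in node.attribute}
        print(node.op_type, attrs)               # BatchNormalization {'epsilon': 1e-06}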
test/onnx/gen_onnx.py  View file @ 23851d62

@@ -331,6 +331,24 @@ def batch_norm_flat_test():
     return ([node], [x, scale, bias, mean, var], [out])
 
 
+@onnx_test
+def batch_norm_rank_2_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 5])
+    scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [5])
+    bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [5])
+    mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [5])
+    var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [5])
+    out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 5])
+
+    node = onnx.helper.make_node('BatchNormalization',
+                                 inputs=['x', 'scale', 'bias', 'mean', 'variance'],
+                                 outputs=['y'],
+                                 epsilon=1e-6)
+
+    return ([node], [x, scale, bias, mean, var], [out])
+
+
 @onnx_test
 def batch_norm_1d_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [2, 3, 4])

@@ -385,23 +403,6 @@ def batch_norm_3d_test():
     return ([node], [x, scale, bias, mean, var], [out])
 
 
-@onnx_test
-def batch_norm_invalid_rank_test():
-    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [8, 8])
-    scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [8])
-    bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [8])
-    mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [8])
-    var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [8])
-    out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [8, 8])
-
-    node = onnx.helper.make_node('BatchNormalization',
-                                 inputs=['x', 'scale', 'bias', 'mean', 'variance'],
-                                 outputs=['y'])
-
-    return ([node], [x, scale, bias, mean, var], [out])
-
-
 @onnx_test
 def batch_norm_invalid_bias_rank_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3, 4, 4])
...
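The new generator relies on gen_onnx.py's @onnx_test decorator to serialize the returned nodes into batch_norm_rank_2_test.onnx; that machinery is not shown in this diff. As a rough standalone sketch, the same file could be produced directly with onnx.helper — the graph/model wrapping below is an assumption for illustration, not the script's actual decorator logic.

    # Standalone sketch of producing the rank-2 BatchNormalization test model.
    # The graph/model wrapping is assumed; gen_onnx.py does this via @onnx_test.
    import onnx
    from onnx import TensorProto, helper

    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 5])
    scale = helper.make_tensor_value_info('scale', TensorProto.FLOAT, [5])
    bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [5])
    mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [5])
    var = helper.make_tensor_value_info('variance', TensorProto.FLOAT, [5])
    out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 5])

    node = helper.make_node('BatchNormalization',
                            inputs=['x', 'scale', 'bias', 'mean', 'variance'],
                            outputs=['y'],
                            epsilon=1e-6)

    graph = helper.make_graph([node], 'batch_norm_rank_2_test',
                              [x, scale, bias, mean, var], [out])
    model = helper.make_model(graph)
    onnx.checker.check_model(model)
    onnx.save(model, 'batch_norm_rank_2_test.onnx')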
test/onnx/onnx_test.cpp  View file @ 23851d62

@@ -394,6 +394,31 @@ TEST_CASE(batch_norm_flat_test)
     EXPECT(p == prog);
 }
 
+TEST_CASE(batch_norm_rank_2_test)
+{
+    migraphx::program p;
+    auto* mm   = p.get_main_module();
+    auto x     = mm->add_parameter("x", {migraphx::shape::float_type, {2, 5}});
+    auto scale = mm->add_parameter("scale", {migraphx::shape::float_type, {5}});
+    auto bias  = mm->add_parameter("bias", {migraphx::shape::float_type, {5}});
+    auto mean  = mm->add_parameter("mean", {migraphx::shape::float_type, {5}});
+    auto var   = mm->add_parameter("variance", {migraphx::shape::float_type, {5}});
+    auto rt    = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
+    auto eps   = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-6f}});
+
+    auto numer   = add_common_op(*mm, migraphx::make_op("sub"), {x, mean});
+    auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {var, eps});
+    auto denom   = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
+    auto div0    = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
+    auto r0      = add_common_op(*mm, migraphx::make_op("mul"), {div0, scale});
+    add_common_op(*mm, migraphx::make_op("add"), {r0, bias});
+
+    auto prog = optimize_onnx("batch_norm_rank_2_test.onnx");
+    EXPECT(p == prog);
+}
+
 TEST_CASE(batch_norm_1d_test)
 {
     migraphx::program p;
...
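The parse test above pins down the decomposition the parser is expected to emit for a rank-2 BatchNormalization: y = (x - mean) / (variance + epsilon)^0.5 * scale + bias, with the per-channel [5] vectors broadcast against the [2, 5] input. A small numpy sketch (illustration only, not MIGraphX code) shows that this op-by-op sequence matches the closed-form expression:

    # Illustration of the decomposition checked by batch_norm_rank_2_test:
    # sub -> add(eps) -> pow(0.5) -> div -> mul(scale) -> add(bias).
    # Pure numpy, not MIGraphX; shapes mirror the test ([2, 5] input, [5] stats).
    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.standard_normal((2, 5)).astype(np.float32)
    scale = rng.standard_normal(5).astype(np.float32)
    bias = rng.standard_normal(5).astype(np.float32)
    mean = rng.standard_normal(5).astype(np.float32)
    var = rng.random(5).astype(np.float32) + 0.1
    eps = np.float32(1e-6)

    # Op-by-op, following the test's instruction sequence.
    numer = x - mean                      # sub ([5] broadcast over the rank-2 input)
    denom = (var + eps) ** 0.5            # add + pow
    y_ops = numer / denom * scale + bias  # div, mul, add

    # Closed-form batch normalization for comparison.
    y_ref = scale * (x - mean) / np.sqrt(var + eps) + bias
    assert np.allclose(y_ops, y_ref)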
test/onnx/verify_onnx.cpp  View file @ 23851d62

@@ -115,6 +115,43 @@ TEST_CASE(batch_norm_flat_test)
     EXPECT(migraphx::verify_range(result_vector, gold));
 }
 
+TEST_CASE(batch_norm_rank_2_test)
+{
+    migraphx::program p = migraphx::parse_onnx("batch_norm_rank_2_test.onnx");
+    p.compile(migraphx::ref::target{});
+
+    migraphx::shape x_shape{migraphx::shape::float_type, {2, 5}};
+    migraphx::shape c_shape(migraphx::shape::float_type, {5});
+    std::vector<float> x_data = {1., 2., 3., 4., 5., 6., 7., 8., 9., 10.};
+    std::vector<float> scale_data(5, 1.);
+    std::vector<float> bias_data(5, 0.);
+    std::vector<float> mean_data = {1., 2., 1., 2., 1.};
+    std::vector<float> variance_data(5, 0.5);
+
+    migraphx::parameter_map params;
+    params["x"]        = migraphx::argument(x_shape, x_data.data());
+    params["scale"]    = migraphx::argument(c_shape, scale_data.data());
+    params["bias"]     = migraphx::argument(c_shape, bias_data.data());
+    params["mean"]     = migraphx::argument(c_shape, mean_data.data());
+    params["variance"] = migraphx::argument(c_shape, variance_data.data());
+
+    auto result = p.eval(params).back();
+    std::vector<float> result_vector;
+    result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
+
+    std::vector<float> gold = {0.,
+                               0.,
+                               2.8284243,
+                               2.8284243,
+                               5.65684859,
+                               7.07106074,
+                               7.07106074,
+                               9.89948504,
+                               9.89948504,
+                               12.72790933};
+    EXPECT(migraphx::verify_range(result_vector, gold));
+}
+
 TEST_CASE(batch_norm_1d_test)
 {
     migraphx::program p = migraphx::parse_onnx("batch_norm_1d_test.onnx");
...
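The gold vector can be reproduced from the test's inputs with a few lines of numpy (a sketch for checking the numbers, not part of the test suite): with scale = 1 and bias = 0, each entry is simply (x - mean) / sqrt(0.5 + 1e-6).

    # Reproduce the gold values of batch_norm_rank_2_test with plain numpy.
    import numpy as np

    x = np.arange(1.0, 11.0, dtype=np.float32).reshape(2, 5)
    mean = np.array([1.0, 2.0, 1.0, 2.0, 1.0], dtype=np.float32)
    variance = np.full(5, 0.5, dtype=np.float32)
    scale = np.ones(5, dtype=np.float32)
    bias = np.zeros(5, dtype=np.float32)
    eps = 1e-6

    y = scale * (x - mean) / np.sqrt(variance + eps) + bias
    print(y.ravel())
    # ~[0. 0. 2.828424 2.828424 5.656849 7.071061 7.071061 9.899485 9.899485 12.727909]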
src/targets/gpu/include/migraphx/gpu/deconvolution.hpp → test/verify/quant_conv_1d.cpp  View file @ 23851d62

@@ -21,47 +21,27 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  * THE SOFTWARE.
  */
-#ifndef MIGRAPHX_GUARD_RTGLIB_DECONVOLUTION_HPP
-#define MIGRAPHX_GUARD_RTGLIB_DECONVOLUTION_HPP
-
-#include <migraphx/shape.hpp>
-#include <migraphx/op/deconvolution.hpp>
-#include <migraphx/gpu/miopen.hpp>
-
-namespace migraphx {
-inline namespace MIGRAPHX_INLINE_NS {
-namespace gpu {
-
-struct context;
-
-struct miopen_deconvolution
-{
-    op::deconvolution op;
-    shared<convolution_descriptor> cd;
-    miopenConvFwdAlgorithm_t algo{};
-    uint64_t solution_id = 0;
-
-    template <class Self, class F>
-    static auto reflect(Self& self, F f)
-    {
-        return pack_join(op::deconvolution::reflect(self.op, f),
-                         pack(f(self.solution_id, "solution_id")));
-    }
-
-    std::string name() const { return "gpu::deconv"; }
-    shape compute_shape(const std::vector<shape>& inputs) const;
-    argument compute(context& ctx,
-                     const shape& output_shape,
-                     const std::vector<argument>& args) const;
-    shape find(context& ctx, const shape& output_shape, std::vector<shape> inputs);
-    void finalize(context& ctx, const shape& output_shape, std::vector<shape> inputs);
-    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
+#include "verify_program.hpp"
+#include <migraphx/program.hpp>
+#include <migraphx/generate.hpp>
+#include <migraphx/make_op.hpp>
+
+struct quant_conv_1d : verify_program<quant_conv_1d>
+{
+    migraphx::program create_program() const
     {
-        return shapes.size() - 1;
+        migraphx::program p;
+        auto* mm = p.get_main_module();
+        migraphx::shape a_shape{migraphx::shape::int8_type, {2, 3, 4}};
+        auto pa = mm->add_parameter("a", a_shape);
+        migraphx::shape c_shape{migraphx::shape::int8_type, {2, 3, 3}};
+        auto pc = mm->add_parameter("c", c_shape);
+        mm->add_instruction(
+            migraphx::make_op("quant_convolution",
+                              {{"padding", {0}}, {"stride", {1}}, {"dilation", {1}}}),
+            pa,
+            pc);
+        return p;
     }
 };
-
-} // namespace gpu
-} // namespace MIGRAPHX_INLINE_NS
-} // namespace migraphx
-#endif
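The new verify test drives a 1-D quant_convolution with int8 input a of shape {2, 3, 4} (N, C_in, L) and int8 weights c of shape {2, 3, 3} (C_out, C_in, K), with padding 0, stride 1, dilation 1, so the output should have shape {2, 2, 2} with integer accumulation. Below is a small numpy reference for that configuration; it is an illustration only, since the verify harness actually compares the GPU result against MIGraphX's reference target rather than against numpy.

    # numpy reference for the 1-D quant_convolution in the test above:
    # int8 input (N=2, C_in=3, L=4), int8 weights (C_out=2, C_in=3, K=3),
    # padding 0, stride 1, dilation 1 -> int32 output of shape (2, 2, 2).
    import numpy as np

    def quant_conv1d(a, w, stride=1):
        n, c_in, length = a.shape
        c_out, _, k = w.shape
        l_out = (length - k) // stride + 1
        out = np.zeros((n, c_out, l_out), dtype=np.int32)
        for i in range(n):
            for o in range(c_out):
                for j in range(l_out):
                    window = a[i, :, j * stride:j * stride + k].astype(np.int32)
                    out[i, o, j] = np.sum(window * w[o].astype(np.int32))
        return out

    rng = np.random.default_rng(0)
    a = rng.integers(-128, 128, size=(2, 3, 4), dtype=np.int8)
    w = rng.integers(-128, 128, size=(2, 3, 3), dtype=np.int8)
    print(quant_conv1d(a, w).shape)  # (2, 2, 2)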