gaoqiong / MIGraphX · Commits · 18cf0435

Unverified commit 18cf0435, authored Apr 18, 2023 by Umang Yadav, committed by GitHub on Apr 18, 2023.

    Merge branch 'develop' into blas_tuning

Parents: 12258d8f, 3e8d7196
Changes: 81

Showing 20 changed files with 766 additions and 145 deletions (+766 −145).
src/targets/ref/lowering.cpp              +2    −108
test/CMakeLists.txt                       +2    −2
test/api/CMakeLists.txt                   +3    −3
test/fuse_pointwise.cpp                   +32   −0
test/fuse_reduce.cpp                      +330  −0
test/gpu/hip.cpp                          +20   −1
test/gpu/mlir.cpp                         +33   −0
test/include/pointwise.hpp                +10   −1
test/onnx/.onnxrt-commit                  +1    −1
test/onnx/onnx_test.cpp                   +19   −2
test/op_shape_test.cpp                    +83   −0
test/py/test_shape.py                     +10   −0
test/rewrite_quantization_test.cpp        +17   −9
test/shape_test.cpp                       +43   −0
test/simplify_algebra_test.cpp            +28   −0
test/simplify_qdq_test.cpp                +10   −5
test/split_single_dyn_dim_test.cpp        +66   −6
test/targets.cpp                          +5    −3
test/verify/test_add_conv_constant.cpp    +45   −0
test/verify/test_quantizelinear_int32.cpp +7    −4
src/targets/ref/lowering.cpp

@@ -132,109 +132,6 @@ auto visit_quantize(T&& x, Ts&&... xs)
         };
     }
 
-template <class Op>
-struct ref_convolution : auto_register_op<ref_convolution<Op>>
-{
-    ref_convolution() = default;
-    ref_convolution(Op pop) : op(std::move(pop)) {}
-
-    Op op;
-
-    template <class Self, class F>
-    static auto reflect(Self& self, F f)
-    {
-        return migraphx::reflect(self.op, f);
-    }
-
-    std::string name() const { return "ref::" + op.name(); }
-
-    shape compute_shape(const std::vector<shape>& inputs) const
-    {
-        return op.normalize_compute_shape(inputs);
-    }
-
-    argument compute(context&, shape output_shape, std::vector<argument> args) const
-    {
-        std::vector<std::size_t> padding;
-        if(op.padding_mode != op::padding_mode_t::default_)
-        {
-            auto input_lens   = args[0].get_shape().lens();
-            auto weights_lens = args[1].get_shape().lens();
-            padding = op.padding_mode == op::same_upper
-                          ? calc_dyn_auto_pad(input_lens, weights_lens, op.stride, op.dilation, true)
-                          : calc_dyn_auto_pad(input_lens, weights_lens, op.stride, op.dilation, false);
-            output_shape = compute_padded_shape(
-                args[0].get_shape(), args[1].get_shape(), padding, op.stride, op.dilation);
-        }
-        else
-        {
-            padding = op.padding;
-            if(output_shape.dynamic())
-            {
-                output_shape =
-                    op.normalize_compute_shape({args.at(0).get_shape(), args.at(1).get_shape()});
-            }
-        }
-
-        argument result{output_shape};
-        visit_quantize(result, args[0], args[1])([&](auto output, auto input, auto weights) {
-            auto in_lens  = input.get_shape().lens();
-            auto wei_lens = weights.get_shape().lens();
-            auto wei_n    = wei_lens[0];
-            auto wei_c    = wei_lens[1];
-            std::vector<std::size_t> win_size(wei_lens.begin() + 1, wei_lens.end());
-            par_for(output_shape.elements(), [&](auto i) {
-                auto idx_o = output_shape.multi(i);
-                auto w     = idx_o[1];
-                auto n_dim = idx_o.size();
-                std::vector<std::ptrdiff_t> win_start;
-                for(std::size_t dim = 2; dim < n_dim; ++dim)
-                {
-                    auto d_2 = dim - 2;
-                    win_start.push_back(std::ptrdiff_t(idx_o[dim] * op.stride[d_2]) -
-                                        std::ptrdiff_t(padding[d_2]));
-                }
-                const auto group_id = w / (wei_n / op.group);
-                shape win_shape{output_shape.type(), win_size};
-                double acc = 0.0;
-                shape_for_each(win_shape, [&](auto idx_win) {
-                    auto k           = idx_win[0];
-                    const auto in_ch = group_id * wei_c + k;
-                    std::vector<std::ptrdiff_t> idx(idx_o.begin(), idx_o.end());
-                    idx[1] = in_ch;
-                    std::transform(idx_win.begin() + 1,
-                                   idx_win.end(),
-                                   win_start.begin(),
-                                   idx.begin() + 2,
-                                   [](std::ptrdiff_t ii, std::ptrdiff_t jj) { return ii + jj; });
-                    std::vector<std::ptrdiff_t> idx_wei(idx_o.size());
-                    idx_wei[0] = w;
-                    std::copy(idx_win.begin(), idx_win.end(), idx_wei.begin() + 1);
-                    if(std::all_of(idx.begin() + 2, idx.end(), [&](auto ii) { return ii >= 0; }) and
-                       std::equal(idx.begin(),
-                                  idx.end(),
-                                  in_lens.begin(),
-                                  in_lens.end(),
-                                  std::less<std::ptrdiff_t>{}))
-                    {
-                        acc += input(idx.begin(), idx.end()) *
-                               weights(idx_wei.begin(), idx_wei.end());
-                    }
-                });
-                output[i] = acc;
-            });
-        });
-        return result;
-    }
-};
-
 struct ref_im2col
 {
     op::im2col op;
...
@@ -564,11 +461,8 @@ struct ref_apply
     void init()
     {
-        apply_map["convolution"] = extend_op<ref_convolution<op::convolution>, op::convolution>();
         apply_map["dot"]         = extend_op<ref_gemm, op::dot>();
         apply_map["quant_dot"]   = extend_op<ref_quant_gemm, op::quant_dot>();
-        apply_map["quant_convolution"] =
-            extend_op<ref_convolution<op::quant_convolution>, op::quant_convolution>();
         apply_map["im2col"]      = extend_op<ref_im2col, op::im2col>();
         apply_map["logsoftmax"]  = extend_op<ref_softmax<op::logsoftmax>, op::logsoftmax>();
         apply_map["lrn"]         = extend_op<ref_lrn, op::lrn>();
...
test/CMakeLists.txt

@@ -110,7 +110,7 @@ function(add_test_executable TEST_NAME)
     add_test_command(${TEST_NAME} ${TEST_COMMAND})
     add_dependencies(tests ${TEST_NAME})
     add_dependencies(check ${TEST_NAME})
-    target_link_libraries(${TEST_NAME} migraphx migraphx_onnx)
+    target_link_libraries(${TEST_NAME} migraphx migraphx_onnx migraphx_ref)
     target_include_directories(${TEST_NAME} PUBLIC include)
 endfunction(add_test_executable)
...
@@ -163,7 +163,7 @@ foreach(ONNX_TEST ${ONNX_TESTS})
     set(TEST_NAME test_${BASE_NAME})
     add_executable(${TEST_NAME} ${ONNX_TEST})
     rocm_clang_tidy_check(${TEST_NAME})
-    target_link_libraries(${TEST_NAME} migraphx_onnx)
+    target_link_libraries(${TEST_NAME} migraphx_onnx migraphx_ref)
     target_include_directories(${TEST_NAME} PUBLIC include)
     add_test(NAME ${TEST_NAME} COMMAND $<TARGET_FILE:${TEST_NAME}> WORKING_DIRECTORY ${TEST_ONNX_DIR})
     add_dependencies(tests ${TEST_NAME})
...
test/api/CMakeLists.txt

@@ -25,7 +25,7 @@ function(add_api_test TEST_NAME TEST_SRC TEST_DIR)
     set(NAME test_api_${TEST_NAME})
     add_executable(${NAME} EXCLUDE_FROM_ALL ${TEST_SRC})
     rocm_clang_tidy_check(${NAME})
-    target_link_libraries(${NAME} migraphx_c migraphx)
+    target_link_libraries(${NAME} migraphx_c migraphx migraphx_all_targets)
     target_include_directories(${NAME} PUBLIC ../include)
     add_test(NAME ${NAME} COMMAND $<TARGET_FILE:${NAME}> WORKING_DIRECTORY ${TEST_DIR})
     add_dependencies(tests ${NAME})
...
@@ -59,7 +59,7 @@ if(MIGRAPHX_ENABLE_GPU)
     list(APPEND CMAKE_PREFIX_PATH /opt/rocm)
     find_package(hip)
     add_api_test(gpu test_gpu.cpp ${TEST_ONNX_DIR})
-    target_link_libraries(test_api_gpu hip::host)
+    target_link_libraries(test_api_gpu)
     add_api_test(custom_op_gpu test_custom_op_gpu.cpp ${TEST_ONNX_DIR})
-    target_link_libraries(test_api_custom_op_gpu hip::host)
+    target_link_libraries(test_api_custom_op_gpu)
 endif()
test/fuse_pointwise.cpp

@@ -329,4 +329,36 @@ TEST_CASE(all_scalar_input)
     EXPECT(p1 == p2);
 }
 
+TEST_CASE(no_input)
+{
+    migraphx::program p;
+    {
+        auto* mm = p.get_main_module();
+        migraphx::shape g_shape{migraphx::shape::int64_type, {1}, {0}};
+        migraphx::shape s_indices{migraphx::shape::int32_type, {3}};
+        std::vector<int> indices{3, 800, 800};
+        auto a0  = mm->add_literal(migraphx::literal{s_indices, indices});
+        auto a1  = mm->add_literal(migraphx::literal{g_shape, {1}});
+        int axis = 0;
+        auto out = mm->add_instruction(migraphx::make_op("gather", {{"axis", axis}}), a0, a1);
+        mm->add_return({out});
+    }
+    run_pass(p);
+    // This should NOT create a pointwise module if there are no inputs here.
+    migraphx::program p2;
+    {
+        auto* mm = p2.get_main_module();
+        migraphx::shape g_shape{migraphx::shape::int64_type, {1}, {0}};
+        migraphx::shape s_indices{migraphx::shape::int32_type, {3}};
+        std::vector<int> indices{3, 800, 800};
+        auto a0  = mm->add_literal(migraphx::literal{s_indices, indices});
+        auto a1  = mm->add_literal(migraphx::literal{g_shape, {1}});
+        int axis = 0;
+        auto out = mm->add_instruction(migraphx::make_op("gather", {{"axis", axis}}), a0, a1);
+        mm->add_return({out});
+    }
+    EXPECT(p == p2);
+}
+
 int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/fuse_reduce.cpp (new file, 0 → 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/fuse_reduce.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/program.hpp>
#include <basic_ops.hpp>
#include <migraphx/make_op.hpp>
#include <test.hpp>
#include <pointwise.hpp>
void run_pass(migraphx::program& p)
{
    migraphx::run_passes(p, {migraphx::fuse_reduce{}, migraphx::dead_code_elimination{}});
}

bool all_instructions_are_local(const migraphx::module& m)
{
    return std::all_of(m.begin(), m.end(), [&](const auto& ins) {
        return std::all_of(ins.inputs().begin(), ins.inputs().end(), [&](auto input) {
            return m.has_instruction(input);
        });
    });
}

template <class F>
migraphx::instruction_ref add_reduce(migraphx::program& p,
                                     const std::string& name,
                                     std::vector<migraphx::instruction_ref> inputs,
                                     const std::vector<int64_t>& axes,
                                     F f)
{
    auto* rm = p.create_module(name);
    auto* mm = p.get_main_module();
    rm->set_bypass();
    std::vector<migraphx::instruction_ref> params;
    std::transform(inputs.begin(), inputs.end(), std::back_inserter(params), [&](auto input) {
        return rm->add_parameter(
            "x" + std::to_string(params.size()),
            migraphx::shape{input->get_shape().type(), input->get_shape().lens()});
    });
    auto r = f(rm, params, axes);
    rm->add_return({r});
    EXPECT(all_instructions_are_local(*rm));
    return mm->add_instruction(migraphx::make_op("fused_reduce", {{"axes", axes}}), inputs, {rm});
}

inline auto single_reduce(const std::string& name)
{
    return [=](auto* rm, const auto& inputs, const auto& axes) {
        return rm->add_instruction(migraphx::make_op(name, {{"axes", axes}}), inputs);
    };
}

TEST_CASE(single)
{
    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    migraphx::program p1;
    {
        auto* mm   = p1.get_main_module();
        auto x     = mm->add_parameter("x", s);
        auto y     = mm->add_parameter("y", s);
        auto rsum1 = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), x);
        auto rsum2 = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), y);
        mm->add_return({rsum1, rsum2});
    }
    run_pass(p1);
    migraphx::program p2;
    {
        auto* mm   = p2.get_main_module();
        auto x     = mm->add_parameter("x", s);
        auto y     = mm->add_parameter("y", s);
        auto rsum1 = add_reduce(p2, "main:reduce_sum0", {x}, {1}, single_reduce("reduce_sum"));
        auto rsum2 = add_reduce(p2, "main:reduce_sum1", {y}, {1}, single_reduce("reduce_sum"));
        mm->add_return({rsum1, rsum2});
    }
    EXPECT(p1 == p2);
}

TEST_CASE(pointwise_reduce)
{
    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    migraphx::program p1;
    {
        auto* mm  = p1.get_main_module();
        auto x    = mm->add_parameter("x", s);
        auto y    = mm->add_parameter("y", s);
        auto add  = add_pointwise(p1, "main:pointwise0", {x, y}, single_pointwise("add"));
        auto rsum = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), add);
        mm->add_return({rsum});
    }
    run_pass(p1);
    migraphx::program p2;
    {
        auto* mm  = p2.get_main_module();
        auto x    = mm->add_parameter("x", s);
        auto y    = mm->add_parameter("y", s);
        auto rsum = add_reduce(
            p2,
            "main:pointwise0:main:reduce_sum0",
            {x, y},
            {1},
            [&](auto* rm, const auto& inputs, const auto& axes) {
                auto add =
                    add_pointwise(p2, rm, "main:pointwise0", inputs, single_pointwise("add"));
                return rm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", axes}}), add);
            });
        mm->add_return({rsum});
    }
    EXPECT(p1 == p2);
}

TEST_CASE(reduce_pointwise)
{
    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    migraphx::program p1;
    {
        auto* mm   = p1.get_main_module();
        auto x     = mm->add_parameter("x", s);
        auto y     = mm->add_parameter("y", s);
        auto rsum  = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), x);
        auto rsumb = mm->add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), rsum);
        auto add = add_pointwise(p1, "main:pointwise0", {rsumb, y}, single_pointwise("add"));
        mm->add_return({add});
    }
    run_pass(p1);
    migraphx::program p2;
    {
        auto* mm = p2.get_main_module();
        auto x   = mm->add_parameter("x", s);
        auto y   = mm->add_parameter("y", s);
        auto add = add_reduce(
            p2,
            "main:reduce_sum0:main:pointwise0",
            {x, y},
            {1},
            [&](auto* rm, const auto& inputs, const auto& axes) {
                auto rsum = rm->add_instruction(
                    migraphx::make_op("reduce_sum", {{"axes", axes}}), inputs[0]);
                auto rsumb = rm->add_instruction(
                    migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), rsum);
                return add_pointwise(
                    p2, rm, "main:pointwise0", {rsumb, inputs[1]}, single_pointwise("add"));
            });
        mm->add_return({add});
    }
    EXPECT(p1 == p2);
}

TEST_CASE(reduce_reduce)
{
    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    migraphx::program p1;
    {
        auto* mm   = p1.get_main_module();
        auto x     = mm->add_parameter("x", s);
        auto rsum  = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), x);
        auto rsumb = mm->add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), rsum);
        auto rsumdiff = add_pointwise(p1, "main:pointwise0", {rsumb, x}, single_pointwise("sub"));
        auto rsum2 =
            mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), rsumdiff);
        auto sqrt = add_pointwise(p1, "main:pointwise1", {rsum2}, single_pointwise("sqrt"));
        mm->add_return({sqrt});
    }
    run_pass(p1);
    migraphx::program p2;
    {
        auto* mm  = p2.get_main_module();
        auto x    = mm->add_parameter("x", s);
        auto sqrt = add_reduce(
            p2,
            "main:reduce_sum1:main:reduce_sum0:main:pointwise0:main:pointwise1",
            {x},
            {1},
            [&](auto* rm, const auto& inputs, const auto& axes) {
                auto rsum = rm->add_instruction(
                    migraphx::make_op("reduce_sum", {{"axes", axes}}), inputs[0]);
                auto rsumb = rm->add_instruction(
                    migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), rsum);
                auto rsumdiff = add_pointwise(
                    p2, rm, "main:pointwise0", {rsumb, inputs[0]}, single_pointwise("sub"));
                auto rsum2 = rm->add_instruction(
                    migraphx::make_op("reduce_sum", {{"axes", axes}}), rsumdiff);
                return add_pointwise(p2, rm, "main:pointwise1", {rsum2}, single_pointwise("sqrt"));
            });
        mm->add_return({sqrt});
    }
    EXPECT(p1 == p2);
}

TEST_CASE(reduce_reduce_mismatch_axis)
{
    migraphx::shape s{migraphx::shape::float_type, {4, 2, 3}};
    migraphx::program p1;
    {
        auto* mm   = p1.get_main_module();
        auto x     = mm->add_parameter("x", s);
        auto rsum1 = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), x);
        auto rsum2 = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {2}}}), rsum1);
        mm->add_return({rsum2});
    }
    run_pass(p1);
    migraphx::program p2;
    {
        auto* mm   = p2.get_main_module();
        auto x     = mm->add_parameter("x", s);
        auto rsum1 = add_reduce(p2, "main:reduce_sum0", {x}, {1}, single_reduce("reduce_sum"));
        auto rsum2 = add_reduce(p2, "main:reduce_sum1", {rsum1}, {2}, single_reduce("reduce_sum"));
        mm->add_return({rsum2});
    }
    EXPECT(p1 == p2);
}

TEST_CASE(pointwise_reduce_broadcast)
{
    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    migraphx::program p1;
    {
        auto* mm   = p1.get_main_module();
        auto x     = mm->add_parameter("x", s);
        auto rsum1 = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), x);
        auto sqrt  = add_pointwise(p1, "main:pointwise0", {rsum1}, single_pointwise("sqrt"));
        auto sqrtb = mm->add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), sqrt);
        auto add1  = add_pointwise(p1, "main:pointwise1", {sqrtb, x}, single_pointwise("add"));
        auto rsum2 = mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), add1);
        auto add2  = add_pointwise(p1, "main:pointwise2", {rsum2, rsum1}, single_pointwise("add"));
        mm->add_return({add2});
    }
    run_pass(p1);
    migraphx::program p2;
    {
        auto* mm  = p2.get_main_module();
        auto x    = mm->add_parameter("x", s);
        auto add2 = add_reduce(
            p2,
            "main:pointwise0:main:pointwise1:main:reduce_sum1:main:pointwise2:main:reduce_sum0",
            {x},
            {1},
            [&](auto* rm, const auto& inputs, const auto& axes) {
                auto rsum1 = rm->add_instruction(
                    migraphx::make_op("reduce_sum", {{"axes", axes}}), inputs[0]);
                auto sqrt =
                    add_pointwise(p2, rm, "main:pointwise0", {rsum1}, single_pointwise("sqrt"));
                auto sqrtb = rm->add_instruction(
                    migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), sqrt);
                auto add1 = add_pointwise(
                    p2, rm, "main:pointwise1", {sqrtb, inputs[0]}, single_pointwise("add"));
                auto rsum2 = rm->add_instruction(
                    migraphx::make_op("reduce_sum", {{"axes", axes}}), add1);
                return add_pointwise(
                    p2, rm, "main:pointwise2", {rsum2, rsum1}, single_pointwise("add"));
            });
        mm->add_return({add2});
    }
    EXPECT(p1 == p2);
}

TEST_CASE(reduce_reduce_broadcast)
{
    migraphx::shape s{migraphx::shape::float_type, {4, 2, 3}};
    migraphx::program p1;
    {
        auto* mm   = p1.get_main_module();
        auto x     = mm->add_parameter("x", s);
        auto rsum1 = add_reduce(p1, "test:reduce_sum0", {x}, {1}, single_reduce("reduce_sum"));
        auto rsumb = mm->add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), rsum1);
        auto add = add_reduce(
            p1,
            "test:reduce_sum1",
            {rsumb, x},
            {1},
            [&](auto* rm, const auto& inputs, const auto& axes) {
                auto add2 =
                    add_pointwise(p1, rm, "test:pointwise0", inputs, single_pointwise("add"));
                return rm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", axes}}), add2);
            });
        mm->add_return({add});
    }
    run_pass(p1);
    migraphx::program p2;
    {
        auto* mm  = p2.get_main_module();
        auto x    = mm->add_parameter("x", s);
        auto rsum = add_reduce(
            p2,
            "test:reduce_sum1:test:reduce_sum0",
            {x},
            {1},
            [&](auto* rm, const auto& inputs, const auto& axes) {
                auto rsum1 = rm->add_instruction(
                    migraphx::make_op("reduce_sum", {{"axes", axes}}), inputs[0]);
                auto rsumb = rm->add_instruction(
                    migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), rsum1);
                auto add = add_pointwise(
                    p2, rm, "test:pointwise0", {rsumb, inputs[0]}, single_pointwise("add"));
                return rm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", axes}}), add);
            });
        mm->add_return({rsum});
    }
    EXPECT(p1 == p2);
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/gpu/hip.cpp

@@ -27,7 +27,7 @@
 #include <migraphx/gpu/hip.hpp>
 #include <migraphx/gpu/target.hpp>
 
-TEST_CASE(tuple_to_from_gpu)
+TEST_CASE(tuple_from_gpu)
 {
     migraphx::shape s1{migraphx::shape::float_type, {2, 3}};
     migraphx::shape s2{migraphx::shape::int32_type, {2, 4}};
...
@@ -47,4 +47,23 @@ TEST_CASE(tuple_to_from_gpu)
     EXPECT(result2 == p2_data);
 }
 
+TEST_CASE(tuple_to_gpu)
+{
+    migraphx::shape s1{migraphx::shape::float_type, {2, 3}};
+    migraphx::shape s2{migraphx::shape::int32_type, {2, 4}};
+    std::vector<float> p1_data = {1.1, 2.2, 3.3, 4.4, 5.5, 6.6};
+    std::vector<int> p2_data   = {1, 2, 3, 4, 5, 6, 7, 8};
+    auto p1     = migraphx::argument{s1, p1_data.data()};
+    auto p2     = migraphx::argument{s2, p2_data.data()};
+    auto p_gpu  = migraphx::gpu::to_gpu(migraphx::argument({p1, p2}));
+    auto p_host = migraphx::gpu::from_gpu(p_gpu);
+    std::vector<migraphx::argument> results = p_host.get_sub_objects();
+    std::vector<float> result1;
+    results[0].visit([&](auto output) { result1.assign(output.begin(), output.end()); });
+    std::vector<int> result2;
+    results[1].visit([&](auto output) { result2.assign(output.begin(), output.end()); });
+    EXPECT(result1 == p1_data);
+    EXPECT(result2 == p2_data);
+}
+
 int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/gpu/mlir.cpp

@@ -213,4 +213,37 @@ module {
     EXPECT(verify_mlir(m));
 }
 
+TEST_CASE(conv_int8_dequantize_quantize)
+{
+    const std::string mlir_output = R"__migraphx__(
+module {
+  func.func @main(%arg0: tensor<2x8x3x3xi8>, %arg1: tensor<1x8x4x4xi8>, %arg2: tensor<1x2x2x2xf32>, %arg3: tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xi32> attributes {arch = "", kernel = "mixr"} {
+    %0 = migraphx.quant_convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xi8>, tensor<2x8x3x3xi8>) -> tensor<1x2x2x2xi32>
+    %1 = migraphx.dequantizelinear(%0, %arg2, %arg3) : (tensor<1x2x2x2xi32>, tensor<1x2x2x2xf32>, tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xf32>
+    %2 = migraphx.quantizelinear(%1, %arg2, %arg3) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>, tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xi32>
+    return %2 : tensor<1x2x2x2xi32>
+  }
+}
+)__migraphx__";
+    migraphx::module m;
+    auto x    = m.add_parameter("x", {migraphx::shape::int8_type, {1, 8, 4, 4}});
+    auto w    = m.add_parameter("w", {migraphx::shape::int8_type, {2, 8, 3, 3}});
+    auto conv = m.add_instruction(migraphx::make_op("quant_convolution"), x, w);
+    migraphx::shape ss{migraphx::shape::float_type, {1, 2, 2, 2}};
+    migraphx::shape sz{migraphx::shape::int32_type, {1, 2, 2, 2}};
+    auto input2  = m.add_parameter("x_scale", ss);
+    auto input3  = m.add_parameter("x_zero_point", sz);
+    auto dequant = m.add_instruction(migraphx::make_op("dequantizelinear"), conv, input2, input3);
+    auto r       = m.add_instruction(migraphx::make_op("quantizelinear"), dequant, input2, input3);
+    m.add_return({r});
+    auto s = migraphx::gpu::dump_mlir(m);
+    // Skip test if MLIR is not enabled
+    if(s.empty())
+        return;
+    CHECK(encode(s) == encode(mlir_output));
+    EXPECT(verify_mlir(m));
+}
+
 int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/include/pointwise.hpp

@@ -30,12 +30,12 @@
 template <class F>
 migraphx::instruction_ref add_pointwise(migraphx::program& p,
+                                        migraphx::module_ref mm,
                                         const std::string& name,
                                         std::vector<migraphx::instruction_ref> inputs,
                                         F f)
 {
     auto* pm = p.create_module(name);
-    auto* mm = p.get_main_module();
     pm->set_bypass();
     std::vector<migraphx::instruction_ref> params;
     std::transform(inputs.begin(), inputs.end(), std::back_inserter(params), [&](auto input) {
...
@@ -47,6 +47,15 @@ migraphx::instruction_ref add_pointwise(migraphx::program& p,
     return mm->add_instruction(migraphx::make_op("pointwise"), inputs, {pm});
 }
 
+template <class F>
+migraphx::instruction_ref add_pointwise(migraphx::program& p,
+                                        const std::string& name,
+                                        std::vector<migraphx::instruction_ref> inputs,
+                                        F f)
+{
+    return add_pointwise(p, p.get_main_module(), name, inputs, f);
+}
+
 inline auto single_pointwise(const std::string& name)
 {
     return [=](auto* pm, const auto& inputs) {
...
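The new overload keeps existing call sites working while letting the fuse_reduce tests place the pointwise result inside another module. A minimal sketch of both call styles, assuming a program p, instructions x and y, and a reduce module rm as used in test/fuse_reduce.cpp:

    // Pointwise module created from, and returned into, the main module (original style).
    auto add0 = add_pointwise(p, "main:pointwise0", {x, y}, single_pointwise("add"));
    // Pointwise module whose result is consumed inside another module, e.g. a fused_reduce body.
    auto add1 = add_pointwise(p, rm, "main:pointwise0", {x, y}, single_pointwise("add"));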
test/onnx/.onnxrt-commit

-ad4db1269972f92fdba932bb5770943291be3ca5
+c294040bac0e34bd7ef0cb97424bace7998900e7
test/onnx/onnx_test.cpp

@@ -4959,13 +4959,13 @@ TEST_CASE(reducemax_dyn_test)
     migraphx::program p;
     auto* mm = p.get_main_module();
     auto l0  = mm->add_parameter(
-        "x", migraphx::shape{migraphx::shape::float_type, {{3, 3}, {4, 4}, {5, 5}, {6, 6}}});
+        "x", migraphx::shape{migraphx::shape::float_type, {{3, 5}, {4, 4}, {5, 5}, {6, 6}}});
     auto r0 = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {2}}}), l0);
     auto r1 = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {2}}}), r0);
     mm->add_return({r1});
 
     migraphx::onnx_options options;
-    options.map_dyn_input_dims["x"] = {{3, 3}, {4, 4}, {5, 5}, {6, 6}};
+    options.map_dyn_input_dims["x"] = {{3, 5}, {4, 4}, {5, 5}, {6, 6}};
     auto prog = migraphx::parse_onnx("reducemax_dyn_test.onnx", options);
 
     EXPECT(p == prog);
...
@@ -6953,6 +6953,23 @@ TEST_CASE(variable_batch_user_input_test6)
     EXPECT(test::throws([&] { migraphx::parse_onnx("variable_batch_test.onnx", options); }));
 }
 
+TEST_CASE(variable_batch_user_input_test7)
+{
+    // if entry in map_dyn_input_dims is all fixed dynamic_dimensions, convert it to a static shape
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto l0 =
+        mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 16, 16}});
+    auto r = mm->add_instruction(migraphx::make_op("identity"), l0);
+    mm->add_return({r});
+
+    migraphx::onnx_options options;
+    options.map_dyn_input_dims["0"] = {{2, 2, {2}}, {3, 3}, {16, 16}, {16, 16}};
+    auto prog = migraphx::parse_onnx("variable_batch_test.onnx", options);
+
+    EXPECT(p == prog);
+}
+
 TEST_CASE(variable_batch_leq_zero_test)
 {
     migraphx::program p;
...
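The new test exercises map_dyn_input_dims entries whose dynamic dimensions are all fixed, which the parser now folds into a static parameter shape. A minimal usage sketch, assuming a hypothetical model file model.onnx with an input named "x"; each entry is a {min, max} pair, optionally with a set of preferred sizes (e.g. {2, 2, {2}}), as in the tests above:

    migraphx::onnx_options options;
    // First dimension is dynamic (batch 1..4); the remaining dimensions are fixed.
    options.map_dyn_input_dims["x"] = {{1, 4}, {3, 3}, {16, 16}, {16, 16}};
    auto prog = migraphx::parse_onnx("model.onnx", options);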
test/op_shape_test.cpp

@@ -1822,6 +1822,33 @@ TEST_CASE(pad_dyn_shape1)
     expect_shape(output, migraphx::make_op("pad", {{"pads", {0, 0, 1, 1, 0, 0, 1, 1}}}), input);
 }
 
+TEST_CASE(pointwise_no_module)
+{
+    migraphx::shape input{migraphx::shape::float_type, {0}, {0}};
+    throws_shape(migraphx::make_op("pointwise"), input);
+}
+
+TEST_CASE(pointwise_no_input)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    migraphx::module m;
+    std::vector<migraphx::instruction_ref> args{};
+    auto output = migraphx::shape(migraphx::shape::float_type, {1}, {0});
+    auto l      = m.add_literal(migraphx::literal(output, {1}));
+    m.add_return({l});
+    EXPECT(test::throws([&] { mm->add_instruction(migraphx::make_op("pointwise"), args, {&m}); }));
+}
+
+TEST_CASE(pointwise_no_output)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    migraphx::module m;
+    std::vector<migraphx::instruction_ref> args{};
+    EXPECT(test::throws([&] { mm->add_instruction(migraphx::make_op("pointwise"), args, {&m}); }));
+}
+
 TEST_CASE(pooling_shape0)
 {
     migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
...
@@ -2014,6 +2041,62 @@ TEST_CASE(quant_dot_2args)
     }
 }
 
+TEST_CASE(qlinear)
+{
+    migraphx::shape scales{migraphx::shape::float_type, {2, 4}};
+    migraphx::shape input{migraphx::shape::float_type, {2, 4}};
+    migraphx::shape result{migraphx::shape::uint8_type, {2, 4}};
+    expect_shape(result, migraphx::make_op("quantizelinear"), input, scales);
+}
+
+TEST_CASE(qlinear_zeros)
+{
+    migraphx::shape zeros{migraphx::shape::int8_type, {2, 4}};
+    migraphx::shape scales{migraphx::shape::float_type, {2, 4}};
+    migraphx::shape input{migraphx::shape::float_type, {2, 4}};
+    migraphx::shape result{migraphx::shape::int8_type, {2, 4}};
+    expect_shape(result, migraphx::make_op("quantizelinear"), input, scales, zeros);
+}
+
+TEST_CASE(qlinear_fp16)
+{
+    migraphx::shape scales{migraphx::shape::half_type, {2, 4}};
+    migraphx::shape input{migraphx::shape::half_type, {2, 4}};
+    migraphx::shape result{migraphx::shape::uint8_type, {2, 4}};
+    expect_shape(result, migraphx::make_op("quantizelinear"), input, scales);
+}
+
+TEST_CASE(qlinear_mismatch_type)
+{
+    migraphx::shape scales{migraphx::shape::int8_type, {2, 4}};
+    migraphx::shape input{migraphx::shape::float_type, {2, 4}};
+    throws_shape(migraphx::make_op("quantizelinear"), input, scales);
+}
+
+TEST_CASE(dqlinear)
+{
+    migraphx::shape scales{migraphx::shape::float_type, {2, 4}};
+    migraphx::shape input{migraphx::shape::int8_type, {2, 4}};
+    migraphx::shape result{migraphx::shape::float_type, {2, 4}};
+    expect_shape(result, migraphx::make_op("dequantizelinear"), input, scales);
+}
+
+TEST_CASE(dqlinear_fp16)
+{
+    migraphx::shape scales{migraphx::shape::half_type, {2, 4}};
+    migraphx::shape input{migraphx::shape::int8_type, {2, 4}};
+    migraphx::shape result{migraphx::shape::half_type, {2, 4}};
+    expect_shape(result, migraphx::make_op("dequantizelinear"), input, scales);
+}
+
+TEST_CASE(dqlinear_mismatch_type)
+{
+    migraphx::shape zeros{migraphx::shape::float_type, {2, 4}};
+    migraphx::shape scales{migraphx::shape::float_type, {2, 4}};
+    migraphx::shape input{migraphx::shape::int8_type, {2, 4}};
+    throws_shape(migraphx::make_op("dequantizelinear"), input, scales, zeros);
+}
+
 template <class T>
 void test_reduce_ops()
 {
...
test/py/test_shape.py

@@ -49,6 +49,16 @@ def test_create_shape_type():
     assert s.type_size() == 4
 
 
+def test_type_enum():
+    mgx_types = [
+        'bool_type', 'double_type', 'float_type', 'half_type', 'int16_type',
+        'int32_type', 'int64_type', 'int8_type', 'uint16_type', 'uint32_type',
+        'uint64_type', 'uint8_type'
+    ]
+    for t in mgx_types:
+        assert hasattr(migraphx.shape.type_t, t)
+
+
 if __name__ == "__main__":
     test_create_shape()
     test_create_shape_broadcast()
...
test/rewrite_quantization_test.cpp

@@ -33,12 +33,20 @@
 #include <migraphx/make_op.hpp>
 #include <migraphx/serialize.hpp>
+#include <migraphx/pass_manager.hpp>
+#include <migraphx/verify.hpp>
 
 bool is_quantizelinear(migraphx::instruction& ins) { return ins.name() == "quantizelinear"; }
 bool is_dequantizelinear(migraphx::instruction& ins) { return ins.name() == "dequantizelinear"; }
+
+void run_pass(migraphx::module& m)
+{
+    migraphx::run_passes(m, {migraphx::rewrite_quantization{}});
+}
+
+migraphx::argument eval(const migraphx::program& p)
+{
+    auto r = p.eval({});
+    EXPECT(r.size() == 1);
+    return r.front();
+}
 
 TEST_CASE(quantizelinear)
 {
...
@@ -58,8 +66,8 @@ TEST_CASE(quantizelinear)
     migraphx::program p1 = create_program();
     migraphx::program p2 = create_program();
-    migraphx::rewrite_quantization opt;
-    opt.apply(*p2.get_main_module());
+    run_pass(*p2.get_main_module());
+    EXPECT(eval(p1) == eval(p2));
     EXPECT(any_of(*p1.get_main_module(), &is_quantizelinear));
     EXPECT(none_of(*p2.get_main_module(), &is_quantizelinear));
 }
...
@@ -71,8 +79,8 @@ TEST_CASE(dequantizelinear)
     std::vector<float> xv = {0, 1, 2, 5, 10, 50, 100, 150, 250};
     migraphx::shape ss{migraphx::shape::float_type, {1, 3, 3}};
     std::vector<float> sv = {2, 2, 2, 2, 2, 2, 2, 2, 2};
-    migraphx::shape zs{migraphx::shape::uint8_type, {1, 3, 3}};
-    std::vector<uint8_t> zv = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+    migraphx::shape zs{migraphx::shape::float_type, {1, 3, 3}};
+    std::vector<float> zv = {0, 0, 0, 0, 0, 0, 0, 0, 0};
     auto create_program = [&]() {
         migraphx::program p;
         auto* mm = p.get_main_module();
...
@@ -86,8 +94,8 @@ TEST_CASE(dequantizelinear)
     migraphx::program p1 = create_program();
     migraphx::program p2 = create_program();
-    migraphx::rewrite_quantization opt;
-    opt.apply(*p2.get_main_module());
+    run_pass(*p2.get_main_module());
+    EXPECT(eval(p1) == eval(p2));
     EXPECT(any_of(*p1.get_main_module(), &is_dequantizelinear));
     EXPECT(none_of(*p2.get_main_module(), &is_dequantizelinear));
 }
...
test/shape_test.cpp

@@ -336,6 +336,49 @@ TEST_CASE(test_shape_dyn_to_dynamic)
     EXPECT(s0 == s1);
 }
 
+TEST_CASE(test_shape_subshapes_to_dynamic)
+{
+    std::vector<migraphx::shape> sub_shapes0 = {};
+    sub_shapes0.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
+    sub_shapes0.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
+    migraphx::shape s0{sub_shapes0};
+    migraphx::shape s1 = s0.to_dynamic();
+    std::vector<migraphx::shape> sub_shapes1 = {};
+    sub_shapes1.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
+    sub_shapes1.push_back(migraphx::shape{migraphx::shape::float_type, {{3, 3}, {4, 4}, {5, 5}}});
+    migraphx::shape s2{sub_shapes1};
+    EXPECT(s1 == s2);
+}
+
+TEST_CASE(test_shape_dyn_to_static)
+{
+    migraphx::shape s0{migraphx::shape::float_type, {{1, 1}, {2, 2}, {2, 10}, {2, 10}}};
+    migraphx::shape s1 = s0.to_static(4);
+    migraphx::shape s2{migraphx::shape::float_type, {1, 2, 4, 4}};
+    EXPECT(s1 == s2);
+}
+
+TEST_CASE(test_shape_static_to_static)
+{
+    migraphx::shape s0{migraphx::shape::float_type, {1, 2, 4, 4}};
+    migraphx::shape s1 = s0.to_static(8);
+    EXPECT(s0 == s1);
+}
+
+TEST_CASE(test_shape_subshapes_to_static)
+{
+    std::vector<migraphx::shape> sub_shapes0 = {};
+    sub_shapes0.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
+    sub_shapes0.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
+    migraphx::shape s0{sub_shapes0};
+    migraphx::shape s1 = s0.to_static(3);
+    std::vector<migraphx::shape> sub_shapes1 = {};
+    sub_shapes1.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4}});
+    sub_shapes1.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
+    migraphx::shape s2{sub_shapes1};
+    EXPECT(s1 == s2);
+}
+
 TEST_CASE(test_shape_overlap)
 {
     migraphx::shape s{migraphx::shape::float_type, {2, 2, 3}, {6, 3, 2}};
...
test/simplify_algebra_test.cpp

@@ -509,6 +509,34 @@ TEST_CASE(simplify_dot_add)
     EXPECT(m1 == m2);
 }
 
+TEST_CASE(simplify_conv_add)
+{
+    migraphx::shape s{migraphx::shape::float_type, {1, 3, 32, 32}};
+    migraphx::shape ws{migraphx::shape::float_type, {4, 3, 3, 3}};
+    migraphx::module m1;
+    {
+        auto x    = m1.add_parameter("x", s);
+        auto c    = m1.add_literal(migraphx::generate_literal(s, 1));
+        auto w    = m1.add_literal(migraphx::generate_literal(ws, 2));
+        auto sum  = m1.add_instruction(migraphx::make_op("add"), c, x);
+        auto conv = m1.add_instruction(migraphx::make_op("convolution"), sum, w);
+        m1.add_instruction(pass_op{}, conv);
+    }
+    run_pass(m1);
+
+    migraphx::module m2;
+    {
+        auto x     = m2.add_parameter("x", s);
+        auto c     = m2.add_literal(migraphx::generate_literal(s, 1));
+        auto w     = m2.add_literal(migraphx::generate_literal(ws, 2));
+        auto conv1 = m2.add_instruction(migraphx::make_op("convolution"), c, w);
+        auto conv2 = m2.add_instruction(migraphx::make_op("convolution"), x, w);
+        auto sum   = m2.add_instruction(migraphx::make_op("add"), conv1, conv2);
+        m2.add_instruction(pass_op{}, sum);
+    }
+    EXPECT(m1 == m2);
+}
+
 TEST_CASE(simplify_inner_broadcast1)
 {
     auto b = migraphx::op::broadcast{1, {2, 1, 4, 5}};
...
test/simplify_qdq_test.cpp

@@ -402,9 +402,10 @@ TEST_CASE(conv_bias_add)
         auto bias   = m1.add_parameter("bias", s6);
         auto scale  = m1.add_literal(0.5f);
         auto zero   = m1.add_literal(std::int8_t{0});
+        auto zero32 = m1.add_literal(std::int32_t{0});
 
         auto d1 = add_quantize_op(m1, "dequantizelinear", weights, scale, zero);
-        auto d2 = add_quantize_op(m1, "dequantizelinear", bias, scale, zero);
+        auto d2 = add_quantize_op(m1, "dequantizelinear", bias, scale, zero32);
         auto q1 = add_quantize_op(m1, "quantizelinear", input, scale, zero);
         auto d5 = add_quantize_op(m1, "dequantizelinear", q1, scale, zero);
         auto c1 = m1.add_instruction(migraphx::make_op("convolution",
...
@@ -428,9 +429,10 @@ TEST_CASE(conv_bias_add)
         auto bias   = m2.add_parameter("bias", s6);
         auto scale  = m2.add_literal(0.5f);
         auto zero   = m2.add_literal(std::int8_t{0});
+        auto zero32 = m2.add_literal(std::int32_t{0});
         auto scale1 = m2.add_literal(0.25f);
 
-        auto d2 = add_quantize_op(m2, "dequantizelinear", bias, scale, zero);
+        auto d2 = add_quantize_op(m2, "dequantizelinear", bias, scale, zero32);
         auto q1 = add_quantize_op(m2, "quantizelinear", input, scale, zero);
         auto c1 = m2.add_instruction(migraphx::make_op("quant_convolution",
                                                        {{"padding", {0, 0, 0, 0}},
...
@@ -468,9 +470,10 @@ TEST_CASE(conv_pooling_dot)
         auto input  = m1.add_parameter("input", s7);
         auto scale  = m1.add_literal(0.5f);
         auto zero   = m1.add_literal(std::int8_t{0});
+        auto zero32 = m1.add_literal(std::int32_t{0});
 
         auto d1 = add_quantize_op(m1, "dequantizelinear", weights, scale, zero);
-        auto d2 = add_quantize_op(m1, "dequantizelinear", bias, scale, zero);
+        auto d2 = add_quantize_op(m1, "dequantizelinear", bias, scale, zero32);
         auto d3 = add_quantize_op(m1, "dequantizelinear", ab, scale, zero);
         auto d4 = add_quantize_op(m1, "dequantizelinear", db, scale, zero);
         auto q1 = add_quantize_op(m1, "quantizelinear", input, scale, zero);
...
@@ -515,10 +518,11 @@ TEST_CASE(conv_pooling_dot)
         auto input  = m2.add_parameter("input", s7);
         auto scale  = m2.add_literal(0.5f);
         auto zero   = m2.add_literal(std::int8_t{0});
+        auto zero32 = m2.add_literal(std::int32_t{0});
         auto scale1 = m2.add_literal(0.25f);
         auto scale2 = m2.add_literal(0.25f);
 
-        auto d2 = add_quantize_op(m2, "dequantizelinear", bias, scale, zero);
+        auto d2 = add_quantize_op(m2, "dequantizelinear", bias, scale, zero32);
         auto d3 = add_quantize_op(m2, "dequantizelinear", ab, scale, zero);
         auto q1 = add_quantize_op(m2, "quantizelinear", input, scale, zero);
         auto c1 = m2.add_instruction(migraphx::make_op("quant_convolution",
...
@@ -572,9 +576,10 @@ TEST_CASE(mobilenet_snippet)
         auto input  = mm.add_parameter("input", s7);
         auto scale  = mm.add_literal(0.5f);
         auto zero   = mm.add_literal(std::int8_t{0});
+        auto zero32 = mm.add_literal(std::int32_t{0});
 
         auto d1 = add_quantize_op(mm, "dequantizelinear", weights, scale, zero);
-        auto d2 = add_quantize_op(mm, "dequantizelinear", bias, scale, zero);
+        auto d2 = add_quantize_op(mm, "dequantizelinear", bias, scale, zero32);
         auto d3 = add_quantize_op(mm, "dequantizelinear", ab, scale, zero);
         auto d4 = add_quantize_op(mm, "dequantizelinear", db, scale, zero);
         auto q1 = add_quantize_op(mm, "quantizelinear", input, scale, zero);
...
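These hunks switch the zero point used for the bias dequantizelinear from an int8 literal to an int32 one (zero32). For reference, a scalar sketch of the usual quantize/dequantize-linear arithmetic these tests exercise, assuming ONNX-style semantics; the saturation bounds depend on the output type and are shown here for int8 only:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // q = saturate(round(x / scale) + zero_point)
    std::int8_t quantize_linear(float x, float scale, std::int8_t zero_point)
    {
        float q = std::nearbyint(x / scale) + zero_point;
        return static_cast<std::int8_t>(std::clamp(q, -128.0f, 127.0f));
    }

    // x = (q - zero_point) * scale; here q and zero_point are int32, as for a bias tensor
    float dequantize_linear(std::int32_t q, float scale, std::int32_t zero_point)
    {
        return (q - zero_point) * scale;
    }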
test/split_single_dyn_dim_test.cpp

@@ -50,8 +50,8 @@ TEST_CASE(dynamic_batch)
         auto sm_input = submod->add_parameter("data", sm_shape);
         migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {1}}};
         auto literal_ins = submod->add_literal(migraphx::literal{lit_s, {6}});
         auto broadcast_lit =
-            submod->add_instruction(migraphx::make_op("multibroadcast"), literal_ins, sm_input);
+            submod->add_instruction(
+                migraphx::make_op("multibroadcast", {{"out_lens", sm_shape.lens()}}), literal_ins);
         auto add_ins =
             submod->add_instruction(migraphx::make_op("add"), sm_input, broadcast_lit);
         submod->add_return({add_ins});
...
@@ -107,8 +107,8 @@ TEST_CASE(multiple_outputs)
         auto sm_input = submod->add_parameter("data", sm_shape);
         migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {1}}};
         auto literal_ins = submod->add_literal(migraphx::literal{lit_s, {6}});
         auto broadcast_lit =
-            submod->add_instruction(migraphx::make_op("multibroadcast"), literal_ins, sm_input);
+            submod->add_instruction(
+                migraphx::make_op("multibroadcast", {{"out_lens", sm_shape.lens()}}), literal_ins);
         auto add0_ins =
             submod->add_instruction(migraphx::make_op("add"), sm_input, broadcast_lit);
         auto add1_ins = submod->add_instruction(migraphx::make_op("add"), sm_input, sm_input);
...
@@ -157,4 +157,64 @@ TEST_CASE(multiple_outputs)
     EXPECT(p0 == p1);
 }
 
+TEST_CASE(broadcast_match)
+{
+    // Slightly different from ref_ops_test in that the literal is copied over the submodules.
+    // A different compiler pass will pull the literals from the submodules to the main module.
+    migraphx::program p0;
+    {
+        auto* mm0 = p0.get_main_module();
+        // create batch submodules
+        auto create_submodule = [&](std::size_t batch_size, const std::string& module_name) {
+            auto* submod = p0.create_module(module_name);
+            migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 4}};
+            auto sm_input = submod->add_parameter("data", sm_shape);
+            migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {4}}};
+            auto literal_ins   = submod->add_literal(migraphx::literal{lit_s, {6, 5, 4, 3}});
+            auto broadcast_lit = submod->add_instruction(
+                migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", sm_shape.lens()}}),
+                literal_ins);
+            auto add_ins =
+                submod->add_instruction(migraphx::make_op("add"), sm_input, broadcast_lit);
+            submod->add_return({add_ins});
+            return submod;
+        };
+        auto* dim1 = create_submodule(1, "dim_1");
+        auto* dim2 = create_submodule(2, "dim_2");
+        auto* dim3 = create_submodule(3, "dim_3");
+        auto* dim4 = create_submodule(4, "dim_4");
+
+        migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {4, 4}}};
+        auto input0 = mm0->add_parameter("data", s);
+        std::vector<migraphx::shape> sub_shapes = {};
+        sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
+        migraphx::shape out_attr = migraphx::shape{sub_shapes};
+        auto sm_ins = mm0->add_instruction(
+            migraphx::make_op("select_module",
+                              {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
+            {input0},
+            {dim1, dim2, dim3, dim4});
+        auto ret =
+            mm0->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
+        mm0->add_return({ret});
+    }
+
+    migraphx::program p1;
+    {
+        auto* mm1 = p1.get_main_module();
+        migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {4, 4}}};
+        auto input1 = mm1->add_parameter("data", s);
+        migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {4}}};
+        auto literal_ins   = mm1->add_literal(migraphx::literal{lit_s, {6, 5, 4, 3}});
+        auto broadcast_lit = mm1->add_instruction(
+            migraphx::make_op("broadcast", {{"axis", 1}}), literal_ins, input1);
+        auto add_ins = mm1->add_instruction(migraphx::make_op("add"), input1, broadcast_lit);
+        mm1->add_return({add_ins});
+    }
+    run_pass(p1);
+    EXPECT(p0 == p1);
+}
+
 int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/targets.cpp

@@ -41,10 +41,12 @@ TEST_CASE(make_invalid_target)
 TEST_CASE(targets)
 {
+// GCC doesn't load libmigraphx_ref unless necessary even though it is linked to the test.
+// Force it to load by making ref target
+#if defined(__GNUC__) && !defined(__clang__)
+    auto ref_target = migraphx::make_target("ref");
+#endif
     auto ts = migraphx::get_targets();
-    EXPECT(ts.size() == 0);
-    auto ref_t = migraphx::make_target("ref");
-    ts = migraphx::get_targets();
     EXPECT(ts.size() == 1);
 }
...
test/verify/test_add_conv_constant.cpp (new file, 0 → 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_add_conv_constant : verify_program<test_add_conv_constant>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape s{migraphx::shape::float_type, {1, 3, 32, 32}};
        migraphx::shape ws{migraphx::shape::float_type, {4, 3, 3, 3}};
        auto x   = mm->add_parameter("x", s);
        auto c   = mm->add_literal(migraphx::generate_literal(s, 1));
        auto w   = mm->add_literal(migraphx::generate_literal(ws, 2));
        auto sum = mm->add_instruction(migraphx::make_op("add"), c, x);
        mm->add_instruction(migraphx::make_op("convolution"), sum, w);
        return p;
    }
};
test/verify/test_quantizelinear_int32.cpp

@@ -40,7 +40,10 @@ struct test_quantizelinear_int32 : verify_program<test_quantizelinear_int32>
         auto input1 = mm->add_parameter("x", sx);
         auto input2 = mm->add_parameter("y_scale", ss);
         auto input3 = mm->add_parameter("y_zero_point", sz);
-        auto r = mm->add_instruction(migraphx::make_op("quantizelinear"), input1, input2, input3);
+        auto input1_float = mm->add_instruction(
+            migraphx::make_op("convert", {{"target_type", migraphx::shape::float_type}}), input1);
+        auto r =
+            mm->add_instruction(migraphx::make_op("quantizelinear"), input1_float, input2, input3);
         mm->add_return({r});
         return p;
     };
...