gaoqiong / MIGraphX / Commits / 7fafbb66

Unverified commit 7fafbb66, authored Sep 25, 2023 by Chris Austen, committed by GitHub on Sep 25, 2023

Merge branch 'develop' into jenkins_stash_migx_all_targets

parents c157b338 434a06cf
Showing 13 changed files with 672 additions and 138 deletions (+672 −138)

.github/workflows/ci.yaml                  +45 −37
.readthedocs.yaml                          +5 −1
src/CMakeLists.txt                         +1 −0
src/include/migraphx/simplify_dyn_ops.hpp  +49 −0
src/simplify_dyn_ops.cpp                   +141 −0
src/split_single_dyn_dim.cpp               +0 −32
src/targets/gpu/target.cpp                 +3 −0
test/onnx/.onnxrt-commit                   +1 −1
test/py/onnx_backend_test.py               +176 −1
test/simplify_dyn_ops_test.cpp             +240 −0
test/split_single_dyn_dim_test.cpp         +4 −64
tools/download_models.sh                   +5 −0
tools/install_prereqs.sh                   +2 −2
.github/workflows/ci.yaml

@@ -11,6 +11,9 @@ on:
 env:
   DOCKER_USER: ${{secrets.DOCKERHUB_USERID}}
   DOCKER_TOKEN: ${{secrets.DOCKERHUB_TOKEN}}
+  DOCKER_IMAGE_UBUNTU: "rocm/migraphx-ci-ubuntu"
+  DOCKER_IMAGE_SLES: "rocm/migraphx-ci-sles"
 
 jobs:
   cancel:
@@ -37,18 +40,16 @@ jobs:
       - name: Create Image Tag
         id: image_hash
         run: |
-          echo "imagetag=rocm/migraphx-private:hip-clang-${{hashFiles('**/hip-clang.docker', '**/*requirements.txt', '**/install_prereqs.sh', '**/rbuild.ini')}}" >> $GITHUB_OUTPUT
-          echo "imagetag_sles=rocm/migraphx-sles-private:hip-clang-${{hashFiles('**/tools/docker/sles.docker', '**/*requirements.txt', '**/install_prereqs.sh', '**/rbuild.ini')}}" >> $GITHUB_OUTPUT
+          echo "imagetag=hip-clang-${{hashFiles('**/hip-clang.docker', '**/*requirements.txt', '**/install_prereqs.sh', '**/rbuild.ini')}}" >> $GITHUB_OUTPUT
+          echo "imagetag_sles=hip-clang-${{hashFiles('**/tools/docker/sles.docker', '**/*requirements.txt', '**/install_prereqs.sh', '**/rbuild.ini')}}" >> $GITHUB_OUTPUT
       - name: Check if image is built already
         id: check_image
         env:
-          DOCKERIMAGE: ${{ steps.image_hash.outputs.imagetag }}
-          DOCKERIMAGE_SLES: ${{ steps.image_hash.outputs.imagetag_sles }}
+          DOCKER_TAG_UBUNTU: ${{ steps.image_hash.outputs.imagetag }}
+          DOCKER_TAG_SLES: ${{ steps.image_hash.outputs.imagetag_sles }}
         run: |
           echo $DOCKER_TOKEN | docker login -u $DOCKER_USER --password-stdin
-          if [[ "$(docker manifest inspect $DOCKERIMAGE 2> /dev/null)" != "" ]]; then
+          if [[ "$(docker manifest inspect $DOCKER_IMAGE_UBUNTU:$DOCKER_TAG_UBUNTU 2> /dev/null)" != "" ]]; then
             echo "imageexists=true" >> $GITHUB_OUTPUT
             echo "Image already exists, skip building available"
           else
@@ -56,7 +57,7 @@ jobs:
             echo "Tag does not exist, build and publishing required"
           fi
-          if [[ "$(docker manifest inspect $DOCKERIMAGE_SLES 2> /dev/null)" != "" ]]; then
+          if [[ "$(docker manifest inspect $DOCKER_IMAGE_SLES:$DOCKER_TAG_SLES 2> /dev/null)" != "" ]]; then
             echo "imageexists_sles=true" >> $GITHUB_OUTPUT
             echo "SLES Image already exists, skip building available"
           else
@@ -75,11 +76,17 @@ jobs:
       - name: Build and publish
        env:
-          DOCKERIMAGE: ${{ needs.check_image.outputs.imagetag }}
+          DOCKER_TAG_UBUNTU: ${{ needs.check_image.outputs.imagetag }}
         run: |
           # The TOKEN and USERID are github secrets, Action failures at this step
           # can come from a PR from a fork changing a file which forces a rebuild
           # Resolve by making an internal PR of the Forked PR
           echo $DOCKER_TOKEN | docker login -u $DOCKER_USER --password-stdin
-          docker build . --file hip-clang.docker --tag $DOCKERIMAGE;
-          docker push $DOCKERIMAGE;
+          docker pull $DOCKER_IMAGE_UBUNTU:latest || true
+          docker build . --file hip-clang.docker --cache-from $DOCKER_IMAGE_UBUNTU:latest --tag $DOCKER_IMAGE_UBUNTU:$DOCKER_TAG_UBUNTU --tag $DOCKER_IMAGE_UBUNTU:latest;
+          docker push $DOCKER_IMAGE_UBUNTU:$DOCKER_TAG_UBUNTU;
+          docker push $DOCKER_IMAGE_UBUNTU:latest;
 
   build_SLES_image:
     name: Build SLES image
@@ -90,18 +97,24 @@ jobs:
       - uses: actions/checkout@v3
       - name: Build and publish SLES
         env:
-          DOCKERIMAGE_SLES: ${{ needs.check_image.outputs.imagetag_sles }}
+          DOCKER_TAG_SLES: ${{ needs.check_image.outputs.imagetag_sles }}
         run: |
           # The TOKEN and USERID are github secrets, Action failures at this step
           # can come from a PR from a fork changing a file which forces a rebuild
           # Resolve by making an internal PR of the Forked PR
           echo $DOCKER_TOKEN | docker login -u $DOCKER_USER --password-stdin
-          docker build . --file tools/docker/sles.docker --tag $DOCKERIMAGE_SLES;
-          docker push $DOCKERIMAGE_SLES;
+          docker pull $DOCKER_IMAGE_SLES:latest || true
+          docker build . --file ./tools/docker/sles.docker --cache-from $DOCKER_IMAGE_SLES:latest --tag $DOCKER_IMAGE_SLES:$DOCKER_TAG_SLES --tag $DOCKER_IMAGE_SLES:latest;
+          docker push $DOCKER_IMAGE_SLES:$DOCKER_TAG_SLES;
+          docker push $DOCKER_IMAGE_SLES:latest;
 
   tidy:
     runs-on: ROCM-Ubuntu
     needs: [ build_image, check_image ]
     env:
-      DOCKERIMAGE: ${{ needs.check_image.outputs.imagetag }}
+      DOCKER_TAG_UBUNTU: ${{ needs.check_image.outputs.imagetag }}
     if: ${{ !cancelled() && (needs.build_image.result == 'success' || needs.build_image.result == 'skipped') }}
     steps:
@@ -115,12 +128,8 @@ jobs:
         key: tidy-cache-${{ github.ref }}
         restore-keys: tidy-cache-
-      - name: Docker Login
-        run: |
-          echo $DOCKER_TOKEN | docker login -u $DOCKER_USER --password-stdin
       - name: Clang Tidy
-        shell: bash -c "docker run -i -v=$GITHUB_WORKSPACE:/data -w /data $DOCKERIMAGE bash < {0}"
+        shell: bash -c "docker run -i -v=$GITHUB_WORKSPACE:/data -w /data $DOCKER_IMAGE_UBUNTU:$DOCKER_TAG_UBUNTU bash < {0}"
         run: |
           mkdir build
           cd build
@@ -159,7 +168,7 @@ jobs:
     runs-on: ROCM-Ubuntu
     needs: [ build_image, check_image ]
     env:
-      DOCKERIMAGE: ${{ needs.check_image.outputs.imagetag }}
+      DOCKER_TAG_UBUNTU: ${{ needs.check_image.outputs.imagetag }}
     if: ${{ !cancelled() && (needs.build_image.result == 'success' || needs.build_image.result == 'skipped') }}
     steps:
@@ -173,12 +182,8 @@ jobs:
         key: cppcheck-cache-${{ hashFiles('cppcheck.rules', 'CMakeLists.txt') }}-${{ github.ref }}
         restore-keys: cppcheck-cache-${{ hashFiles('cppcheck.rules', 'CMakeLists.txt') }}-
-      - name: Docker Login
-        run: |
-          echo $DOCKER_TOKEN | docker login -u $DOCKER_USER --password-stdin
       - name: Cppcheck
-        shell: bash -c "docker run -i -v=$GITHUB_WORKSPACE:/data -w /data $DOCKERIMAGE bash < {0}"
+        shell: bash -c "docker run -i -v=$GITHUB_WORKSPACE:/data -w /data $DOCKER_IMAGE_UBUNTU:$DOCKER_TAG_UBUNTU bash < {0}"
         run: |
           mkdir build
           cd build
@@ -212,7 +217,7 @@ jobs:
     runs-on: ubuntu-latest
     needs: [ build_image, check_image ]
     env:
-      DOCKERIMAGE: ${{ needs.check_image.outputs.imagetag }}
+      DOCKER_TAG_UBUNTU: ${{ needs.check_image.outputs.imagetag }}
     if: ${{ !cancelled() && (needs.build_image.result == 'success' || needs.build_image.result == 'skipped') }}
     steps:
@@ -220,12 +225,19 @@ jobs:
       with:
         fetch-depth: 0
-      - name: Docker Login
-        run: |
-          echo $DOCKER_TOKEN | docker login -u $DOCKER_USER --password-stdin
+      - name: Free space
+        uses: jlumbroso/free-disk-space@main
+        with:
+          tool-cache: true
+          android: true
+          dotnet: true
+          haskell: true
+          large-packages: true
+          swap-storage: true
+          docker-images: true
       - name: Check formatting
-        shell: bash -c "docker run -i -v=$GITHUB_WORKSPACE:/data -w /data $DOCKERIMAGE bash < {0}"
+        shell: bash -c "docker run -i -v=$GITHUB_WORKSPACE:/data -w /data $DOCKER_IMAGE_UBUNTU:$DOCKER_TAG_UBUNTU bash < {0}"
         run: |
           set -e
           git config --global --add safe.directory /data
@@ -235,7 +247,7 @@ jobs:
     runs-on: ROCM-Ubuntu
     needs: [ build_SLES_image, check_image ]
     env:
-      DOCKERIMAGE_SLES: ${{ needs.check_image.outputs.imagetag_sles }}
+      DOCKER_TAG_SLES: ${{ needs.check_image.outputs.imagetag_sles }}
     if: ${{ !cancelled() && (needs.build_SLES_image.result == 'success' || needs.build_SLES_image.result == 'skipped') }}
     steps:
@@ -251,12 +263,8 @@ jobs:
         key: ccache-sles-${{ github.ref }}
         restore-keys: ccache-sles-
-      - name: Docker Login
-        run: |
-          echo $DOCKER_TOKEN | docker login -u $DOCKER_USER --password-stdin
       - name: Build migraphx
-        shell: bash -c "docker run -i -v=$GITHUB_WORKSPACE:/data -w /data $DOCKERIMAGE_SLES bash < {0}"
+        shell: bash -c "docker run -i -v=$GITHUB_WORKSPACE:/data -w /data $DOCKER_IMAGE_SLES:$DOCKER_TAG_SLES bash < {0}"
         run: |
           set -e
           export CCACHE_COMPRESSLEVEL=10
.readthedocs.yaml

@@ -9,6 +9,10 @@ sphinx:
 formats: [htmlzip]
 
 python:
-  version: "3.8"
   install:
     - requirements: docs/.sphinx/requirements.txt
+
+build:
+  os: ubuntu-20.04
+  tools:
+    python: "3.8"
src/CMakeLists.txt

@@ -96,6 +96,7 @@ add_library(migraphx
   serialize.cpp
   shape.cpp
   simplify_algebra.cpp
+  simplify_dyn_ops.cpp
   simplify_reshapes.cpp
   split_single_dyn_dim.cpp
   target.cpp
src/include/migraphx/simplify_dyn_ops.hpp (new file, 0 → 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_RTGLIB_SIMPLIFY_DYN_OPS_HPP
#define MIGRAPHX_GUARD_RTGLIB_SIMPLIFY_DYN_OPS_HPP

#include <string>
#include <migraphx/instruction_ref.hpp>
#include <migraphx/config.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {

struct module;

/**
 * Convert dynamic ops to their static version if possible.
 * Should be run after the split_single_dyn_dim pass.
 */
struct MIGRAPHX_EXPORT simplify_dyn_ops
{
    std::string name() const { return "simplify_dyn_ops"; }
    void apply(module& m) const;
};

} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
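For orientation, here is a minimal sketch (not part of this diff) of how the new pass is meant to be scheduled; it mirrors the ordering this commit adds to src/targets/gpu/target.cpp and uses in test/simplify_dyn_ops_test.cpp. The function name statify_dynamic_ops and the program p are hypothetical, introduced only for illustration.

// Sketch only: run simplify_dyn_ops after split_single_dyn_dim, as the GPU
// target and the new unit tests in this commit do.
#include <migraphx/program.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/split_single_dyn_dim.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/simplify_dyn_ops.hpp>

void statify_dynamic_ops(migraphx::program& p)
{
    migraphx::run_passes(p,
                         {migraphx::split_single_dyn_dim{},  // split the one dynamic dim into static submodules
                          migraphx::dead_code_elimination{}, // clean up after the split
                          migraphx::simplify_dyn_ops{}});    // then statify the remaining dynamic ops
}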
src/simplify_dyn_ops.cpp (new file, 0 → 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/simplify_dyn_ops.hpp>
#include <migraphx/matcher.hpp>
#include <migraphx/make_op.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {

/**
 * Convert 2 input static shape broadcast/multibroadcast into 1 input version.
 * Some compiler passes (ex. simplify_algebra) only support the 1 input versions
 * of the broadcasting operators.
 */
struct find_static_2in_broadcasts
{
    auto matcher() const
    {
        return match::broadcast(match::nargs(2),
                                match::arg(0)(match::static_shape()),
                                match::arg(1)(match::static_shape()));
    }

    void apply(module& m, const match::matcher_result& mr) const
    {
        auto ins          = mr.result;
        auto out_lens     = ins->get_shape().lens();
        auto broadcast_op = ins->get_operator();
        if(broadcast_op.name() == "broadcast")
        {
            broadcast_op.from_value({{"out_lens", out_lens}});
        }
        else
        {
            broadcast_op.from_value({{"out_lens", out_lens}, {"out_dyn_dims", {}}});
        }
        m.replace_instruction(ins, broadcast_op, ins->inputs().at(0));
    }
};

/**
 * Simplify slice with variable `starts` and `ends` to the constant version if
 * the `input_starts` and `input_ends` inputs are constant.
 */
struct find_const_3in_slice
{
    auto matcher() const
    {
        return match::name("slice")(match::nargs(3),
                                    match::arg(1)(match::is_constant()),
                                    match::arg(2)(match::is_constant()));
    }

    void apply(module& m, const match::matcher_result& mr) const
    {
        auto ins            = mr.result;
        auto inputs         = ins->inputs();
        argument starts_arg = inputs.at(1)->eval();
        argument ends_arg   = inputs.at(2)->eval();
        if(not starts_arg.empty() and not ends_arg.empty())
        {
            std::vector<int64_t> starts_vec;
            std::vector<int64_t> ends_vec;
            starts_arg.visit([&](auto output) { starts_vec.assign(output.begin(), output.end()); });
            ends_arg.visit([&](auto output) { ends_vec.assign(output.begin(), output.end()); });
            auto slice_val = ins->get_operator().to_value();
            auto axes_vec  = slice_val.at("axes").to_vector<int64_t>();
            m.replace_instruction(
                ins,
                make_op("slice", {{"starts", starts_vec}, {"ends", ends_vec}, {"axes", axes_vec}}),
                inputs.at(0));
        }
    }
};

/**
 * Simplify slice with variable `starts`, `ends`, and `input_axes` to the constant version if
 * the `input_starts`, `input_ends`, and `input_axes` inputs are constant.
 */
struct find_const_4in_slice
{
    auto matcher() const
    {
        return match::name("slice")(match::nargs(4),
                                    match::arg(1)(match::is_constant()),
                                    match::arg(2)(match::is_constant()),
                                    match::arg(3)(match::is_constant()));
    }

    void apply(module& m, const match::matcher_result& mr) const
    {
        auto ins            = mr.result;
        auto inputs         = ins->inputs();
        argument starts_arg = inputs.at(1)->eval();
        argument ends_arg   = inputs.at(2)->eval();
        argument axes_arg   = inputs.at(3)->eval();
        if(not starts_arg.empty() and not ends_arg.empty() and not axes_arg.empty())
        {
            std::vector<int64_t> starts_vec;
            std::vector<int64_t> ends_vec;
            std::vector<int64_t> axes_vec;
            starts_arg.visit([&](auto output) { starts_vec.assign(output.begin(), output.end()); });
            ends_arg.visit([&](auto output) { ends_vec.assign(output.begin(), output.end()); });
            axes_arg.visit([&](auto output) { axes_vec.assign(output.begin(), output.end()); });
            m.replace_instruction(
                ins,
                make_op("slice", {{"starts", starts_vec}, {"ends", ends_vec}, {"axes", axes_vec}}),
                inputs.at(0));
        }
    }
};

void simplify_dyn_ops::apply(module& m) const
{
    match::find_matches(
        m, find_static_2in_broadcasts{}, find_const_3in_slice{}, find_const_4in_slice{});
}

} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
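To make the slice rewrites concrete, here is a small before/after sketch, assuming only the module-building API already used in this commit (it mirrors the const_slice_3input test below): a slice whose starts/ends arrive as constant literal inputs collapses to the single-input, attribute-only form that downstream passes can handle.

// Sketch: effect of find_const_3in_slice on a module (illustration only,
// not code from this commit).
migraphx::module m;
migraphx::shape s{migraphx::shape::float_type, {6, 4, 4}};
auto data = m.add_parameter("data", s);
migraphx::shape s1{migraphx::shape::int32_type, {1}};
auto starts = m.add_literal(migraphx::literal{s1, {0}});
auto ends   = m.add_literal(migraphx::literal{s1, {3}});
// before the pass: 3-input slice, only "axes" is an attribute
m.add_instruction(migraphx::make_op("slice", {{"axes", {0}}}), data, starts, ends);
// after simplify_dyn_ops the instruction is replaced with the 1-input form:
//   slice[starts={0}, ends={3}, axes={0}](data)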
src/split_single_dyn_dim.cpp
@@ -68,37 +68,6 @@ has_one_dyn_dim(const std::unordered_map<std::string, shape>& param_shapes)
             dds_it->max};
 }
 
-namespace {
-struct find_static_2in_broadcasts
-{
-    // Convert 2 input static shape broadcast/multibroadcast into 1 input version.
-    // Some compiler passes (ex. simplify_algebra) only support the 1 input versions
-    // of the broadcasting operators.
-    auto matcher() const
-    {
-        return match::broadcast(match::nargs(2),
-                                match::arg(0)(match::static_shape()),
-                                match::arg(1)(match::static_shape()));
-    }
-
-    void apply(module& m, const match::matcher_result& mr) const
-    {
-        auto ins          = mr.result;
-        auto out_lens     = ins->get_shape().lens();
-        auto broadcast_op = ins->get_operator();
-        if(broadcast_op.name() == "broadcast")
-        {
-            broadcast_op.from_value({{"out_lens", out_lens}});
-        }
-        else
-        {
-            broadcast_op.from_value({{"out_lens", out_lens}, {"out_dyn_dims", {}}});
-        }
-        m.replace_instruction(ins, broadcast_op, ins->inputs().at(0));
-    }
-};
-} // namespace
-
 /**
  * Makes all the shapes in the dynamic_dimension range. Probably won't work for `if`
  * and `loop` instructions, depending on how the submodules for those
@@ -135,7 +104,6 @@ void split_single_dyn_dim::apply(module_pass_manager& mpm) const
                 dd_check->dyn_param_str,
                 migraphx::shape{dyn_param_shape.type(), static_lens});
             auto outputs = submod->add_instructions(mm, map_ins);
             submod->add_return({outputs});
-            match::find_matches(*submod, find_static_2in_broadcasts{});
             submodules.push_back(submod);
         }
         // redirect to select_module operator and return
src/targets/gpu/target.cpp

@@ -48,6 +48,7 @@
 #include <migraphx/rewrite_quantization.hpp>
 #include <migraphx/rewrite_rnn.hpp>
 #include <migraphx/schedule.hpp>
+#include <migraphx/simplify_dyn_ops.hpp>
 #include <migraphx/simplify_qdq.hpp>
 #include <migraphx/simplify_reshapes.hpp>
 #include <migraphx/split_single_dyn_dim.hpp>

@@ -109,6 +110,8 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
     {
         split_single_dyn_dim{},
         dead_code_elimination{},
+        simplify_dyn_ops{},
+        dead_code_elimination{},
         normalize_ops{},
         dead_code_elimination{},
         simplify_qdq{},
test/onnx/.onnxrt-commit

@@ -1 +1 @@
-377f959c69e9f213cd4a8c71a5e80162a412989a
+6d7bc2a097a1a08541cd0d4628831c79ab8092d5
test/py/onnx_backend_test.py

@@ -104,11 +104,170 @@ def disabled_tests_onnx_1_10_0(backend_test):
     backend_test.exclude(r'test_shape_start_negative_1_cpu')
 
-def disabled_tests_onnx_1_12_0(backend_test):
+def disabled_tests_onnx_1_11_0(backend_test):
     # crash
     backend_test.exclude(r'test_scatter_elements_with_duplicate_indices_cpu')
     # fails
     backend_test.exclude(r'test_roialign_aligned_false_cpu')
     backend_test.exclude(r'test_roialign_aligned_true_cpu')
     backend_test.exclude(r'test_scatternd_add_cpu')
     backend_test.exclude(r'test_scatternd_multiply_cpu')
     # errors
     backend_test.exclude(r'test_identity_opt_cpu')
     backend_test.exclude(r'test_if_opt_cpu')
 
+def disabled_tests_onnx_1_12_0(backend_test):
+    pass
+
 def disabled_tests_onnx_1_13_0(backend_test):
     # fails
+    backend_test.exclude(r'test_reduce_l1_do_not_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_l1_do_not_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_l1_keep_dims_example_cpu')
+    backend_test.exclude(r'test_reduce_l1_keep_dims_random_cpu')
+    backend_test.exclude(r'test_reduce_l1_negative_axes_keep_dims_example_cpu')
+    backend_test.exclude(r'test_reduce_l1_negative_axes_keep_dims_random_cpu')
+    backend_test.exclude(r'test_reduce_l2_do_not_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_l2_do_not_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_l2_keep_dims_example_cpu')
+    backend_test.exclude(r'test_reduce_l2_keep_dims_random_cpu')
+    backend_test.exclude(r'test_reduce_l2_negative_axes_keep_dims_example_cpu')
+    backend_test.exclude(r'test_reduce_l2_negative_axes_keep_dims_random_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_exp_do_not_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_exp_do_not_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_exp_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_exp_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_exp_negative_axes_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_exp_negative_axes_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_sum_square_do_not_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_sum_square_do_not_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_sum_square_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_sum_square_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_sum_square_negative_axes_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_sum_square_negative_axes_keepdims_random_cpu')
+    backend_test.exclude(r'test_scatternd_max_cpu')
+    backend_test.exclude(r'test_scatternd_min_cpu')
     # errors
+    backend_test.exclude(r'test_constant_pad_axes_cpu')
+    backend_test.exclude(r'test_reduce_l1_default_axes_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_l1_default_axes_keepdims_example_expanded_cpu')
+    backend_test.exclude(r'test_reduce_l1_default_axes_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_l1_default_axes_keepdims_random_expanded_cpu')
+    backend_test.exclude(r'test_reduce_l1_do_not_keepdims_example_expanded_cpu')
+    backend_test.exclude(r'test_reduce_l1_do_not_keepdims_random_expanded_cpu')
+    backend_test.exclude(r'test_reduce_l1_keep_dims_example_expanded_cpu')
+    backend_test.exclude(r'test_reduce_l1_keep_dims_random_expanded_cpu')
+    backend_test.exclude(r'test_reduce_l1_negative_axes_keep_dims_example_expanded_cpu')
+    backend_test.exclude(r'test_reduce_l1_negative_axes_keep_dims_random_expanded_cpu')
+    backend_test.exclude(r'test_reduce_l2_default_axes_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_l2_default_axes_keepdims_example_expanded_cpu')
+    backend_test.exclude(r'test_reduce_l2_default_axes_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_l2_default_axes_keepdims_random_expanded_cpu')
+    backend_test.exclude(r'test_reduce_l2_do_not_keepdims_example_expanded_cpu')
+    backend_test.exclude(r'test_reduce_l2_do_not_keepdims_random_expanded_cpu')
+    backend_test.exclude(r'test_reduce_l2_keep_dims_example_expanded_cpu')
+    backend_test.exclude(r'test_reduce_l2_keep_dims_random_expanded_cpu')
+    backend_test.exclude(r'test_reduce_l2_negative_axes_keep_dims_example_expanded_cpu')
+    backend_test.exclude(r'test_reduce_l2_negative_axes_keep_dims_random_expanded_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_asc_axes_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_asc_axes_expanded_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_default_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_default_expanded_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_desc_axes_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_desc_axes_expanded_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_exp_default_axes_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_exp_default_axes_keepdims_example_expanded_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_exp_default_axes_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_exp_default_axes_keepdims_random_expanded_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_exp_do_not_keepdims_example_expanded_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_exp_do_not_keepdims_random_expanded_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_exp_keepdims_example_expanded_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_exp_keepdims_random_expanded_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_exp_negative_axes_keepdims_example_expanded_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_exp_negative_axes_keepdims_random_expanded_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_negative_axes_cpu')
+    backend_test.exclude(r'test_reduce_log_sum_negative_axes_expanded_cpu')
+    backend_test.exclude(r'test_reduce_max_do_not_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_max_do_not_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_max_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_max_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_max_negative_axes_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_max_negative_axes_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_mean_default_axes_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_mean_default_axes_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_mean_do_not_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_mean_do_not_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_mean_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_mean_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_mean_negative_axes_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_mean_negative_axes_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_min_do_not_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_min_do_not_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_min_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_min_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_min_negative_axes_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_min_negative_axes_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_prod_do_not_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_prod_do_not_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_prod_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_prod_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_prod_negative_axes_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_prod_negative_axes_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_sum_square_default_axes_keepdims_example_cpu')
+    backend_test.exclude(r'test_reduce_sum_square_default_axes_keepdims_example_expanded_cpu')
+    backend_test.exclude(r'test_reduce_sum_square_default_axes_keepdims_random_cpu')
+    backend_test.exclude(r'test_reduce_sum_square_default_axes_keepdims_random_expanded_cpu')
+    backend_test.exclude(r'test_reduce_sum_square_do_not_keepdims_example_expanded_cpu')
+    backend_test.exclude(r'test_reduce_sum_square_do_not_keepdims_random_expanded_cpu')
+    backend_test.exclude(r'test_reduce_sum_square_keepdims_example_expanded_cpu')
+    backend_test.exclude(r'test_reduce_sum_square_keepdims_random_expanded_cpu')
+    backend_test.exclude(r'test_reduce_sum_square_negative_axes_keepdims_example_expanded_cpu')
+    backend_test.exclude(r'test_reduce_sum_square_negative_axes_keepdims_random_expanded_cpu')
+    backend_test.exclude(r'test_scatter_elements_with_reduction_max_cpu')
+    backend_test.exclude(r'test_scatter_elements_with_reduction_min_cpu')
     # The following tests fail due to the CastLike operator being unsupported
     backend_test.exclude(r'test_elu_default_expanded_ver18_cpu')
     backend_test.exclude(r'test_elu_example_expanded_ver18_cpu')
@@ -131,6 +290,19 @@ def disabled_tests_onnx_1_14_0(backend_test):
     # fails
     backend_test.exclude(r'test_averagepool_2d_dilations_cpu')
     backend_test.exclude(r'test_roialign_mode_max_cpu')
+    # errors
+    backend_test.exclude(r'test_constant_pad_negative_axes_cpu')
+    backend_test.exclude(r'test_dequantizelinear_e4m3fn_cpu')
+    backend_test.exclude(r'test_dequantizelinear_e5m2_cpu')
+    backend_test.exclude(r'test_equal_string_broadcast_cpu')
+    backend_test.exclude(r'test_equal_string_cpu')
+    backend_test.exclude(r'test_quantizelinear_e4m3fn_cpu')
+    backend_test.exclude(r'test_quantizelinear_e5m2_cpu')
     # The following tests fail due to the CastLike operator being unsupported
     backend_test.exclude(r'test_softplus_example_expanded_ver18_cpu')
     backend_test.exclude(r'test_softplus_expanded_ver18_cpu')
@@ -359,6 +531,9 @@ def create_backend_test(testname=None, target_device=None):
     if version.parse(onnx.__version__) >= version.parse("1.10.0"):
         disabled_tests_onnx_1_10_0(backend_test)
+    if version.parse(onnx.__version__) >= version.parse("1.11.0"):
+        disabled_tests_onnx_1_11_0(backend_test)
     if version.parse(onnx.__version__) >= version.parse("1.12.0"):
         disabled_tests_onnx_1_12_0(backend_test)
test/simplify_dyn_ops_test.cpp (new file, 0 → 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/simplify_dyn_ops.hpp>
#include <migraphx/split_single_dyn_dim.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/program.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/make_op.hpp>
#include <test.hpp>
void run_pass(migraphx::module& m)
{
    migraphx::run_passes(m, {migraphx::simplify_dyn_ops{}, migraphx::dead_code_elimination{}});
}

TEST_CASE(static_broadcast)
{
    migraphx::module m0;
    {
        migraphx::shape s{migraphx::shape::float_type, {2, 4}};
        auto input = m0.add_parameter("data", s);
        migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {4}}};
        auto literal_ins = m0.add_literal(migraphx::literal{lit_s, {6, 5, 4, 3}});
        auto broadcast_lit = m0.add_instruction(
            migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", s.lens()}}), literal_ins);
        auto add_ins = m0.add_instruction(migraphx::make_op("add"), input, broadcast_lit);
        m0.add_return({add_ins});
    }

    migraphx::module m1;
    {
        migraphx::shape s{migraphx::shape::float_type, {2, 4}};
        auto input = m1.add_parameter("data", s);
        migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {4}}};
        auto literal_ins = m1.add_literal(migraphx::literal{lit_s, {6, 5, 4, 3}});
        auto broadcast_lit =
            m1.add_instruction(migraphx::make_op("broadcast", {{"axis", 1}}), literal_ins, input);
        auto add_ins = m1.add_instruction(migraphx::make_op("add"), input, broadcast_lit);
        m1.add_return({add_ins});
    }
    run_pass(m1);

    EXPECT(m0 == m1);
}

TEST_CASE(static_multibroadcast)
{
    migraphx::module m0;
    {
        migraphx::shape s{migraphx::shape::float_type, {2, 4}};
        auto input = m0.add_parameter("data", s);
        migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {1}, {0}}};
        auto literal_ins = m0.add_literal(migraphx::literal{lit_s, {6}});
        auto broadcast_lit = m0.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), literal_ins);
        auto add_ins = m0.add_instruction(migraphx::make_op("add"), input, broadcast_lit);
        m0.add_return({add_ins});
    }

    migraphx::module m1;
    {
        migraphx::shape s{migraphx::shape::float_type, {2, 4}};
        auto input = m1.add_parameter("data", s);
        migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {1}, {0}}};
        auto literal_ins = m1.add_literal(migraphx::literal{lit_s, {6}});
        auto broadcast_lit =
            m1.add_instruction(migraphx::make_op("multibroadcast"), literal_ins, input);
        auto add_ins = m1.add_instruction(migraphx::make_op("add"), input, broadcast_lit);
        m1.add_return({add_ins});
    }
    run_pass(m1);

    EXPECT(m0 == m1);
}

TEST_CASE(after_split_dyn_broadcast_match)
{
    migraphx::program p0;
    {
        auto* mm0 = p0.get_main_module();
        // create batch submodules
        auto create_submodule = [&](std::size_t batch_size, const std::string& module_name) {
            auto* submod = p0.create_module(module_name);
            migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 4}};
            auto sm_input = submod->add_parameter("data", sm_shape);
            migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {4}}};
            auto literal_ins = submod->add_literal(migraphx::literal{lit_s, {6, 5, 4, 3}});
            auto broadcast_lit = submod->add_instruction(
                migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", sm_shape.lens()}}),
                literal_ins);
            auto add_ins =
                submod->add_instruction(migraphx::make_op("add"), sm_input, broadcast_lit);
            submod->add_return({add_ins});
            return submod;
        };
        auto* dim1 = create_submodule(1, "dim_1");
        auto* dim2 = create_submodule(2, "dim_2");
        auto* dim3 = create_submodule(3, "dim_3");
        auto* dim4 = create_submodule(4, "dim_4");

        migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {4, 4}}};
        auto input0 = mm0->add_parameter("data", s);
        std::vector<migraphx::shape> sub_shapes = {};
        sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
        migraphx::shape out_attr = migraphx::shape{sub_shapes};
        auto sm_ins = mm0->add_instruction(
            migraphx::make_op("select_module",
                              {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
            {input0},
            {dim1, dim2, dim3, dim4});
        auto ret =
            mm0->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
        mm0->add_return({ret});
    }

    migraphx::program p1;
    {
        auto* mm1 = p1.get_main_module();
        migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {4, 4}}};
        auto input1 = mm1->add_parameter("data", s);
        migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {4}}};
        auto literal_ins = mm1->add_literal(migraphx::literal{lit_s, {6, 5, 4, 3}});
        auto broadcast_lit =
            mm1->add_instruction(migraphx::make_op("broadcast", {{"axis", 1}}), literal_ins, input1);
        auto add_ins = mm1->add_instruction(migraphx::make_op("add"), input1, broadcast_lit);
        mm1->add_return({add_ins});
    }
    migraphx::run_passes(p1,
                         {migraphx::split_single_dyn_dim{},
                          migraphx::dead_code_elimination{},
                          migraphx::simplify_dyn_ops{}});

    EXPECT(p0 == p1);
}

TEST_CASE(const_slice_3input)
{
    migraphx::module m0;
    {
        migraphx::shape s{migraphx::shape::float_type, {6, 4, 4}};
        auto input = m0.add_parameter("data", s);
        auto slice_ins = m0.add_instruction(
            migraphx::make_op("slice", {{"starts", {0}}, {"ends", {3}}, {"axes", {0}}}), input);
        m0.add_return({slice_ins});
    }

    migraphx::module m1;
    {
        migraphx::shape s{migraphx::shape::float_type, {6, 4, 4}};
        auto input = m1.add_parameter("data", s);
        migraphx::shape s1{migraphx::shape::int32_type, {1}};
        auto input_starts = m1.add_literal(migraphx::literal{s1, {0}});
        auto input_ends   = m1.add_literal(migraphx::literal{s1, {3}});
        auto slice_ins    = m1.add_instruction(
            migraphx::make_op("slice", {{"axes", {0}}}), input, input_starts, input_ends);
        m1.add_return({slice_ins});
    }
    run_pass(m1);

    EXPECT(m0 == m1);
}

TEST_CASE(const_slice_3input_dyn)
{
    migraphx::module m0;
    {
        migraphx::shape s{migraphx::shape::float_type,
                          {{6, 6}, {2, 4, {2, 4}}, {2, 4, {2, 4}}}};
        auto input = m0.add_parameter("data", s);
        auto slice_ins = m0.add_instruction(
            migraphx::make_op("slice", {{"starts", {0}}, {"ends", {3}}, {"axes", {0}}}), input);
        m0.add_return({slice_ins});
    }

    migraphx::module m1;
    {
        migraphx::shape s{migraphx::shape::float_type,
                          {{6, 6}, {2, 4, {2, 4}}, {2, 4, {2, 4}}}};
        auto input = m1.add_parameter("data", s);
        migraphx::shape s1{migraphx::shape::int32_type, {1}};
        auto input_starts = m1.add_literal(migraphx::literal{s1, {0}});
        auto input_ends   = m1.add_literal(migraphx::literal{s1, {3}});
        auto slice_ins    = m1.add_instruction(
            migraphx::make_op("slice", {{"axes", {0}}}), input, input_starts, input_ends);
        m1.add_return({slice_ins});
    }
    run_pass(m1);

    EXPECT(m0 == m1);
}

TEST_CASE(const_slice_4input)
{
    migraphx::module m0;
    {
        migraphx::shape s{migraphx::shape::float_type, {6, 4, 4}};
        auto input = m0.add_parameter("data", s);
        auto slice_ins = m0.add_instruction(
            migraphx::make_op("slice", {{"starts", {0}}, {"ends", {3}}, {"axes", {0}}}), input);
        m0.add_return({slice_ins});
    }

    migraphx::module m1;
    {
        migraphx::shape s{migraphx::shape::float_type, {6, 4, 4}};
        auto input = m1.add_parameter("data", s);
        migraphx::shape s1{migraphx::shape::int32_type, {1}};
        auto input_starts = m1.add_literal(migraphx::literal{s1, {0}});
        auto input_ends   = m1.add_literal(migraphx::literal{s1, {3}});
        auto input_axes   = m1.add_literal(migraphx::literal{s1, {0}});
        auto slice_ins    = m1.add_instruction(
            migraphx::make_op("slice"), input, input_starts, input_ends, input_axes);
        m1.add_return({slice_ins});
    }
    run_pass(m1);

    EXPECT(m0 == m1);
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/split_single_dyn_dim_test.cpp

@@ -50,8 +50,8 @@ TEST_CASE(dynamic_batch)
             auto sm_input = submod->add_parameter("data", sm_shape);
             migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {1}}};
             auto literal_ins = submod->add_literal(migraphx::literal{lit_s, {6}});
-            auto broadcast_lit = submod->add_instruction(
-                migraphx::make_op("multibroadcast", {{"out_lens", sm_shape.lens()}}), literal_ins);
+            auto broadcast_lit =
+                submod->add_instruction(migraphx::make_op("multibroadcast"), literal_ins, sm_input);
             auto add_ins =
                 submod->add_instruction(migraphx::make_op("add"), sm_input, broadcast_lit);
             submod->add_return({add_ins});
@@ -107,8 +107,8 @@ TEST_CASE(multiple_outputs)
             auto sm_input = submod->add_parameter("data", sm_shape);
             migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {1}}};
             auto literal_ins = submod->add_literal(migraphx::literal{lit_s, {6}});
-            auto broadcast_lit = submod->add_instruction(
-                migraphx::make_op("multibroadcast", {{"out_lens", sm_shape.lens()}}), literal_ins);
+            auto broadcast_lit =
+                submod->add_instruction(migraphx::make_op("multibroadcast"), literal_ins, sm_input);
             auto add0_ins =
                 submod->add_instruction(migraphx::make_op("add"), sm_input, broadcast_lit);
             auto add1_ins = submod->add_instruction(migraphx::make_op("add"), sm_input, sm_input);
@@ -157,64 +157,4 @@ TEST_CASE(multiple_outputs)
     EXPECT(p0 == p1);
 }
 
-TEST_CASE(broadcast_match)
-{
-    // Slightly different from ref_ops_test in that the literal is copied over the submodules.
-    // A different compiler pass will pull the literals from the submodules to the main module.
-    migraphx::program p0;
-    {
-        auto* mm0 = p0.get_main_module();
-        // create batch submodules
-        auto create_submodule = [&](std::size_t batch_size, const std::string& module_name) {
-            auto* submod = p0.create_module(module_name);
-            migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 4}};
-            auto sm_input = submod->add_parameter("data", sm_shape);
-            migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {4}}};
-            auto literal_ins = submod->add_literal(migraphx::literal{lit_s, {6, 5, 4, 3}});
-            auto broadcast_lit = submod->add_instruction(
-                migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", sm_shape.lens()}}),
-                literal_ins);
-            auto add_ins =
-                submod->add_instruction(migraphx::make_op("add"), sm_input, broadcast_lit);
-            submod->add_return({add_ins});
-            return submod;
-        };
-        auto* dim1 = create_submodule(1, "dim_1");
-        auto* dim2 = create_submodule(2, "dim_2");
-        auto* dim3 = create_submodule(3, "dim_3");
-        auto* dim4 = create_submodule(4, "dim_4");
-
-        migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {4, 4}}};
-        auto input0 = mm0->add_parameter("data", s);
-        std::vector<migraphx::shape> sub_shapes = {};
-        sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
-        migraphx::shape out_attr = migraphx::shape{sub_shapes};
-        auto sm_ins = mm0->add_instruction(
-            migraphx::make_op("select_module",
-                              {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
-            {input0},
-            {dim1, dim2, dim3, dim4});
-        auto ret =
-            mm0->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
-        mm0->add_return({ret});
-    }
-
-    migraphx::program p1;
-    {
-        auto* mm1 = p1.get_main_module();
-        migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {4, 4}}};
-        auto input1 = mm1->add_parameter("data", s);
-        migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {4}}};
-        auto literal_ins = mm1->add_literal(migraphx::literal{lit_s, {6, 5, 4, 3}});
-        auto broadcast_lit =
-            mm1->add_instruction(migraphx::make_op("broadcast", {{"axis", 1}}), literal_ins, input1);
-        auto add_ins = mm1->add_instruction(migraphx::make_op("add"), input1, broadcast_lit);
-        mm1->add_return({add_ins});
-    }
-    run_pass(p1);
-
-    EXPECT(p0 == p1);
-}
-
 int main(int argc, const char* argv[]) { test::run(argc, argv); }
tools/download_models.sh

@@ -49,3 +49,8 @@ do
     curl https://download.onnxruntime.ai/onnx/models/$name.tar.gz --output $tmp_dir/$name.tar.gz
     tar -xzvf $tmp_dir/$name.tar.gz --directory $model_dir && rm $tmp_dir/$name.tar.gz
 done
+
+# CI jobs can run as a different user than the docker image builder.
+# Allow read/write access to the models
+chmod 777 $model_dir
tools/install_prereqs.sh

@@ -80,8 +80,8 @@ rbuild prepare -d $PREFIX -s develop
 if [[ ("${ID}" != "sles") ]]; then
     export CMAKE_ARGS="-DONNX_USE_PROTOBUF_SHARED_LIBS=ON"
-    pip3 install onnx==1.10.2 numpy==1.21.6 typing==3.7.4 pytest==6.0.1 packaging==23.0
+    pip3 install onnx==1.14.1 numpy==1.21.6 typing==3.7.4 pytest==6.0.1 packaging==23.0
     # pin version of protobuf in Python for onnx runtime unit tests between dist versions
-    pip3 install protobuf==3.20.0
+    pip3 install protobuf==3.20.2
 fi