gaoqiong / MIGraphX · Commit 9bff4331
Merge commit authored Mar 21, 2023 by Paul
Parents: 214b313f, 94a7f6ee
Changes: 274 files in total; this page shows 14 changed files with 450 additions and 45 deletions (+450 −45)
test/verify/test_reduce_op_large.cpp       +26  −0
test/verify/test_select_module_add.cpp     +77  −0
test/verify/test_select_module_conv.cpp    +71  −0
test/verify/test_select_module_reduce.cpp  +69  −0
test/verify/test_sin_half.cpp              +13  −12
test/verify/test_trans_slice.cpp           +55  −0
tools/accuracy/accuracy_checker.py         +115 −22
tools/api/api.cpp                          +5   −1
tools/build_and_test_onnxrt.sh             +7   −4
tools/download_models.sh                   +5   −4
tools/include/context.hpp                  +2   −0
tools/include/operation.hpp                +2   −0
tools/install_prereqs.sh                   +1   −1
tools/te.py                                +2   −1
test/verify/test_reduce_op_large.cpp  (view file @ 9bff4331)

@@ -76,3 +76,29 @@ struct test_reduce_mean_2 : verify_program<test_reduce_mean_2>
         return p;
     };
 };
+
+struct test_large_reduce_mean1 : verify_program<test_large_reduce_mean1>
+{
+    migraphx::program create_program() const
+    {
+        migraphx::program p;
+        auto* mm = p.get_main_module();
+        migraphx::shape s{migraphx::shape::float_type, {2, 256 * 256 * 16}};
+        auto x = mm->add_parameter("x", s);
+        mm->add_instruction(migraphx::op::reduce_mean{{1}}, x);
+        return p;
+    };
+};
+
+struct test_large_reduce_mean2 : verify_program<test_large_reduce_mean2>
+{
+    migraphx::program create_program() const
+    {
+        migraphx::program p;
+        auto* mm = p.get_main_module();
+        migraphx::shape s{migraphx::shape::float_type, {1, 32, 262144}};
+        auto x = mm->add_parameter("x", s);
+        mm->add_instruction(migraphx::op::reduce_mean{{2}}, x);
+        return p;
+    };
+};
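These two cases are "large" in the sense that the reduced axis spans on the order of a million elements, presumably to exercise the reduction kernels well beyond the small shapes used earlier in this file. The arithmetic below is only a sanity check of the element counts and is not part of the commit.

// Not part of the commit: element-count arithmetic for the shapes above.
static_assert(256 * 256 * 16 == 1048576, "test_large_reduce_mean1 reduces 1,048,576 elements per row");
static_assert(32 * 262144 == 8388608, "test_large_reduce_mean2 holds 8,388,608 elements in total");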
test/verify/test_select_module_add.cpp  (new file, 0 → 100644; view file @ 9bff4331)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>

struct test_select_module_add : verify_program<test_select_module_add>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {1}}};
        auto literal_ins = mm->add_literal(migraphx::literal{lit_s, {6}});

        // create batch submodules
        auto create_submodule = [&](std::size_t batch_size, const std::string& module_name) {
            auto* submod = p.create_module(module_name);
            migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 4}};
            auto sm_input      = submod->add_parameter("data", sm_shape);
            auto broadcast_lit = submod->add_instruction(
                migraphx::make_op("multibroadcast"), literal_ins, sm_input);
            auto add_ins0 =
                submod->add_instruction(migraphx::make_op("add"), sm_input, broadcast_lit);
            auto add_ins1 =
                submod->add_instruction(migraphx::make_op("add"), add_ins0, broadcast_lit);
            submod->add_return({add_ins0, add_ins1});
            return submod;
        };
        auto* batch1 = create_submodule(1, "batch_1");
        auto* batch2 = create_submodule(2, "batch_2");
        auto* batch3 = create_submodule(3, "batch_3");
        auto* batch4 = create_submodule(4, "batch_4");

        migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {4, 4}}};
        auto input = mm->add_parameter("data", s);
        std::vector<migraphx::shape> sub_shapes = {};
        sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
        sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
        migraphx::shape out_attr = migraphx::shape{sub_shapes};
        auto sm_ins = mm->add_instruction(
            migraphx::make_op("select_module",
                              {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
            {input},
            {batch1, batch2, batch3, batch4});
        auto ret0 = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
        auto ret1 = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 1}}), sm_ins);
        mm->add_return({ret0, ret1});
        return p;
    }
};
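Each of the three new test_select_module_* programs follows the same pattern: one submodule per supported batch size, and a select_module instruction in the main module that carries the candidate submodules together with their dynamic output shapes ("output_dyn_shapes"), with get_tuple_elem unpacking the chosen submodule's results. As a rough illustration of how such a dynamic-batch program could be driven, a minimal sketch follows; the compile/eval calls, make_target("ref"), generate_argument, and the run_with_batch_2 wrapper are assumptions for illustration and are not part of this commit.

// Hypothetical usage sketch (not part of the commit): run the dynamic-batch program
// above with a concrete batch of 2; select_module is expected to dispatch to the
// submodule whose parameter shape matches the incoming {2, 4} input.
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/register_target.hpp>

void run_with_batch_2(migraphx::program p) // e.g. test_select_module_add{}.create_program()
{
    p.compile(migraphx::make_target("ref"));
    migraphx::shape static_shape{migraphx::shape::float_type, {2, 4}};
    migraphx::parameter_map params;
    params["data"] = migraphx::generate_argument(static_shape);
    auto results = p.eval(params); // one argument per value returned by the main module
    (void)results;
}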
test/verify/test_select_module_conv.cpp  (new file, 0 → 100644; view file @ 9bff4331)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>

struct test_select_module_conv : verify_program<test_select_module_conv>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();

        // create batch submodules
        auto create_submodule = [&](std::size_t batch_size, const std::string& module_name) {
            auto* submod = p.create_module(module_name);
            migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 3, 4, 4}};
            auto sm_input = submod->add_parameter("data", sm_shape);
            migraphx::shape weights_shape{migraphx::shape::float_type, {2, 3, 3, 3}};
            std::vector<float> weights_data(2 * 3 * 3 * 3, 2.0);
            auto weights  = submod->add_literal(migraphx::literal{weights_shape, weights_data});
            auto conv_ins = submod->add_instruction(
                migraphx::make_op("convolution", {{"padding", {1, 1}}}), sm_input, weights);
            submod->add_return({conv_ins});
            return submod;
        };
        auto* batch1 = create_submodule(1, "batch_1");
        auto* batch2 = create_submodule(2, "batch_2");
        auto* batch3 = create_submodule(3, "batch_3");
        auto* batch4 = create_submodule(4, "batch_4");

        migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {3, 3}, {4, 4}, {4, 4}}};
        auto input = mm->add_parameter("data", s);
        std::vector<migraphx::shape> sub_shapes = {};
        sub_shapes.push_back(
            migraphx::shape{migraphx::shape::float_type, {{1, 4}, {2, 2}, {4, 4}, {4, 4}}});
        migraphx::shape out_attr = migraphx::shape{sub_shapes};
        auto sm_ins = mm->add_instruction(
            migraphx::make_op("select_module",
                              {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
            {input},
            {batch1, batch2, batch3, batch4});
        auto ret = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
        mm->add_return({ret});
        return p;
    }
};
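The single dynamic output shape registered above, {{1, 4}, {2, 2}, {4, 4}, {4, 4}}, follows from the submodule convolution: the 2x3x3x3 weights give two output channels, and a 3x3 kernel with padding 1 and stride 1 keeps the 4x4 spatial size. The snippet below is illustrative arithmetic only, not part of the commit.

// Illustrative only: standard convolution output-size arithmetic for the shapes above.
#include <cstddef>

constexpr std::size_t conv_out_dim(std::size_t in, std::size_t kernel, std::size_t pad, std::size_t stride = 1)
{
    return (in + 2 * pad - kernel) / stride + 1;
}
static_assert(conv_out_dim(4, 3, 1) == 4, "a 4x4 input with a 3x3 kernel and padding 1 stays 4x4");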
test/verify/test_select_module_reduce.cpp  (new file, 0 → 100644; view file @ 9bff4331)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>

struct test_select_module_reduce : verify_program<test_select_module_reduce>
{
    migraphx::program create_program() const
    {
        migraphx::program p;

        // create batch submodules
        auto create_submodule = [&](std::size_t batch_size, const std::string& module_name) {
            auto* submod = p.create_module(module_name);
            migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 2, 2}};
            auto sm_input   = submod->add_parameter("data", sm_shape);
            auto reduce_ins = submod->add_instruction(
                migraphx::make_op("reduce_sum", {{"axes", {1}}}), sm_input);
            auto squeeze_ins = submod->add_instruction(
                migraphx::make_op("squeeze", {{"axes", {1}}}), reduce_ins);
            submod->add_return({squeeze_ins});
            return submod;
        };
        auto* batch1 = create_submodule(1, "batch_1");
        auto* batch2 = create_submodule(2, "batch_2");
        auto* batch3 = create_submodule(3, "batch_3");
        auto* batch4 = create_submodule(4, "batch_4");

        auto* mm = p.get_main_module();
        migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {2, 2}, {2, 2}}};
        auto input = mm->add_parameter("data", s);
        std::vector<migraphx::shape> sub_shapes = {};
        sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {2, 2}}});
        migraphx::shape out_attr = migraphx::shape{sub_shapes};
        auto sm_ins = mm->add_instruction(
            migraphx::make_op("select_module",
                              {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
            {input},
            {batch1, batch2, batch3, batch4});
        auto ret = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
        mm->add_return({ret});
        return p;
    }
};
src/opt/memory_coloring.cpp → test/verify/test_sin_half.cpp  (view file @ 9bff4331)

@@ -21,20 +21,21 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  * THE SOFTWARE.
  */
-#include <migraphx/memory_coloring.hpp>
-#include "memory_coloring_impl.hpp"
-
-namespace migraphx {
-inline namespace MIGRAPHX_INLINE_NS {
-
-void memory_coloring::apply(module& m) const
-{
-    if(not enabled(MIGRAPHX_DISABLE_MEMORY_COLORING{}))
-    {
-        memory_coloring_impl opt(&m, allocation_op, verify);
-        opt.run();
-    }
-}
-
-} // namespace MIGRAPHX_INLINE_NS
-} // namespace migraphx
+#include "verify_program.hpp"
+#include <migraphx/program.hpp>
+#include <migraphx/generate.hpp>
+#include <migraphx/make_op.hpp>
+
+struct test_sin_half : verify_program<test_sin_half>
+{
+    migraphx::program create_program() const
+    {
+        migraphx::program p;
+        auto* mm = p.get_main_module();
+        migraphx::shape s{migraphx::shape::half_type, {10}};
+        auto x = mm->add_parameter("x", s);
+        mm->add_instruction(migraphx::make_op("sin"), x);
+        return p;
+    }
+};
test/verify/test_trans_slice.cpp  (new file, 0 → 100644; view file @ 9bff4331)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>

struct test_trans_slice : verify_program<test_trans_slice>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        auto x = mm->add_parameter("x", {migraphx::shape::float_type, {2, 384, 36, 64}});
        auto transpose = mm->add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), x);
        auto slice1 = mm->add_instruction(
            migraphx::make_op("slice", {{"axes", {1}}, {"starts", {0}}, {"ends", {12}}}),
            transpose);
        auto slice2 = mm->add_instruction(
            migraphx::make_op("slice", {{"axes", {1}}, {"starts", {12}}, {"ends", {24}}}),
            transpose);
        auto transpose2 = mm->add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 1, 3, 2}}}), slice2);
        auto slice3 = mm->add_instruction(
            migraphx::make_op("slice", {{"axes", {1}}, {"starts", {24}}, {"ends", {36}}}),
            transpose);
        mm->add_return({slice1, transpose2, slice3});
        return p;
    }
};
tools/accuracy/accuracy_checker.py  (view file @ 9bff4331)

@@ -25,6 +25,7 @@ import argparse
 import numpy as np
 import migraphx
 import onnxruntime as ort
+import sys


 def parse_args():
@@ -33,15 +34,13 @@ def parse_args():
         'MIGraphX accuracy checker. Use to verify onnx files to ensure MIGraphX\'s output \
         is within tolerance of onnx runtime\'s expected output.')
-    req_args = parser.add_argument_group(title='required arguments')
-    req_args.add_argument('--onnx', type=str, required=True, help='path to onnx file')
-    req_args.add_argument('--provider', type=str, default='CPUExecutionProvider',
-                          help='execution provider for onnx runtime (default = CPUExecutionProvider)')
+    file_args = parser.add_argument_group(title='file type arguments')
+    file_args.add_argument('--onnx', type=str, help='path to onnx file')
+    file_args.add_argument('--tf', type=str, help='path to tf pb file')
+    parser.add_argument('--provider', type=str, default='CPUExecutionProvider',
+                        help='execution provider for onnx runtime (default = CPUExecutionProvider)')
     parser.add_argument('--batch', type=int,
@@ -50,6 +49,9 @@ def parse_args():
     parser.add_argument('--fill1', action='store_true', help='fill all arguments with a value of 1')
+    parser.add_argument('--fill0', action='store_true', help='fill all arguments with a value of 0')
     parser.add_argument('--verbose', action='store_true', help='show verbose information (for debugging)')
@@ -57,6 +59,12 @@ def parse_args():
                         type=float,
                         default=1e-3,
                         help='accuracy tolerance (default = 1e-3)')
+    parser.add_argument('--input-dim',
+                        type=str,
+                        action='append',
+                        help='specify input parameter dimension \
+                        with the following format --input_dim input_name:dim0,dim1,dim2...')
     args = parser.parse_args()
     return args
@@ -111,42 +119,127 @@ def get_np_datatype(in_type):
 def main():
     args = parse_args()
+    use_onnx = True
+    if args.onnx == None:
+        use_onnx = False
+
+    if not use_onnx and args.tf == None:
+        print('Error: please specify either an onnx or tf pb file')
+        sys.exit(-1)
+
     model_name = args.onnx
     batch = args.batch
-    model = migraphx.parse_onnx(model_name, default_dim_value=batch)
+
+    custom_inputs = args.input_dim
+    input_dims = {}
+    if custom_inputs != None:
+        for input in custom_inputs:
+            input_dim = ''.join(input.split(':')[:-1])
+            dims = [int(dim) for dim in input.split(':')[-1].split(',')]
+            input_dims[input_dim] = dims
+
+    if use_onnx:
+        if not input_dims:
+            model = migraphx.parse_onnx(model_name, default_dim_value=batch)
+        else:
+            model = migraphx.parse_onnx(model_name,
+                                        default_dim_value=batch,
+                                        map_input_dims=input_dims)
+    else:
+        model_name = args.tf
+        if not input_dims:
+            model = migraphx.parse_tf(model_name, batch_size=batch)
+        else:
+            model = migraphx.parse_tf(model_name,
+                                      batch_size=batch,
+                                      map_input_dims=input_dims)
+
+    if args.verbose:
+        print(model)
+
-    model.compile(migraphx.get_target('gpu'), offload_copy=False)
+    model.compile(migraphx.get_target('gpu'))
     params = {}
     test_inputs = {}
     for name, shape in model.get_parameter_shapes().items():
         if args.verbose:
-            print('Parameter {} -> {}'.format(name, shape))
+            print(f'Parameter {name} -> {shape}')
         in_shape = shape.lens()
         in_type = shape.type_string()
-        if not args.fill1:
+        if not args.fill1 and not args.fill0:
             test_input = np.random.rand(*(in_shape)).astype(get_np_datatype(in_type))
-        else:
+        elif not args.fill0:
             test_input = np.ones(in_shape).astype(get_np_datatype(in_type))
+        else:
+            test_input = np.zeros(in_shape).astype(get_np_datatype(in_type))
         test_inputs[name] = test_input
-        params[name] = migraphx.to_gpu(migraphx.argument(test_input))
+        params[name] = migraphx.argument(test_input)
-    pred_migx = np.array(model.run(params)[-1])
+    pred_migx = np.array(migraphx.from_gpu(model.run(params)[-1]))
-    sess = ort.InferenceSession(model_name, providers=[args.provider])
-    ort_params = {}
-    for input in sess.get_inputs():
-        ort_params[input.name] = test_inputs[input.name]
-    pred_ort = sess.run(None, ort_params)[-1]
+    if use_onnx:
+        sess = ort.InferenceSession(model_name, providers=[args.provider])
+        ort_params = {}
+        for input in sess.get_inputs():
+            ort_params[input.name] = test_inputs[input.name]
+        try:
+            pred_fw = sess.run(None, ort_params)[-1]
+        except Exception as e:
+            if any(input_dims):
+                print('Error: custom input dim may not be compatible with onnx runtime')
+            raise e
+    else:
+        import tensorflow as tf
+
+        def load_tf_graph(model_name):
+            with tf.io.gfile.GFile(model_name, 'rb') as f:
+                graph_def = tf.compat.v1.GraphDef()
+                graph_def.ParseFromString(f.read())
+            with tf.compat.v1.Graph().as_default() as graph:
+                tf.graph_util.import_graph_def(graph_def)
+            return graph
+
+        graph = load_tf_graph(model_name)
+        is_nhwc = False
+        graph_ops = []
+        for op in graph.get_operations():
+            graph_ops.append(op.name)
+            if 'Conv' in op.node_def.op:
+                if 'NHWC' in op.get_attr('data_format').decode('utf-8'):
+                    is_nhwc = True
+        graph_ops_set = set(graph_ops)
+        tf_dict = {}
+        for name in test_inputs.keys():
+            # graph.get_operations() adds 'import/' to the op name
+            tf_name = f'import/{name}'
+            if tf_name not in graph_ops_set:
+                continue
+            x = graph.get_tensor_by_name(f'{tf_name}:0')
+            tf_input = test_inputs[name]
+            # transpose input for NHWC model
+            if tf_input.ndim == 4 and is_nhwc:
+                tf_dict[x] = np.transpose(tf_input, (0, 2, 3, 1))
+            else:
+                tf_dict[x] = tf_input
+        # assume last node in graph is output
+        # TODO: let user specify op name for output
+        y = graph.get_tensor_by_name(f'{graph_ops[-1]}:0')
+        with tf.compat.v1.Session(graph=graph) as sess:
+            y_out = sess.run(y, feed_dict=tf_dict)
+            pred_fw = y_out
+
-    is_correct = check_correctness(pred_ort, pred_migx, args.tolerance,
+    is_correct = check_correctness(pred_fw, pred_migx, args.tolerance,
                                    args.tolerance, args.verbose)
     verbose_string = ' Rerun with --verbose for detailed information.' \
         if not args.verbose else ''
tools/api/api.cpp  (view file @ 9bff4331)

@@ -32,7 +32,6 @@
 #include <migraphx/register_target.hpp>
 #include <migraphx/generate.hpp>
 #include <migraphx/quantization.hpp>
-#include <migraphx/ref/target.hpp>
 #include <migraphx/load_save.hpp>
 #include <migraphx/make_op.hpp>
 #include <migraphx/register_op.hpp>

@@ -134,6 +133,11 @@ void set_offload_copy(compile_options& options, bool value)
 void set_fast_math(compile_options& options, bool value) { options.fast_math = value; }

+void set_exhaustive_tune_flag(compile_options& options, bool value)
+{
+    options.exhaustive_tune = value;
+}
+
 void set_file_format(file_options& options, const char* format) { options.format = format; }

 void set_default_dim_value(onnx_options& options, size_t value)
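The new set_exhaustive_tune_flag helper only forwards its argument to a compile option; the sketch below shows the member it sets. The member name comes straight from the diff, while the header path and the way the public C API exposes the flag are assumptions.

// Illustration only (assumed header path): the member that
// set_exhaustive_tune_flag(options, true) sets on compile_options.
#include <migraphx/compile_options.hpp>

migraphx::compile_options make_tuned_options()
{
    migraphx::compile_options options;
    options.exhaustive_tune = true; // request a more exhaustive tuning search at compile time
    return options;
}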
tools/build_and_test_onnxrt.sh  (view file @ 9bff4331)

@@ -22,9 +22,12 @@
 # THE SOFTWARE.
 #####################################################################################

 cd /onnxruntime
-pip3 install -r requirements.txt
+pip3 install -r requirements-dev.txt

 # Add newer cmake to the path
 export PATH="/opt/cmake/bin:$PATH"
-export CXXFLAGS="-D__HIP_PLATFORM_HCC__=1 -w"
-./build.sh --config Release --update --build --parallel --cmake_extra_defines ONNXRUNTIME_VERSION=$(cat ./VERSION_NUMBER) --test --use_migraphx
-# pip3 install /code/onnxruntime/build/Linux/Release/dist/*.whl
+export CXXFLAGS="-D__HIP_PLATFORM_AMD__=1 -w"
+./build.sh --config Release --cmake_extra_defines CMAKE_HIP_COMPILER=/opt/rocm/llvm/bin/clang++ --update --build --parallel --cmake_extra_defines ONNXRUNTIME_VERSION=$(cat ./VERSION_NUMBER) --skip_tests --rocm_home /opt/rocm --use_migraphx --migraphx_home /opt/rocm --rocm_version=`cat /opt/rocm/.info/version-dev`
+
+cd build/Linux/Release
+#Add test launcher for onnxrt tests
+../../../tools/ci_build/github/pai/migraphx_test_launcher.sh
tools/download_models.sh  (view file @ 9bff4331)

@@ -26,10 +26,12 @@
 if [ -z "$ONNX_HOME" ]
 then
-    ONNX_HOME=$HOME
+    # The onnx library uses ONNX_HOME, by default if it doesn't exist
+    # the path of " ~/.onnx " is used
+    ONNX_HOME=$HOME/.onnx
 fi

-model_dir=$ONNX_HOME/.onnx/models
+model_dir=$ONNX_HOME/models
 tmp_dir=$ONNX_HOME/tmp/

 mkdir -p $model_dir
 mkdir -p $tmp_dir

@@ -42,7 +44,6 @@ models="bvlc_alexnet \
 for name in $models
 do
-    curl https://s3.amazonaws.com/download.onnx/models/opset_9/$name.tar.gz --output $tmp_dir/$name.tar.gz
+    curl https://download.onnxruntime.ai/onnx/models/$name.tar.gz --output $tmp_dir/$name.tar.gz
     tar -xzvf $tmp_dir/$name.tar.gz --directory $model_dir && rm $tmp_dir/$name.tar.gz
 done
tools/include/context.hpp  (view file @ 9bff4331)

@@ -66,6 +66,7 @@ any_ptr get_queue_context(T&)
 {
     return {};
 }

 template <class T>
 void wait_for_context(T&, any_ptr)
 {

@@ -87,6 +88,7 @@ void finish_on_context(T&, any_ptr){}
 {
     v = ctx.to_value();
 }

 inline void migraphx_from_value(const value& v, context& ctx) { ctx.from_value(v); }
 #endif
tools/include/operation.hpp  (view file @ 9bff4331)

@@ -140,6 +140,8 @@ template <class T>
 auto compute_shape_op(rank<2>, const T& x, const std::vector<shape>& inputs)
     -> decltype(x.normalize_compute_shape(inputs))
 {
+    if(inputs.empty())
+        MIGRAPHX_THROW("At least one input is required for " + x.name());
     dependent_type<operation, T> y = x;
     normalize_attributes(y, inputs[0].max_lens());
     return any_cast<T>(y).normalize_compute_shape(inputs);
tools/install_prereqs.sh  (view file @ 9bff4331)

@@ -57,7 +57,7 @@ echo "Dependencies are installed at $PREFIX"
 rbuild prepare -d $PREFIX -s develop

 # install onnx package for unit tests
-pip3 install onnx==1.8.1 numpy==1.21.6 typing==3.7.4 pytest==6.0.1 packaging==16.8
+pip3 install onnx==1.10.2 numpy==1.21.6 typing==3.7.4 pytest==6.0.1 packaging==23.0

 # pin version of protobuf in Python for onnx runtime unit tests
 pip3 install protobuf==3.20.0
tools/te.py  (view file @ 9bff4331)

@@ -24,7 +24,8 @@
 import string, sys, re

 trivial = [
-    'std::size_t', 'instruction_ref', 'support_metric', 'const_module_ref'
+    'std::size_t', 'instruction_ref', 'support_metric', 'const_module_ref',
+    'bool', 'any_ptr'
 ]

 headers = '''