gaoqiong / MIGraphX
Commit 36bb977b, authored Aug 07, 2023 by Brian Pickrell
Merge branch 'develop' into rand_uniform
Parents: d626a09e, c65ab678

Showing 12 of the commit's 52 changed files, with 154 additions and 28 deletions (+154 -28).
test/CMakeLists.txt                      +1   -7
test/api/test_custom_op.cpp              +1   -1
test/gpu/mlir.cpp                        +7   -7
test/module_test.cpp                     +10  -1
test/onnx/.onnxrt-commit                 +1   -1
test/op_shape_test.cpp                   +33  -3
test/py/CMakeLists.txt                   +3   -5
test/verify/test_add_nhwc.cpp            +44  -0
test/verify/test_reduce_mean_nhwc.cpp    +46  -0
tools/api.py                             +4   -2
tools/api/api.cpp                        +1   -1
tools/api/migraphx.h                     +3   -0
test/CMakeLists.txt

@@ -98,17 +98,11 @@ endfunction()
 function(add_test_executable TEST_NAME)
     add_executable(${TEST_NAME} EXCLUDE_FROM_ALL ${ARGN})
-    target_link_libraries(${TEST_NAME} ${CMAKE_THREAD_LIBS_INIT})
-    # Cmake does not add flags correctly for gcc
-    if(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
-        set_target_properties(${TEST_NAME} PROPERTIES COMPILE_FLAGS -pthread LINK_FLAGS -pthread)
-    endif()
     set(TEST_COMMAND ${TEST_NAME})
     add_test_command(${TEST_NAME} ${TEST_COMMAND})
     add_dependencies(tests ${TEST_NAME})
     add_dependencies(check ${TEST_NAME})
-    target_link_libraries(${TEST_NAME} migraphx migraphx_onnx migraphx_ref)
+    target_link_libraries(${TEST_NAME} Threads::Threads migraphx migraphx_onnx migraphx_ref)
     target_include_directories(${TEST_NAME} PUBLIC include)
 endfunction(add_test_executable)
test/api/test_custom_op.cpp

@@ -99,7 +99,7 @@ TEST_CASE(run_sigmoid_custom_op)
     EXPECT(bool{result == migraphx::argument(s, expected_result.data())});
 }

-extern "C" void migraphx_test_private_disable_exception_catch(bool b);
+extern "C" MIGRAPHX_C_EXPORT void migraphx_test_private_disable_exception_catch(bool);

 TEST_CASE(run_sigmoid_with_incorrect_shape)
 {
test/gpu/mlir.cpp

@@ -84,7 +84,7 @@ migraphx::program create_program_from_mlir(const migraphx::module& mmlir)
     inputs.push_back(mm->add_parameter("output", mmlir.get_output_shapes().front()));
     migraphx::gpu::context ctx;
-    migraphx::gpu::insert_mlir(*mm, mm->end(), compile_mlir(ctx, mmlir, inputs), inputs);
+    migraphx::gpu::insert_mlir(*mm, mm->end(), compile_mlir(ctx, mmlir, inputs, {}), inputs);
     return p;
 }

@@ -163,7 +163,7 @@ TEST_CASE(conv_add_relu)
 {
     const std::string mlir_output = R"__migraphx__(
 module {
-  func.func @mlir_convolution(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr"} {
+  func.func @mlir_convolution_add_relu(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr"} {
     %0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
     %1 = migraphx.add(%0, %arg0) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
     %2 = migraphx.relu(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>

@@ -191,7 +191,7 @@ TEST_CASE(quant_dot_add)
 {
     const std::string mlir_output = R"__migraphx__(
 module {
-  func.func @main(%arg0: tensor<1x5x4xi8>, %arg1: tensor<1x4x3xi8>, %arg2: tensor<1x5x3xi32>) -> tensor<1x5x3xi32> attributes {arch = "", kernel = "mixr"} {
+  func.func @mlir_quant_dot_add(%arg0: tensor<1x5x4xi8>, %arg1: tensor<1x4x3xi8>, %arg2: tensor<1x5x3xi32>) -> tensor<1x5x3xi32> attributes {arch = "", kernel = "mixr"} {
     %0 = migraphx.quant_dot(%arg0, %arg1) : (tensor<1x5x4xi8>, tensor<1x4x3xi8>) -> tensor<1x5x3xi32>
     %1 = migraphx.add(%0, %arg2) : (tensor<1x5x3xi32>, tensor<1x5x3xi32>) -> tensor<1x5x3xi32>
     return %1 : tensor<1x5x3xi32>

@@ -218,7 +218,7 @@ TEST_CASE(dot_add)
 {
     const std::string mlir_output = R"__migraphx__(
 module {
-  func.func @mlir_dot(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>, %arg2: tensor<1x5x3xf32>) -> tensor<1x5x3xf32> attributes {arch = "", kernel = "mixr"} {
+  func.func @mlir_dot_add(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>, %arg2: tensor<1x5x3xf32>) -> tensor<1x5x3xf32> attributes {arch = "", kernel = "mixr"} {
     %0 = migraphx.dot(%arg0, %arg1) : (tensor<1x5x4xf32>, tensor<1x4x3xf32>) -> tensor<1x5x3xf32>
     %1 = migraphx.add(%0, %arg2) : (tensor<1x5x3xf32>, tensor<1x5x3xf32>) -> tensor<1x5x3xf32>
     return %1 : tensor<1x5x3xf32>

@@ -244,7 +244,7 @@ TEST_CASE(conv_int8_dequantize_quantize)
 {
     const std::string mlir_output = R"__migraphx__(
 module {
-  func.func @main(%arg0: tensor<2x8x3x3xi8>, %arg1: tensor<1x8x4x4xi8>, %arg2: tensor<1x2x2x2xf32>, %arg3: tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xi32> attributes {arch = "", kernel = "mixr"} {
+  func.func @mlir_quant_convolution_dequantizelinear_quantizelinear(%arg0: tensor<2x8x3x3xi8>, %arg1: tensor<1x8x4x4xi8>, %arg2: tensor<1x2x2x2xf32>, %arg3: tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xi32> attributes {arch = "", kernel = "mixr"} {
     %0 = migraphx.quant_convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xi8>, tensor<2x8x3x3xi8>) -> tensor<1x2x2x2xi32>
     %1 = migraphx.dequantizelinear(%0, %arg2, %arg3) : (tensor<1x2x2x2xi32>, tensor<1x2x2x2xf32>, tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xf32>
     %2 = migraphx.quantizelinear(%1, %arg2, %arg3) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>, tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xi32>

@@ -277,7 +277,7 @@ TEST_CASE(dot_convert)
 {
     const std::string mlir_output = R"__migraphx__(
 module {
-  func.func @mlir_dot(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>) -> tensor<1x5x3xf16> attributes {arch = "", kernel = "mixr"} {
+  func.func @mlir_dot_convert(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>) -> tensor<1x5x3xf16> attributes {arch = "", kernel = "mixr"} {
     %0 = migraphx.dot(%arg0, %arg1) : (tensor<1x5x4xf32>, tensor<1x4x3xf32>) -> tensor<1x5x3xf32>
     %1 = migraphx.convert(%0) {target_type = 1 : i64} : (tensor<1x5x3xf32>) -> tensor<1x5x3xf16>
     return %1 : tensor<1x5x3xf16>

@@ -303,7 +303,7 @@ TEST_CASE(dot_where)
 {
     const std::string mlir_output = R"__migraphx__(
 module {
-  func.func @mlir_dot(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>, %arg2: tensor<1x5x3xi8>, %arg3: tensor<1x5x3xf32>) -> tensor<1x5x3xf32> attributes {arch = "", kernel = "mixr"} {
+  func.func @mlir_dot_where(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>, %arg2: tensor<1x5x3xi8>, %arg3: tensor<1x5x3xf32>) -> tensor<1x5x3xf32> attributes {arch = "", kernel = "mixr"} {
     %0 = migraphx.dot(%arg0, %arg1) : (tensor<1x5x4xf32>, tensor<1x4x3xf32>) -> tensor<1x5x3xf32>
     %1 = migraphx.where(%arg2, %0, %arg3) : (tensor<1x5x3xi8>, tensor<1x5x3xf32>, tensor<1x5x3xf32>) -> tensor<1x5x3xf32>
     return %1 : tensor<1x5x3xf32>
test/module_test.cpp

@@ -83,7 +83,7 @@ TEST_CASE(calc_implict_deps)
     auto* else_mod = p.create_module("If_5_else");
     auto l2        = else_mod->add_literal(migraphx::literal(ys, datay));
     auto a2        = else_mod->add_instruction(migraphx::make_op("if"), {cond}, {then_mod1, else_mod1});
-    auto a3        = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), a2);
+    auto a3        = else_mod->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), a2);
     else_mod->add_return({a3, l2});

     auto ret = mm->add_instruction(migraphx::make_op("if"), {cond}, {then_mod, else_mod});

@@ -95,6 +95,15 @@ TEST_CASE(calc_implict_deps)
     EXPECT(migraphx::contains(implicit_deps.at(ret), x1));
     EXPECT(migraphx::contains(implicit_deps.at(ret), x2));
     EXPECT(migraphx::contains(implicit_deps.at(ret), y2));
+    EXPECT(migraphx::contains(implicit_deps.at(ret), lx));
+    EXPECT(migraphx::contains(implicit_deps.at(ret), ly));
+    // test for sorting
+    p.sort();
+    auto ret_inputs = ret->inputs();
+    ret_inputs.insert(ret_inputs.end(), implicit_deps.at(ret).begin(), implicit_deps.at(ret).end());
+    EXPECT(std::all_of(ret_inputs.begin(), ret_inputs.end(), [&](const auto i) {
+        return std::distance(mm->begin(), i) < std::distance(mm->begin(), ret);
+    }));
 }

 TEST_CASE(module_annotate)
test/onnx/.onnxrt-commit

-d3295f4329d744fe1f8419e1220e123807282b99
+21a71d52bd2074b770807b209939ec11e2c64fa7
test/op_shape_test.cpp

@@ -323,7 +323,7 @@ TEST_CASE(conv_dyn_batch)
 TEST_CASE(conv_dyn_img)
 {
     migraphx::shape input_dyn_shape = {migraphx::shape::float_type,
-                                       {{1, 1}, {3, 3}, {5, 20}, {5, 20}}};
+                                       {{1, 1}, {3, 3}, {5, 20}, {5, 20}}};
     migraphx::shape weights_shape = {migraphx::shape::float_type, {1, 3, 3, 3}};
     migraphx::shape output_dyn_shape = {migraphx::shape::float_type, {{1, 1}, {1, 1}, {3, 18}, {3, 18}}};

@@ -376,7 +376,7 @@ TEST_CASE(conv_autopad_dyn_batch)
 {
     // auto_pad dynamic batch
     migraphx::shape input_dyn_shape = {migraphx::shape::float_type,
-                                       {{1, 10}, {3, 3}, {5, 5}, {5, 5}}};
+                                       {{1, 10}, {3, 3}, {5, 5}, {5, 5}}};
     migraphx::shape weights_shape = {migraphx::shape::float_type, {1, 3, 3, 3}};
     migraphx::shape output_dyn_shape = {migraphx::shape::float_type, {{1, 10}, {1, 1}, {5, 5}, {5, 5}}};

@@ -393,7 +393,7 @@ TEST_CASE(conv_autopad_dyn_img)
 {
     // auto_pad dynamic img
     migraphx::shape input_dyn_shape = {migraphx::shape::float_type,
-                                       {{1, 1}, {3, 3}, {5, 10}, {5, 10}}};
+                                       {{1, 1}, {3, 3}, {5, 10}, {5, 10}}};
     migraphx::shape weights_shape = {migraphx::shape::float_type, {1, 3, 3, 3}};
     migraphx::shape output_dyn_shape = {migraphx::shape::float_type, {{1, 1}, {1, 1}, {5, 10}, {5, 10}}};

@@ -2619,6 +2619,36 @@ TEST_CASE(reshape_non_fixed_not_matching_error)
     throws_shape(migraphx::make_op("reshape", {{"dims", new_shape}}), input);
 }

+TEST_CASE(return_shape_tuple)
+{
+    using migraphx::shape;
+    auto op = migraphx::make_op("@return");
+    shape s0{shape::bool_type, {1, 1}};
+    shape s1{shape::float_type, {2, 3}};
+    std::vector<shape> s{s0, s1};
+
+    auto s_out = op.compute_shape(s);
+    EXPECT(s_out.type() == shape::tuple_type);
+    EXPECT(s0 == s_out.sub_shapes()[0]);
+    EXPECT(s1 == s_out.sub_shapes()[1]);
+}
+
+TEST_CASE(return_shape_half)
+{
+    using migraphx::shape;
+    auto op = migraphx::make_op("@return");
+    std::vector<shape> s{{shape::half_type}};
+    EXPECT(op.compute_shape(s) == shape{shape::half_type});
+}
+
+TEST_CASE(return_shape_empty)
+{
+    using migraphx::shape;
+    auto op = migraphx::make_op("@return");
+    std::vector<shape> s;
+    EXPECT(op.compute_shape(s) == shape{});
+}
+
 TEST_CASE(rnn)
 {
     {
test/py/CMakeLists.txt

@@ -27,7 +27,7 @@ include(PythonModules)
 function(add_py_test NAME SCRIPT)
     foreach(PYTHON_VERSION ${PYTHON_VERSIONS})
         set(ENV_COMMAND ${CMAKE_COMMAND} -E env
-            "PYTHONPATH=$<TARGET_FILE_DIR:migraphx_py_${PYTHON_VERSION}>"
+            "PYTHONPATH=$<TARGET_FILE_DIR:migraphx_pybind_${PYTHON_VERSION}>"
             "PYTHONMALLOC=debug"
             "MALLOC_CHECK_=3"
         )

@@ -41,10 +41,8 @@ function(add_py_test NAME SCRIPT)
     endforeach()
 endfunction()

-foreach(PYTHON_VERSION ${PYTHON_VERSIONS})
-    add_dependencies(tests migraphx_py_${PYTHON_VERSION})
-    add_dependencies(check migraphx_py_${PYTHON_VERSION})
-endforeach()
+add_dependencies(tests migraphx_py)
+add_dependencies(check migraphx_py)

 add_py_test(ref test_cpu.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
 add_py_test(save_load test_save_load.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
test/verify/test_add_nhwc.cpp (new file, mode 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>

struct test_add_nhwc : verify_program<test_add_nhwc>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        auto s   = migraphx::shape::from_permutation(
            migraphx::shape::float_type, {4, 3, 8, 8}, {0, 2, 3, 1});
        auto x   = mm->add_parameter("x", s);
        auto y   = mm->add_parameter("y", s);
        auto add = mm->add_instruction(migraphx::make_op("add"), x, y);
        mm->add_return({add});
        return p;
    }
};
test/verify/test_reduce_mean_nhwc.cpp (new file, mode 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>

struct test_reduce_mean_nhwc : verify_program<test_reduce_mean_nhwc>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        auto s   = migraphx::shape::from_permutation(
            migraphx::shape::float_type, {4, 256, 2, 2}, {0, 2, 3, 1});
        auto x      = mm->add_parameter("x", s);
        auto reduce = mm->add_instruction(migraphx::make_op("reduce_mean", {{"axes", {1}}}), x);
        auto abs    = mm->add_instruction(migraphx::make_op("abs"), reduce);
        auto sqrt   = mm->add_instruction(migraphx::make_op("sqrt"), abs);
        mm->add_return({sqrt});
        return p;
    }
};
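
Both new verify tests build their parameter shape with migraphx::shape::from_permutation, which keeps the logical NCHW dimensions while placing the data in NHWC order in memory. As a rough standalone illustration of that stride arithmetic (my own sketch of the intent, not MIGraphX's implementation), the lens {4, 3, 8, 8} and permutation {0, 2, 3, 1} used in test_add_nhwc imply strides {192, 1, 24, 3}:

#include <cstddef>
#include <iostream>
#include <vector>

// Standalone sketch (not MIGraphX code): compute the strides implied by laying a
// tensor out in a permuted order while keeping its logical dimension order.
std::vector<std::size_t> strides_from_permutation(const std::vector<std::size_t>& lens,
                                                  const std::vector<std::size_t>& perm)
{
    // Pack the permuted dimensions contiguously (row-major)...
    std::vector<std::size_t> packed(perm.size());
    std::size_t stride = 1;
    for(int i = static_cast<int>(perm.size()) - 1; i >= 0; i--)
    {
        packed[i] = stride;
        stride *= lens[perm[i]];
    }
    // ...then scatter those packed strides back onto the logical axes.
    std::vector<std::size_t> strides(perm.size());
    for(std::size_t i = 0; i < perm.size(); i++)
        strides[perm[i]] = packed[i];
    return strides;
}

int main()
{
    // lens {4, 3, 8, 8} (logical NCHW) with permutation {0, 2, 3, 1} (NHWC in memory)
    for(auto s : strides_from_permutation({4, 3, 8, 8}, {0, 2, 3, 1}))
        std::cout << s << ' '; // prints: 192 1 24 3
    std::cout << '\n';
}

With stride 1 on the channel axis, these tests exercise elementwise and reduction handling of non-packed, permuted layouts rather than the default packed NCHW case.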
tools/api.py

@@ -36,6 +36,8 @@ error_type = ''
 success_type = ''
 try_wrap = ''
+export_c_macro = 'MIGRAPHX_C_EXPORT'
+
 c_header_preamble: List[str] = []
 c_api_body_preamble: List[str] = []
 cpp_header_preamble: List[str] = []

@@ -125,7 +127,7 @@ class Type:
 header_function = Template('''
-${error_type} ${name}(${params});
+${export_c_macro} ${error_type} ${name}(${params});
 ''')

 function_pointer_typedef = Template('''

@@ -177,7 +179,7 @@ class CFunction:
             **kwargs)

     def generate_header(self) -> str:
-        return self.substitute(header_function)
+        return self.substitute(header_function, export_c_macro=export_c_macro)

     def generate_function_pointer(self, name: Optional[str] = None) -> str:
         return self.substitute(function_pointer_typedef,
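
The effect of the api.py change is that every C API declaration the generator emits is now prefixed with the export macro. A hypothetical generated line would look like the sketch below; the macro fallback, status type, and function name are placeholders for illustration and are not taken from this commit:

// Hypothetical sketch of a declaration emitted by the updated header_function template.
#ifndef MIGRAPHX_C_EXPORT
#define MIGRAPHX_C_EXPORT
#endif

typedef int migraphx_example_status;

MIGRAPHX_C_EXPORT migraphx_example_status migraphx_example_op(int param);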
tools/api/api.cpp

@@ -44,7 +44,7 @@ namespace migraphx {

 static thread_local bool disable_exception_catch = false; // NOLINT

-extern "C" void migraphx_test_private_disable_exception_catch(bool b)
+extern "C" MIGRAPHX_C_EXPORT void migraphx_test_private_disable_exception_catch(bool b)
 {
     disable_exception_catch = b;
 }
tools/api/migraphx.h

@@ -26,6 +26,9 @@
 #include <stdlib.h>
 #include <stdbool.h>
+
+#include <migraphx/api/export.h>
+
 // Add new types here
 // clang-format off
 #define MIGRAPHX_SHAPE_VISIT_TYPES(m) \
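
The newly included <migraphx/api/export.h> is where MIGRAPHX_C_EXPORT is expected to be defined, but that header is not part of this page of the diff. A minimal sketch of the usual visibility-macro pattern such a header follows (an assumption about its shape, not the actual generated file) is:

// Sketch of a typical export/visibility header. The real migraphx/api/export.h
// is produced by the build system and may differ in names and conditions.
#ifndef MIGRAPHX_API_EXPORT_SKETCH_H
#define MIGRAPHX_API_EXPORT_SKETCH_H

#if defined(_WIN32)
#define MIGRAPHX_C_EXPORT __declspec(dllexport)
#else
#define MIGRAPHX_C_EXPORT __attribute__((visibility("default")))
#endif

#endif // MIGRAPHX_API_EXPORT_SKETCH_H

Marking C API symbols this way keeps them visible when the library is built with hidden symbol visibility by default, which is consistent with the api.cpp and test_custom_op.cpp changes that add the macro to migraphx_test_private_disable_exception_catch.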