gaoqiong / MIGraphX

Commit 606ed5e8, authored Aug 28, 2023 by Brian Pickrell
Merge branch 'rand_uniform' into multinomial_parse_merge_random
Parents: c27d3b62, 476ed17c

Changes: 131 files in total; this page shows 20 changed files with 262 additions and 116 deletions (+262 -116).
src/targets/gpu/jit/mlir.cpp        +15  -2
src/targets/gpu/jit/pointwise.cpp   +1   -1
src/targets/gpu/jit/reduce.cpp      +3   -3
src/targets/gpu/mlir.cpp            +142 -55
src/targets/gpu/target.cpp          +2   -2
src/targets/gpu/time_op.cpp         +1   -1
src/tf/tf_parser.cpp                +1   -1
src/value.cpp                       +1   -1
test/CMakeLists.txt                 +10  -11
test/api/test_cpu.cpp               +3   -3
test/api/test_custom_op.cpp         +1   -1
test/eliminate_contiguous_test.cpp  +34  -2
test/eliminate_pad_test.cpp         +3   -4
test/gpu/mlir.cpp                   +8   -8
test/gpu/quantization.cpp           +2   -2
test/include/test.hpp               +23  -6
test/inline_module_test.cpp         +0   -1
test/insert_pad_test.cpp            +6   -5
test/jit.cpp                        +6   -6
test/layout_nhwc.cpp                +0   -1
src/targets/gpu/jit/mlir.cpp
@@ -36,11 +36,12 @@ struct mlir_compiler : compiler<mlir_compiler>
     operation compile_op(context&, const std::vector<shape>&, const value&) const { return {}; }

-    compiler_replace compile(context& ctx, instruction_ref ins, const operation&) const
+    compiler_replace compile(const context& ctx,
+                             instruction_ref ins,
+                             const operation&,
+                             const value& solution) const
     {
         auto* smod = ins->module_inputs().front();
         assert(smod->get_parameter_names().size() == ins->inputs().size() - 1);
-        return insert(compile_mlir(ctx, *smod, ins->inputs()));
+        return insert(compile_mlir(ctx, *smod, ins->inputs(), solution));
     }

     compiler_replace insert(code_object_op co) const
@@ -50,6 +51,18 @@ struct mlir_compiler : compiler<mlir_compiler>
             m.replace_instruction(ins, mlir);
         }};
     }
+
+    optional<tuning_config> get_tuning_config(const context& ctx,
+                                              instruction_ref ins,
+                                              const operation&,
+                                              bool exhaustive) const
+    {
+        if(not exhaustive)
+            return nullopt;
+        auto shapes = to_shapes(ins->inputs());
+        auto* smod  = ins->module_inputs().front();
+        return get_tuning_config_mlir(ctx, *smod, shapes);
+    }
 };
 } // namespace gpu
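Note: the get_tuning_config/compile pair added above splits kernel tuning into a query step and a build step. A minimal sketch of how a tuner could drive the new interface — pick_best_solution and the compile/benchmark callbacks are hypothetical stand-ins, not MIGraphX source; only the tuning_config fields (problem, solutions) come from this diff:

    #include <limits>
    #include <string>
    #include <vector>

    struct tuning_config // mirrors the fields used in this commit
    {
        std::string problem;                // tuning-table key for the module
        std::vector<std::string> solutions; // candidate perf-config strings
    };

    template <class Compile, class Benchmark>
    std::string pick_best_solution(const tuning_config& tc, Compile compile, Benchmark benchmark)
    {
        std::string best;
        double best_time = std::numeric_limits<double>::max();
        for(const auto& solution : tc.solutions)
        {
            auto kernel = compile(solution); // corresponds to compile(..., solution) above
            double t    = benchmark(kernel); // time the compiled code object
            if(t < best_time)
            {
                best_time = t;
                best      = solution;
            }
        }
        return best; // record under tc.problem so later runs can look it up in the tuning DB
    }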
src/targets/gpu/jit/pointwise.cpp

@@ -72,7 +72,7 @@ struct pointwise_compiler : compiler<pointwise_compiler>
         hip_compile_options options;
         options.inputs         = inputs;
         options.output         = inputs.back();
-        options.virtual_inputs = reduce_dims(inputs);
+        options.virtual_inputs = reduce_dims(normalize_permutation(inputs));
         options.params         = "-Wno-float-equal";
         auto axis = find_fast_axis(options.virtual_inputs);
         auto vec  = vectorize::elements(ctx, axis, options.virtual_inputs);
src/targets/gpu/jit/reduce.cpp

@@ -84,7 +84,7 @@ static shape get_reduced_shape(const shape& s, const std::vector<T>& axes)
     std::fill(lens.begin(), lens.end(), 1);
     for(const auto& axis : axes)
         lens[axis] = s.lens()[axis];
-    return shape{s.type(), lens};
+    return s.with_lens(lens);
 }

 template <class T>

@@ -93,7 +93,7 @@ static shape get_output_shape(const shape& s, const std::vector<T>& axes)
     auto lens = s.lens();
     for(const auto& axis : axes)
         lens[axis] = 1;
-    return shape{s.type(), lens};
+    return s.with_lens(lens);
 }

 template <class ReduceLens>

@@ -228,7 +228,7 @@ struct fused_reduce_compiler : compiler<fused_reduce_compiler>
         auto virtual_inputs = inputs;
         virtual_inputs.push_back(get_reduced_shape(inputs.front(), axes));
         virtual_inputs.push_back(get_output_shape(inputs.front(), axes));
-        virtual_inputs = reduce_dims(virtual_inputs);
+        virtual_inputs = reduce_dims(normalize_permutation(virtual_inputs));
         auto reduce_output_shape = virtual_inputs.back();
         virtual_inputs.pop_back();
         auto reduction_shape = virtual_inputs.back();
src/targets/gpu/mlir.cpp
@@ -36,7 +36,10 @@
 #include <mutex>
 #if !defined(MLIR_MIGRAPHX_DIALECT_API_VERSION) || MLIR_MIGRAPHX_DIALECT_API_VERSION != 3
 #warning "Incompatible version of rocMLIR library used, disabling"
+// Only undefine when not using cppcheck
+#ifndef CPPCHECK
 #undef MIGRAPHX_MLIR
+#endif
 #else
 #include <mlir-c/RegisterRocMLIR.h>
 #endif
@@ -50,8 +53,10 @@
 #include <migraphx/ranges.hpp>
 #include <migraphx/gpu/code_object_op.hpp>
 #include <migraphx/gpu/context.hpp>
+#include <migraphx/gpu/compile_gen.hpp>
 #include <migraphx/gpu/device_name.hpp>
 #include <migraphx/gpu/perfdb.hpp>
+#include <migraphx/gpu/tuning_config.hpp>
 #include <migraphx/iterator_for.hpp>
 #include <migraphx/permutation.hpp>
 #include <deque>
@@ -134,6 +139,10 @@ using mlir_block = MIGRAPHX_MANAGE_MLIR_HANDLE(MlirBlock, mlirBlockDestroy);
 using mlir_pass_manager = MIGRAPHX_MANAGE_MLIR_HANDLE(MlirPassManager, mlirPassManagerDestroy);
+using mlir_tuning_table = MIGRAPHX_MANAGE_MLIR_HANDLE(MlirRockTuningTable, mlirRockTuningTableDestroy);
+using mlir_tuning_space = MIGRAPHX_MANAGE_MLIR_HANDLE(MlirRockTuningSpace, mlirRockTuningSpaceDestroy);
+using mlir_tuning_param = MIGRAPHX_MANAGE_MLIR_HANDLE(MlirRockTuningParam, mlirRockTuningParamDestroy);

 std::string_view to_string_view(MlirStringRef s) { return {s.data, s.length}; }
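Note: the three new using-declarations extend the file's existing RAII pattern for MLIR C-API handles to the rocMLIR tuning objects, so tuning tables, spaces, and params are destroyed automatically when they leave scope. The macro itself is defined elsewhere in this file; the sketch below is only one plausible shape for such a wrapper, assuming move-only ownership of a by-value C handle:

    template <class Handle, void (*Destroy)(Handle)>
    struct mlir_handle // hypothetical equivalent of MIGRAPHX_MANAGE_MLIR_HANDLE
    {
        mlir_handle() = default;
        explicit mlir_handle(Handle h) : handle(h), owned(true) {}
        mlir_handle(const mlir_handle&)            = delete;
        mlir_handle& operator=(const mlir_handle&) = delete;
        mlir_handle(mlir_handle&& other) noexcept : handle(other.handle), owned(other.owned)
        {
            other.owned = false; // transfer ownership; only one wrapper destroys the handle
        }
        ~mlir_handle()
        {
            if(owned)
                Destroy(handle); // e.g. mlirRockTuningParamDestroy
        }
        Handle get() const { return handle; }

        private:
        Handle handle{};
        bool owned = false;
    };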
@@ -167,12 +176,6 @@ std::string mlir_print(F f, T x)
     return ss.str();
 }

-bool has_xdlops(const std::string& target_arch)
-{
-    const auto device_name = trim(split_string(target_arch, ':').front());
-    return (starts_with(device_name, "gfx9") and device_name >= "gfx908");
-}
-
 struct mlir_program
 {
     mlir_program()
@@ -507,7 +510,8 @@ struct mlir_program
         ops.add_attributes({{"function_type", make_function_type(inputs, outputs)},
                             {"sym_name", sym_name},
                             {"kernel", std::string("mixr")},
-                            {"arch", target_arch}});
+                            {"arch", target_arch},
+                            {"num_cu", num_cu}});
         ops.add_region(std::move(region));
         insert(body, std::move(ops));
@@ -554,14 +558,7 @@ struct mlir_program
     static std::string get_symbol_name(const module& m)
     {
-        for(auto ins : iterator_for(m))
-        {
-            if(ins->name() == "convolution" or ins->name() == "dot")
-            {
-                return "mlir_" + ins->name();
-            }
-        }
-        return "main";
+        return "mlir_" + gen::generate_name_from_ops(m);
     }

     void parse(const module& m)
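Note: get_symbol_name previously picked the first convolution or dot and fell back to "main"; it now derives the symbol from every op in the module via gen::generate_name_from_ops. Judging by the renamed symbols in the test/gpu/mlir.cpp hunks below (mlir_convolution_add_relu, mlir_quant_dot_add, mlir_dot_where), the generated name joins the op names in order. A sketch of that joining step, with a simplified signature taking the op names directly rather than a module:

    #include <string>
    #include <vector>

    std::string generate_name_from_ops(const std::vector<std::string>& op_names)
    {
        std::string name;
        for(const auto& op : op_names)
        {
            if(not name.empty())
                name += '_'; // "convolution", "add", "relu" -> "convolution_add_relu"
            name += op;
        }
        return name; // get_symbol_name() prefixes "mlir_"
    }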
@@ -597,9 +594,6 @@ struct mlir_program
         {
             pp = problem_params{ins->get_operator(), to_shapes(ins->inputs()), ins->get_shape()};
-            // check if HW supports xdlops
-            if(has_xdlops(target_arch))
-                ops.add_attributes({{"xdlopsV2", true}});
         }

         std::vector<MlirValue> inputs;
@@ -616,18 +610,30 @@ struct mlir_program
         }
     }

-    code_object_op compile() MIGRAPHX_TIDY_CONST
+    void run_high_level_pipeline() MIGRAPHX_TIDY_CONST
     {
         mlir_pass_manager pm_front{mlirPassManagerCreate(ctx.get())};
-        mlir_pass_manager pm_back{mlirPassManagerCreate(ctx.get())};
-        // 1st pipeline to call
         mlirMIGraphXAddHighLevelPipeline(pm_front.get());
         mlirPassManagerRunOnOp(pm_front.get(), mlirModuleGetOperation(mmodule.get()));
+    }

-        get_module_tuned();
-        // 2nd pipeline to call
+    void run_backend_pipeline() MIGRAPHX_TIDY_CONST
+    {
+        mlir_pass_manager pm_back{mlirPassManagerCreate(ctx.get())};
         mlirMIGraphXAddBackendPipeline(pm_back.get(), target_arch.c_str());
         mlirPassManagerRunOnOp(pm_back.get(), mlirModuleGetOperation(mmodule.get()));
+    }
+
+    code_object_op compile(const value& solution) MIGRAPHX_TIDY_CONST
+    {
+        // 1st pipeline to call
+        run_high_level_pipeline();
+        if(solution.is_null())
+            get_module_tuned();
+        else
+            set_tuning(solution);
+        // 2nd pipeline to call
+        run_backend_pipeline();
         code_object_op op{};
         op.symbol_name = sym_name;
@@ -636,7 +642,12 @@ struct mlir_program
         return op;
     }

-    void find_target() { target_arch = get_device_name(); }
+    void set_gpu_properties(const context& migraphx_ctx)
+    {
+        const auto& device = migraphx_ctx.get_current_device();
+        target_arch        = device.get_device_name();
+        num_cu             = device.get_cu_count();
+    }

     std::pair<std::size_t, std::size_t> get_launch_params() const
     {
@@ -650,7 +661,7 @@ struct mlir_program
     value::binary get_binary() const
     {
-        int size = 0;
+        size_t size = 0;
         mlirGetBinary(mmodule.get(), &size, nullptr);
         value::binary result(size);
         if(mlirGetBinary(mmodule.get(), &size, reinterpret_cast<char*>(result.data())))
@@ -658,14 +669,52 @@ struct mlir_program
             MIGRAPHX_THROW("Failed to compile mlir program");
     }

+    void set_tuning(const value& v) MIGRAPHX_TIDY_CONST
+    {
+        const auto* str = v.if_string();
+        if(str == nullptr)
+            MIGRAPHX_THROW("mlir tuning solutions must be strings");
+        if(not mlirRockTuningSetFromStr(mmodule.get(), make_mlir_string_ref(*str)))
+            MIGRAPHX_THROW("Failed setting tuning key: " + *str);
+    }
+
+    tuning_config get_tuning_config() MIGRAPHX_TIDY_CONST
+    {
+        tuning_config tc;
+        run_high_level_pipeline();
+        mlir_tuning_space params{
+            mlirRockTuningSpaceCreate(mmodule.get(), RocmlirTuningParamSetKindFull)};
+        for(auto i : range(mlirRockTuningGetNumParams(params.get())))
+        {
+            mlir_tuning_param param{mlirRockTuningParamCreate()};
+            if(not mlirRockTuningParamGet(params.get(), i, param.get()))
+                MIGRAPHX_THROW("Incorrect mlir tuning parameter: " + std::to_string(i));
+            std::array<char, ROCMLIR_TUNING_KEY_BUFSZ> perf_key;
+            size_t perf_key_bytes =
+                mlirRockTuningParamToString(param.get(), perf_key.data(), perf_key.size());
+            if(perf_key_bytes > perf_key.size())
+                MIGRAPHX_THROW("Tuning perf key was " + std::to_string(perf_key_bytes) +
+                               " bytes and thus too long");
+            tc.solutions.emplace_back(perf_key.begin(), perf_key.begin() + perf_key_bytes);
+        }
+        std::array<char, ROCMLIR_TUNING_KEY_BUFSZ> tuning_key;
+        size_t tuning_key_bytes =
+            mlirRockTuningGetKey(mmodule.get(), tuning_key.data(), tuning_key.size());
+        if(tuning_key_bytes > tuning_key.size())
+            MIGRAPHX_THROW("Tuning table key was " + std::to_string(tuning_key_bytes) +
+                           " bytes and thus too long");
+        tc.problem = std::string(tuning_key.begin(), tuning_key.begin() + tuning_key_bytes);
+        return tc;
+    }
+
     std::string get_tune_params(bool xdlops) const { return get_mlir_perf_for_conv(pp, xdlops); }

     // This function appends to tuning cfg file that could be
     // used with rocMLIR tuning scripts.
-    void dump_tuning_cfg(const char* prob_config) const
+    void dump_tuning_cfg(const std::string& prob_config) const
     {
         std::string tuning_cfg_path = string_value_of(MIGRAPHX_MLIR_TUNING_CFG{});
-        if(!tuning_cfg_path.empty())
+        if(not tuning_cfg_path.empty())
         {
             std::vector<std::string> tokens = split_string(prob_config, '\t');
             std::string prob                = tokens[1];
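Note: the key-retrieval calls above follow a fixed-buffer convention: the callee writes into a caller-provided buffer of ROCMLIR_TUNING_KEY_BUFSZ bytes and returns the number of bytes the full key needs, so a count larger than the buffer signals truncation instead of silently clipping the key. A generic sketch of the idiom — write_key is a hypothetical stand-in for mlirRockTuningParamToString/mlirRockTuningGetKey, and the buffer size and sample key are assumed values:

    #include <algorithm>
    #include <array>
    #include <cstddef>
    #include <cstring>
    #include <stdexcept>
    #include <string>

    constexpr std::size_t key_bufsz = 2048; // stand-in for ROCMLIR_TUNING_KEY_BUFSZ

    // Hypothetical C-style writer: fills buf and returns the bytes required.
    std::size_t write_key(char* buf, std::size_t bufsz)
    {
        const char key[] = "sample-perf-key";
        std::memcpy(buf, key, std::min(bufsz, sizeof(key) - 1));
        return sizeof(key) - 1; // bytes needed, which may exceed what was written
    }

    std::string read_key()
    {
        std::array<char, key_bufsz> buf{};
        std::size_t n = write_key(buf.data(), buf.size());
        if(n > buf.size())
            throw std::runtime_error("key needs " + std::to_string(n) + " bytes; too long");
        return {buf.begin(), buf.begin() + n}; // copy only the bytes actually filled
    }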
@@ -682,51 +731,66 @@ struct mlir_program
         }
     }

-    static mlir_tuning_table create_tuning_table()
+    static std::pair<mlir_tuning_table, bool> load_tuning_table()
     {
         mlir_tuning_table tuning_table{mlirRockTuningTableCreate()};
+        bool found_table           = false;
         std::string tuning_db_path = string_value_of(MIGRAPHX_MLIR_TUNING_DB{});
-        if(!tuning_db_path.empty())
+        if(not tuning_db_path.empty())
         {
             std::ifstream tuning_db_tsv(tuning_db_path);
             if(tuning_db_tsv)
             {
+                found_table = true;
                 std::string line;
                 while(std::getline(tuning_db_tsv, line))
                 {
                     std::vector<std::string> tokens = split_string(line, '\t');
                     std::string arch                = tokens[0];
-                    std::string prob                = tokens[1];
-                    std::string perf                = tokens[2];
-                    std::string key                 = arch.append("\t").append(prob);
-                    mlirRockTuningUpdateTable(tuning_table.get(), key.c_str(), perf.c_str(), 1.0);
+                    std::string num_cu              = tokens[1];
+                    std::string prob                = tokens[2];
+                    std::string perf                = tokens[3];
+                    std::string key =
+                        arch.append("\t").append(num_cu).append("\t").append(prob);
+                    mlirRockTuningUpdateTable(tuning_table.get(),
+                                              make_mlir_string_ref(key),
+                                              make_mlir_string_ref(perf),
+                                              1.0);
                 }
             }
         }
         else
         {
+            found_table = false;
             std::cerr << "WARNING: MLIR tuning db not found. Please set MIGRAPHX_MLIR_TUNING_DB "
                          "for optimal performance."
                       << std::endl;
         }
-        return tuning_table;
+        return std::make_pair(std::move(tuning_table), found_table);
     }

     bool get_module_tuned() const
     {
-        static mlir_tuning_table tuning_table = create_tuning_table();
         // The tuning table as currently implemented is currently not
         // thread safe. This will be fixed in the future. For now,
         // stick a mutex around all tuning table interaction.
         static std::mutex lock;
         std::lock_guard<std::mutex> guard(lock);
-        if(!mlirRockTuningSetFromTable(tuning_table.get(), mmodule.get()))
+        static std::pair<mlir_tuning_table, bool> tuning_table = load_tuning_table();
+        if(not mlirRockTuningSetFromTable(tuning_table.first.get(), mmodule.get()))
         {
-            const char* prob_config = mlirRockTuningGetKey(tuning_table.get(), mmodule.get());
-            std::stringstream key(prob_config);
-            std::cerr << "fails to set param on" << prob_config << std::endl;
-            dump_tuning_cfg(prob_config);
+            std::array<char, ROCMLIR_TUNING_KEY_BUFSZ> prob_config;
+            size_t prob_config_bytes =
+                mlirRockTuningGetKey(mmodule.get(), prob_config.data(), prob_config.size());
+            if(prob_config_bytes >= prob_config.size())
+            {
+                std::cerr << "MLIR tuning key overflowed buffer, needed " << prob_config_bytes
+                          << " bytes" << std::endl;
+                return false;
+            }
+            std::string prob_config_str(prob_config.begin(),
+                                        prob_config.begin() + prob_config_bytes);
+            if(tuning_table.second)
+            {
+                std::cerr << "NOTE: MLIR tuning table did not include a key for "
+                          << prob_config_str << std::endl;
+            }
+            dump_tuning_cfg(prob_config_str);
             return false;
         }
         return true;
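Note: load_tuning_table changes the expected layout of the MIGRAPHX_MLIR_TUNING_DB file: each tab-separated line now carries four fields (arch, num_cu, problem key, perf key) instead of three, and the table key becomes arch\tnum_cu\tprob. An illustration of a line in the new layout — the field values below are invented placeholders, and split_tsv is only a stand-in for the project's split_string:

    #include <sstream>
    #include <string>
    #include <vector>

    std::vector<std::string> split_tsv(const std::string& line)
    {
        std::vector<std::string> tokens;
        std::stringstream ss(line);
        std::string tok;
        while(std::getline(ss, tok, '\t'))
            tokens.push_back(tok);
        return tokens;
    }

    int main()
    {
        // arch <TAB> num_cu <TAB> problem key <TAB> perf key
        const std::string line = "gfx90a\t104\t<problem-key>\t<perf-key>";
        const auto tokens      = split_tsv(line);
        // tokens[0]=arch, tokens[1]=num_cu, tokens[2]=prob, tokens[3]=perf,
        // matching the indices load_tuning_table() reads above.
        return tokens.size() == 4 ? 0 : 1;
    }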
@@ -737,7 +801,8 @@ struct mlir_program
     mlir_module mmodule;
     problem_params pp;
     std::deque<std::string> strings{};
-    std::string target_arch;
+    std::string target_arch = "";
+    std::size_t num_cu      = 0;
     std::string sym_name;
 };
@@ -749,14 +814,14 @@ std::string dump_mlir(const module& m)
     return mlir_print(&mlirOperationPrint, mod_op);
 }

-void adjust_param_shapes(module& m, const std::vector<instruction_ref>& inputs)
+void adjust_param_shapes(module& m, const std::vector<shape>& inputs)
 {
     auto names = m.get_parameter_names();
     std::sort(names.begin(), names.end());
     for(auto i : range(names.size()))
     {
         const auto& name  = names[i];
-        const auto& input = inputs[i]->get_shape();
+        const auto& input = inputs[i];
         auto param        = m.get_parameter(name);
         if(input.standard())
             continue;
@@ -794,22 +859,26 @@ void adjust_param_shapes(module& m, const std::vector<instruction_ref>& inputs)
     }
 }

-code_object_op compile_mlir(const context&, module m, const std::vector<instruction_ref>& inputs)
+code_object_op compile_mlir(const context& migraphx_ctx,
+                            module m,
+                            const std::vector<instruction_ref>& inputs,
+                            const value& solution)
 {
-    adjust_param_shapes(m, inputs);
+    adjust_param_shapes(m, to_shapes(inputs));
     const bool trace = enabled(MIGRAPHX_TRACE_MLIR{});
     if(trace)
         std::cout << m << std::endl;
     mlir_program mp;
-    mp.find_target();
+    mp.set_gpu_properties(migraphx_ctx);
     mp.parse(m);
     auto mod_op = mlirModuleGetOperation(mp.mmodule.get());
     if(trace)
         std::cout << mlir_print(&mlirOperationPrint, mod_op) << std::endl;
-    auto co   = mp.compile();
-    co.output = m.get_output_shapes().front();
+    auto co            = mp.compile(solution);
+    co.expected_inputs = to_shapes(inputs);
+    co.output          = m.get_output_shapes().front();
     return co;
 }
@@ -829,6 +898,17 @@ instruction_ref insert_mlir(module& m,
     return m.insert_instruction(ins, co, refs);
 }

+tuning_config get_tuning_config_mlir(const context& migraphx_ctx,
+                                     module m,
+                                     const std::vector<shape>& inputs)
+{
+    adjust_param_shapes(m, inputs);
+    mlir_program mp;
+    mp.set_gpu_properties(migraphx_ctx);
+    mp.parse(m);
+    return mp.get_tuning_config();
+}
+
 #else

 std::string dump_mlir(const module&) { return {}; }
@@ -840,20 +920,27 @@ void use(T&)
 // Disabling clang-tidy warning on non-real useage.
 // NOLINTBEGIN(performance-unnecessary-value-param)
-code_object_op compile_mlir(const context&, module, const std::vector<instruction_ref>&)
+code_object_op
+compile_mlir(const context&, module, const std::vector<instruction_ref>&, const value&)
 {
     return {};
 }
-// NOLINTEND(performance-unnecessary-value-param)

 instruction_ref
+// cppcheck-suppress funcArgNamesDifferent
 insert_mlir(module& m, instruction_ref, code_object_op co, const std::vector<instruction_ref>&)
 {
     use(co);
     use(m);
     return m.end();
 }

+tuning_config get_tuning_config_mlir(const context&, module, const std::vector<shape>&)
+{
+    return {};
+}
+// NOLINTEND(performance-unnecessary-value-param)
+
 #endif
 } // namespace gpu
src/targets/gpu/target.cpp
@@ -75,7 +75,7 @@ namespace gpu {
 MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_DISABLE_SCHEDULE_PASS)
 MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_DISABLE_REDUCE_FUSION)
 MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_ENABLE_NHWC)
-#ifdef _WIN32
+#ifndef _WIN32
 MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_ENABLE_CK)
 #endif
@@ -138,7 +138,7 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
         dead_code_elimination{},
         enable_pass(not enabled(MIGRAPHX_DISABLE_REDUCE_FUSION{}), fuse_reduce{}),
         dead_code_elimination{},
-#ifdef _WIN32
+#ifndef _WIN32
         enable_pass(enabled(MIGRAPHX_ENABLE_CK{}), fuse_ck{}),
 #endif
         dead_code_elimination{},
src/targets/gpu/time_op.cpp

@@ -34,7 +34,7 @@ namespace gpu {
 std::vector<argument> generate_arguments(const std::vector<shape>& shapes, unsigned long seed = 0)
 {
     std::vector<argument> args;
-    std::transform(shapes.begin(), shapes.end(), std::back_inserter(args), [&](auto& s) {
+    std::transform(shapes.begin(), shapes.end(), std::back_inserter(args), [&](const auto& s) {
         return to_gpu(generate_argument(s, seed++));
     });
     return args;
src/tf/tf_parser.cpp

@@ -338,7 +338,7 @@ void tf_parser::parse_node(const std::string& name)
                 std::string input_name = input;
                 // if input has trailing `:0` index then remove it
                 auto multi_out_idx = input.find(':');
-                if(multi_out_idx != std::string::npos && input.substr(multi_out_idx + 1) == "0")
+                if(multi_out_idx != std::string::npos and input.substr(multi_out_idx + 1) == "0")
                 {
                     input_name = input.substr(0, multi_out_idx);
                 }
src/value.cpp

@@ -285,7 +285,7 @@ bool value::contains(const std::string& pkey) const
 }

 std::size_t value::size() const
 {
-    auto* a = if_array_impl(x);
+    const auto* a = if_array_impl(x);
     if(a == nullptr)
         return 0;
     return a->size();
test/CMakeLists.txt

@@ -98,17 +98,11 @@ endfunction()
 function(add_test_executable TEST_NAME)
     add_executable(${TEST_NAME} EXCLUDE_FROM_ALL ${ARGN})
-    target_link_libraries(${TEST_NAME} ${CMAKE_THREAD_LIBS_INIT})
-    # Cmake does not add flags correctly for gcc
-    if(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
-        set_target_properties(${TEST_NAME} PROPERTIES COMPILE_FLAGS -pthread LINK_FLAGS -pthread)
-    endif()
     set(TEST_COMMAND ${TEST_NAME})
     add_test_command(${TEST_NAME} ${TEST_COMMAND})
     add_dependencies(tests ${TEST_NAME})
     add_dependencies(check ${TEST_NAME})
-    target_link_libraries(${TEST_NAME} migraphx migraphx_onnx migraphx_ref)
+    target_link_libraries(${TEST_NAME} Threads::Threads migraphx migraphx_onnx migraphx_ref)
     target_include_directories(${TEST_NAME} PUBLIC include)
 endfunction(add_test_executable)
@@ -208,11 +202,16 @@ endif()
 function(test_header NAME HEADER)
-    file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/header-main-include-${NAME}.cpp
-        "#include <${HEADER}>\nint main() {}\n"
+    file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/header-main-include-${NAME}.cpp "
+#include <${HEADER}>
+int main() {}\n"
     )
-    file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/header-static-include-${NAME}.cpp
-        "#include <${HEADER}>\n"
+    file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/header-static-include-${NAME}.cpp "
+#include <${HEADER}>
+#if defined(min) || defined(max) || defined(near) || defined(far)
+#error \"Do not include windows.h in header files\"
+#endif\n"
     )
     add_test_executable(${NAME} ${CMAKE_CURRENT_BINARY_DIR}/header-main-include-${NAME}.cpp
test/api/test_cpu.cpp

@@ -145,15 +145,15 @@ TEST_CASE(zero_parameter)
 TEST_CASE(set_scalar_parameter)
 {
-    auto p1 = migraphx::parse_onnx("add_bcast_test.onnx");
-    migraphx::shape s1(migraphx_shape_float_type, {3, 4});
+    auto p1 = migraphx::parse_onnx("implicit_add_bcast_test.onnx");
+    migraphx::shape s1(migraphx_shape_float_type, {3, 4, 1});
     auto param_shapes = p1.get_parameter_shapes();
     auto s1_orig      = param_shapes["1"];
     CHECK(bool{s1 == s1_orig});

     migraphx::onnx_options option;
     option.set_input_parameter_shape("1", {});
-    auto p2 = migraphx::parse_onnx("add_bcast_test.onnx", option);
+    auto p2 = migraphx::parse_onnx("implicit_add_bcast_test.onnx", option);
     migraphx::shape s_scalar(migraphx_shape_float_type);
     auto param_shapes_1 = p2.get_parameter_shapes();
     auto s_scalar_after = param_shapes_1["1"];
test/api/test_custom_op.cpp

@@ -99,7 +99,7 @@ TEST_CASE(run_sigmoid_custom_op)
     EXPECT(bool{result == migraphx::argument(s, expected_result.data())});
 }

-extern "C" void migraphx_test_private_disable_exception_catch(bool b);
+extern "C" MIGRAPHX_C_EXPORT void migraphx_test_private_disable_exception_catch(bool);

 TEST_CASE(run_sigmoid_with_incorrect_shape)
 {
test/eliminate_contiguous_test.cpp

@@ -196,15 +196,47 @@ TEST_CASE(contiguous_pointwise)
             migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {2, 3, 8, 8}}}), y);
         auto yc  = mm->add_instruction(migraphx::make_op("contiguous"), yb);
         auto add = add_pointwise(p, "main:pointwise0", {x, yc}, single_pointwise("add"));
-        mm->add_instruction(pass_op{}, add);
+        auto cadd = mm->add_instruction(migraphx::make_op("contiguous"), add);
+        mm->add_instruction(pass_op{}, cadd);
     }
     auto count = std::distance(mm->begin(), mm->end());
     run_pass(*mm);
-    EXPECT(std::distance(mm->begin(), mm->end()) == (count - 1));
+    EXPECT(std::distance(mm->begin(), mm->end()) == (count - 2));
     EXPECT(std::none_of(
         mm->begin(), mm->end(), [](auto&& ins) { return ins.name() == "contiguous"; }));
 }

+TEST_CASE(contiguous_nhwc_pointwise)
+{
+    auto s = migraphx::shape::from_permutation(
+        migraphx::shape::float_type, {2, 3, 8, 8}, {0, 2, 3, 1});
+    migraphx::program p1;
+    {
+        auto* mm = p1.get_main_module();
+        auto x   = mm->add_parameter("x", s);
+        auto y   = mm->add_parameter("y", migraphx::shape{migraphx::shape::float_type, {3}});
+        auto yb  = mm->add_instruction(
+            migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {2, 3, 8, 8}}}), y);
+        auto yc   = mm->add_instruction(migraphx::make_op("contiguous"), yb);
+        auto add  = add_pointwise(p1, "main:pointwise0", {x, yc}, single_pointwise("add"));
+        auto cadd = mm->add_instruction(migraphx::make_op("contiguous"), add);
+        mm->add_instruction(pass_op{}, cadd);
+    }
+    run_pass(*p1.get_main_module());
+
+    migraphx::program p2;
+    {
+        auto* mm = p2.get_main_module();
+        auto x   = mm->add_parameter("x", s);
+        auto y   = mm->add_parameter("y", migraphx::shape{migraphx::shape::float_type, {3}});
+        auto yb  = mm->add_instruction(
+            migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {2, 3, 8, 8}}}), y);
+        auto add  = add_pointwise(p2, "main:pointwise0", {x, yb}, single_pointwise("add"));
+        auto cadd = mm->add_instruction(migraphx::make_op("contiguous"), add);
+        mm->add_instruction(pass_op{}, cadd);
+    }
+    EXPECT(p1 == p2);
+}
+
 TEST_CASE(slice_contiguous)
 {
     migraphx::module m;
test/eliminate_pad_test.cpp

@@ -27,7 +27,7 @@
 #include <migraphx/pass_manager.hpp>
 #include <migraphx/instruction.hpp>
 #include <basic_ops.hpp>
-#include <migraphx/operators.hpp>
+#include <migraphx/op/common.hpp>
 #include <migraphx/make_op.hpp>
 #include <test.hpp>

@@ -58,9 +58,8 @@ create_conv(migraphx::instruction_ref& l_img,
     migraphx::shape s_weights{migraphx::shape::int32_type, {4, channels, 3, 3}};
     std::vector<int32_t> weights(4 * channels * 3 * 3);
     auto l_weights = m.add_literal(migraphx::literal{s_weights, weights});
-    migraphx::op::convolution op;
-    op.padding_mode = padding_mode;
-    return m.add_instruction(op, l_img, l_weights);
+    return m.add_instruction(
+        migraphx::make_op("convolution", {{"padding_mode", padding_mode}}), l_img, l_weights);
 }

 TEST_CASE(rewrite_pad)
test/gpu/mlir.cpp

@@ -84,7 +84,7 @@ migraphx::program create_program_from_mlir(const migraphx::module& mmlir)
     inputs.push_back(mm->add_parameter("output", mmlir.get_output_shapes().front()));

     migraphx::gpu::context ctx;
-    migraphx::gpu::insert_mlir(*mm, mm->end(), compile_mlir(ctx, mmlir, inputs), inputs);
+    migraphx::gpu::insert_mlir(*mm, mm->end(), compile_mlir(ctx, mmlir, inputs, {}), inputs);
     return p;
 }
...
@@ -140,7 +140,7 @@ TEST_CASE(conv)
{
const
std
::
string
mlir_output
=
R"__migraphx__(
module {
func.func @mlir_convolution(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr"} {
func.func @mlir_convolution(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr"
, num_cu = 0 : i64
} {
%0 = migraphx.convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
return %0 : tensor<1x2x2x2xf32>
}
...
...
@@ -163,7 +163,7 @@ TEST_CASE(conv_add_relu)
     const std::string mlir_output = R"__migraphx__(
 module {
-  func.func @mlir_convolution(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr"} {
+  func.func @mlir_convolution_add_relu(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr", num_cu = 0 : i64} {
     %0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
     %1 = migraphx.add(%0, %arg0) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
     %2 = migraphx.relu(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
@@ -191,7 +191,7 @@ TEST_CASE(quant_dot_add)
     const std::string mlir_output = R"__migraphx__(
 module {
-  func.func @main(%arg0: tensor<1x5x4xi8>, %arg1: tensor<1x4x3xi8>, %arg2: tensor<1x5x3xi32>) -> tensor<1x5x3xi32> attributes {arch = "", kernel = "mixr"} {
+  func.func @mlir_quant_dot_add(%arg0: tensor<1x5x4xi8>, %arg1: tensor<1x4x3xi8>, %arg2: tensor<1x5x3xi32>) -> tensor<1x5x3xi32> attributes {arch = "", kernel = "mixr", num_cu = 0 : i64} {
     %0 = migraphx.quant_dot(%arg0, %arg1) : (tensor<1x5x4xi8>, tensor<1x4x3xi8>) -> tensor<1x5x3xi32>
     %1 = migraphx.add(%0, %arg2) : (tensor<1x5x3xi32>, tensor<1x5x3xi32>) -> tensor<1x5x3xi32>
     return %1 : tensor<1x5x3xi32>
@@ -218,7 +218,7 @@ TEST_CASE(dot_add)
     const std::string mlir_output = R"__migraphx__(
 module {
-  func.func @mlir_dot(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>, %arg2: tensor<1x5x3xf32>) -> tensor<1x5x3xf32> attributes {arch = "", kernel = "mixr"} {
+  func.func @mlir_dot_add(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>, %arg2: tensor<1x5x3xf32>) -> tensor<1x5x3xf32> attributes {arch = "", kernel = "mixr", num_cu = 0 : i64} {
     %0 = migraphx.dot(%arg0, %arg1) : (tensor<1x5x4xf32>, tensor<1x4x3xf32>) -> tensor<1x5x3xf32>
     %1 = migraphx.add(%0, %arg2) : (tensor<1x5x3xf32>, tensor<1x5x3xf32>) -> tensor<1x5x3xf32>
     return %1 : tensor<1x5x3xf32>
@@ -244,7 +244,7 @@ TEST_CASE(conv_int8_dequantize_quantize)
     const std::string mlir_output = R"__migraphx__(
 module {
-  func.func @main(%arg0: tensor<2x8x3x3xi8>, %arg1: tensor<1x8x4x4xi8>, %arg2: tensor<1x2x2x2xf32>, %arg3: tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xi32> attributes {arch = "", kernel = "mixr"} {
+  func.func @mlir_quant_convolution_dequantizelinear_quantizelinear(%arg0: tensor<2x8x3x3xi8>, %arg1: tensor<1x8x4x4xi8>, %arg2: tensor<1x2x2x2xf32>, %arg3: tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xi32> attributes {arch = "", kernel = "mixr", num_cu = 0 : i64} {
     %0 = migraphx.quant_convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xi8>, tensor<2x8x3x3xi8>) -> tensor<1x2x2x2xi32>
     %1 = migraphx.dequantizelinear(%0, %arg2, %arg3) : (tensor<1x2x2x2xi32>, tensor<1x2x2x2xf32>, tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xf32>
     %2 = migraphx.quantizelinear(%1, %arg2, %arg3) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>, tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xi32>
@@ -277,7 +277,7 @@ TEST_CASE(dot_convert)
     const std::string mlir_output = R"__migraphx__(
 module {
-  func.func @mlir_dot(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>) -> tensor<1x5x3xf16> attributes {arch = "", kernel = "mixr"} {
+  func.func @mlir_dot_convert(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>) -> tensor<1x5x3xf16> attributes {arch = "", kernel = "mixr", num_cu = 0 : i64} {
     %0 = migraphx.dot(%arg0, %arg1) : (tensor<1x5x4xf32>, tensor<1x4x3xf32>) -> tensor<1x5x3xf32>
     %1 = migraphx.convert(%0) {target_type = 1 : i64} : (tensor<1x5x3xf32>) -> tensor<1x5x3xf16>
     return %1 : tensor<1x5x3xf16>
@@ -303,7 +303,7 @@ TEST_CASE(dot_where)
     const std::string mlir_output = R"__migraphx__(
 module {
-  func.func @mlir_dot(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>, %arg2: tensor<1x5x3xi8>, %arg3: tensor<1x5x3xf32>) -> tensor<1x5x3xf32> attributes {arch = "", kernel = "mixr"} {
+  func.func @mlir_dot_where(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>, %arg2: tensor<1x5x3xi8>, %arg3: tensor<1x5x3xf32>) -> tensor<1x5x3xf32> attributes {arch = "", kernel = "mixr", num_cu = 0 : i64} {
     %0 = migraphx.dot(%arg0, %arg1) : (tensor<1x5x4xf32>, tensor<1x4x3xf32>) -> tensor<1x5x3xf32>
     %1 = migraphx.where(%arg2, %0, %arg3) : (tensor<1x5x3xi8>, tensor<1x5x3xf32>, tensor<1x5x3xf32>) -> tensor<1x5x3xf32>
     return %1 : tensor<1x5x3xf32>
test/gpu/quantization.cpp

@@ -24,7 +24,7 @@
 #include <iostream>
 #include <vector>
 #include <migraphx/gpu/fuse_mlir.hpp>
-#include <migraphx/operators.hpp>
+#include <migraphx/make_op.hpp>
 #include <migraphx/instruction.hpp>
 #include <migraphx/quantization.hpp>
 #include <migraphx/generate.hpp>

@@ -90,7 +90,7 @@ TEST_CASE(int8_quantization)
         migraphx::shape sc{migraphx::shape::float_type, {5, 8}};
         auto pa = mm->add_parameter("a", sa);
         auto pb = mm->add_parameter("b", sb);
-        mm->add_instruction(migraphx::op::dot{}, pa, pb);
+        mm->add_instruction(migraphx::make_op("dot"), pa, pb);
         return p;
     };
test/include/test.hpp

@@ -22,6 +22,7 @@
  * THE SOFTWARE.
  */
+#include <atomic>
 #include <algorithm>
 #include <cassert>
 #include <cstdio>

@@ -342,11 +343,19 @@ inline std::ostream& operator<<(std::ostream& os, const color& c)
     return os;
 }

+inline std::atomic<int>& failures()
+{
+    // NOLINTNEXTLINE
+    static std::atomic<int> f = 0;
+    return f;
+}
+
 template <class T, class F>
 void failed(T x, const char* msg, const char* func, const char* file, int line, F f)
 {
     if(not bool(x.value()))
     {
+        failures()++;
         std::cout << func << std::endl;
         std::cout << file << ":" << line << ":" << std::endl;
         std::cout << color::bold << color::fg_red << "    FAILED: " << color::reset << msg << " "
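Note: failures() uses the function-local-static idiom so a header-only test framework gets exactly one counter per process no matter how many translation units include test.hpp, and std::atomic keeps the increments in failed() race-free. The pattern in isolation — counter() below is a hypothetical analogue of test::failures(), not the project's code:

    #include <atomic>

    inline std::atomic<int>& counter()
    {
        static std::atomic<int> c = 0; // one instance per process; initialization is thread-safe
        return c;
    }

    // Every includer shares the same counter:
    //   counter()++;   // record a failure, as failed() does above
    //   counter() = 0; // reset before a test case, as the driver does below
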
@@ -384,7 +393,7 @@ bool throws(F f, const std::string& msg = "")
 }

 template <class T, class U>
-auto near(T px, U py, double ptol = 1e-6f)
+auto within_abs(T px, U py, double ptol = 1e-6f)
 {
     return make_function(
         "near", [](auto x, auto y, auto tol) { return std::abs(x - y) < tol; })(px, py, ptol);
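Note: the rename of near to within_abs lines up with the new windows.h guard added to test/CMakeLists.txt in this commit (windows.h historically defines near and far as macros), which suggests the rename avoids a collision on Windows builds. Usage is unchanged apart from the name; the check passes when |x - y| < tol. A usage sketch, assuming a test file that includes test.hpp:

    #include <cmath>
    // #include <test.hpp> // provides TEST_CASE, EXPECT, and test::within_abs

    TEST_CASE(within_abs_usage)
    {
        EXPECT(test::within_abs(std::sqrt(2.0) * std::sqrt(2.0), 2.0)); // default tol = 1e-6
        EXPECT(test::within_abs(3.14159, 3.14, 1e-2));                  // explicit tolerance
    }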
@@ -586,13 +595,21 @@ struct driver
         {
             try
             {
+                failures() = 0;
                 f();
             }
             // cppcheck-suppress EmptyCatchStatement
             catch(const failure_error&)
             {
                 msg = "Test failure";
             }
         }
+        if(msg.empty() and failures() != 0)
+        {
+            if(failures() == 1)
+                msg = "Test failure";
+            else
+                msg = std::to_string(failures()) + " test failures";
+        }
         if(msg.empty())
         {
             out() << color::fg_green << "[ COMPLETE ] " << color::reset << color::bold << name
@@ -683,10 +700,10 @@ inline void run(int argc, const char* argv[])
 #define TEST_CAPTURE(...) test::capture{}->*__VA_ARGS__
 // NOLINTNEXTLINE
-#define CHECK(...)                                                                                 \
-    test::failed(                                                                                  \
-        test::capture{}->*__VA_ARGS__, #__VA_ARGS__, __PRETTY_FUNCTION__, __FILE__, __LINE__, [] { \
-        })
+#define CHECK(...) \
+    test::failed(  \
+        TEST_CAPTURE(__VA_ARGS__), #__VA_ARGS__, __PRETTY_FUNCTION__, __FILE__, __LINE__, [] { \
+        })
 // NOLINTNEXTLINE
 #define EXPECT(...) \
     test::failed(TEST_CAPTURE(__VA_ARGS__), \
test/inline_module_test.cpp

@@ -26,7 +26,6 @@
 #include <migraphx/pass_manager.hpp>
 #include <migraphx/instruction.hpp>
 #include <basic_ops.hpp>
-#include <migraphx/operators.hpp>
 #include <migraphx/make_op.hpp>
 #include <test.hpp>
test/insert_pad_test.cpp

@@ -26,8 +26,8 @@
 #include <migraphx/insert_pad.hpp>
 #include <migraphx/pass_manager.hpp>
 #include <migraphx/instruction.hpp>
+#include <migraphx/op/common.hpp>
 #include <basic_ops.hpp>
-#include <migraphx/operators.hpp>
 #include <migraphx/make_op.hpp>
 #include <test.hpp>

@@ -58,10 +58,11 @@ create_conv(migraphx::instruction_ref& l_img,
     migraphx::shape s_weights{migraphx::shape::int32_type, {4, channels, 3, 3}};
     std::vector<int32_t> weights(4 * channels * 3 * 3);
     auto l_weights = m.add_literal(migraphx::literal{s_weights, weights});
-    migraphx::op::convolution op;
-    op.padding_mode = padding_mode;
-    op.padding      = {0, 0, 1, 1};
-    return m.add_instruction(op, l_img, l_weights);
+    return m.add_instruction(
+        migraphx::make_op("convolution",
+                          {{"padding_mode", padding_mode}, {"padding", {0, 0, 1, 1}}}),
+        l_img,
+        l_weights);
 }

 TEST_CASE(rewrite_pad)
test/jit.cpp

@@ -82,9 +82,9 @@ TEST_CASE(generate_module)
     auto f = compile_module<float(float, float)>(m);
-    EXPECT(test::near(f(2, 2), 2));
-    EXPECT(test::near(f(10, 6), 4));
-    EXPECT(test::near(f(1, 2), std::sqrt(3)));
+    EXPECT(test::within_abs(f(2, 2), 2));
+    EXPECT(test::within_abs(f(10, 6), 4));
+    EXPECT(test::within_abs(f(1, 2), std::sqrt(3)));
 }

 TEST_CASE(generate_module_with_literals)

@@ -99,9 +99,9 @@ TEST_CASE(generate_module_with_literals)
     auto f = compile_module<float(float, float)>(m);
-    EXPECT(test::near(f(1, 2), 2));
-    EXPECT(test::near(f(9, 6), 4));
-    EXPECT(test::near(f(0, 2), std::sqrt(3)));
+    EXPECT(test::within_abs(f(1, 2), 2));
+    EXPECT(test::within_abs(f(9, 6), 4));
+    EXPECT(test::within_abs(f(0, 2), std::sqrt(3)));
 }

 int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/layout_nhwc.cpp

@@ -24,7 +24,6 @@
 #include <migraphx/layout_nhwc.hpp>
 #include <migraphx/dead_code_elimination.hpp>
 #include <migraphx/pass_manager.hpp>
-#include <migraphx/operators.hpp>
 #include <migraphx/generate.hpp>
 #include <migraphx/ranges.hpp>
 #include <migraphx/instruction.hpp>
(End of page 1 of 7; the remaining changed files are on the following pages of the commit view.)