gaoqiong / MIGraphX · Commits · fd3252dc

Unverified commit fd3252dc, authored Jul 08, 2022 by Umang Yadav, committed by GitHub on Jul 08, 2022.

    Merge branch 'develop' into dot-add

Parents: 56615a84, 8192f37f

Changes: 61 · Showing 20 changed files with 1556 additions and 351 deletions (+1556 −351)
src/targets/gpu/kernels/include/migraphx/kernels/functional.hpp  (+1 −1)
src/targets/gpu/kernels/include/migraphx/kernels/reduce.hpp      (+34 −0)
src/targets/gpu/kernels/include/migraphx/kernels/shape.hpp       (+1 −0)
src/targets/gpu/kernels/include/migraphx/kernels/softmax.hpp     (+45 −0)
src/targets/gpu/kernels/include/migraphx/kernels/vec.hpp         (+2 −0)
src/targets/gpu/lowering.cpp                                     (+2 −3)
src/targets/gpu/mlir.cpp                                         (+647 −0)
src/targets/gpu/mlir_conv.cpp                                    (+0 −315)
src/targets/gpu/quant_convolution.cpp                            (+61 −21)
src/targets/gpu/target.cpp                                       (+3 −2)
test/eliminate_contiguous_test.cpp                               (+20 −0)
test/get_target_assignments.cpp                                  (+61 −0)
test/gpu/mlir.cpp                                                (+194 −0)
test/module_test.cpp                                             (+90 −0)
test/op_shape_test.cpp                                           (+68 −8)
test/shape_test.cpp                                              (+112 −1)
test/simplify_reshapes_test.cpp                                  (+134 −0)
test/verify/run_verify.cpp                                       (+13 −0)
test/verify/test_conv_add_relu.cpp                               (+52 −0)
tools/include/target.hpp                                         (+16 −0)
src/targets/gpu/kernels/include/migraphx/kernels/functional.hpp

@@ -24,7 +24,7 @@
 #ifndef MIGRAPHX_GUARD_KERNELS_FUNCTIONAL_HPP
 #define MIGRAPHX_GUARD_KERNELS_FUNCTIONAL_HPP
 
-#include <migraphx/kernels/array.hpp>
+#include <migraphx/kernels/integral_constant.hpp>
 
 // NOLINTNEXTLINE
 #define MIGRAPHX_RETURNS(...) \
src/targets/gpu/kernels/include/migraphx/kernels/reduce.hpp

@@ -175,6 +175,21 @@ constexpr auto sliced(Slicer slicer, F f)
     };
 }
 
+template <class Input, index_int Axis>
+constexpr auto compute_reduce_axis()
+{
+    constexpr auto lens =
+        transform_i(get_shape_c<Input>{}.lens, [](index_int x, index_int i) -> index_int {
+            if(i == Axis)
+                return 1;
+            return x;
+        });
+    return make_shape(lens, get_shape_c<Input>{}.strides);
+}
+
+template <class Input, index_int Axis>
+using with_axis = decltype(compute_reduce_axis<Input, Axis>());
+
 struct block
 {
     template <class Slicer>
@@ -201,6 +216,14 @@ struct block
         if(idx.local == 0)
             f();
     }
+
+    template <class F>
+    __device__ auto inner(F f) const
+    {
+        return sliced(slicer, [=](auto x, auto... xs) {
+            idx.local_stride(x.get_shape().elements(), [&](auto j) { f(x[j], xs[j]...); });
+        });
+    }
 };
 
 template <class Slicer>
@@ -247,6 +270,17 @@ struct lane
     {
         f();
     }
+
+    template <class F>
+    __device__ auto inner(F f) const
+    {
+        return sliced(slicer, [=](auto x, auto... xs) {
+            for(index_int j = 0; j < x.get_shape().elements(); j++)
+            {
+                f(x[j], xs[j]...);
+            }
+        });
+    }
 };
 
 template <class Slicer>
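A note on the `compute_reduce_axis` helper added above: it builds the shape that the new softmax kernel passes to `reduce::block::run` — the reduced axis is collapsed to length 1 while every other length and all strides are preserved, so the result still addresses the original buffer. A minimal host-side sketch of just the lens transformation, assuming nothing beyond the standard library (`transform_i` and `make_shape` are kernel-side helpers not reproduced here):

    #include <cstddef>
    #include <vector>

    // Sketch: collapse one axis of a shape to length 1, as compute_reduce_axis does.
    // lens {2, 3, 4} with axis = 1 becomes {2, 1, 4}; strides are left untouched.
    std::vector<std::size_t> reduce_axis_lens(std::vector<std::size_t> lens, std::size_t axis)
    {
        for(std::size_t i = 0; i < lens.size(); i++)
        {
            if(i == axis)
                lens[i] = 1; // every element along this axis maps to one output slot
        }
        return lens;
    }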
src/targets/gpu/kernels/include/migraphx/kernels/shape.hpp

@@ -32,6 +32,7 @@ namespace migraphx {
 template <class Lens, class Strides>
 struct shape
 {
+    using shape_type  = shape;
     using index_array = typename Lens::base_array;
     Lens lens         = {};
     Strides strides   = {};
src/targets/gpu/kernels/include/migraphx/kernels/softmax.hpp (new file, 0 → 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_KERNELS_SOFTMAX_HPP
#define MIGRAPHX_GUARD_KERNELS_SOFTMAX_HPP
#include <migraphx/kernels/reduce.hpp>
#include <migraphx/kernels/ops.hpp>
namespace migraphx {

template <index_int Axis, class Input, class Output>
__device__ void softmax(Input input, Output output)
{
    reduce::block::run<reduce::with_axis<Input, Axis>>([&](auto, auto r) {
        auto batch_max = r.reduce(op::max{}, lowest{}, op::id{})(input);
        auto batch_sum =
            r.reduce(op::sum{}, 0, [&](auto x) { return migraphx::exp(x - batch_max); })(input);
        r.inner([&](auto& y, auto x) { y = migraphx::exp(x - batch_max) / batch_sum; })(output,
                                                                                        input);
    });
}

} // namespace migraphx
#endif // MIGRAPHX_GUARD_KERNELS_SOFTMAX_HPP
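The kernel above is the standard numerically stable softmax: reduce to the per-batch maximum, reduce to the sum of shifted exponentials, then normalize elementwise. For reference, a host-side sketch of the same three steps over a 1-D vector — purely illustrative, not part of the commit:

    #include <algorithm>
    #include <cmath>
    #include <numeric>
    #include <vector>

    std::vector<float> softmax(const std::vector<float>& x)
    {
        // 1. reduce(max): subtracting the max keeps exp() from overflowing
        float m = *std::max_element(x.begin(), x.end());
        // 2. reduce(sum) over exp(x - max)
        float sum = std::accumulate(x.begin(), x.end(), 0.0f, [&](float acc, float v) {
            return acc + std::exp(v - m);
        });
        // 3. inner(): elementwise normalization
        std::vector<float> y(x.size());
        std::transform(
            x.begin(), x.end(), y.begin(), [&](float v) { return std::exp(v - m) / sum; });
        return y;
    }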
src/targets/gpu/kernels/include/migraphx/kernels/vec.hpp

@@ -27,6 +27,8 @@
 #include <migraphx/kernels/types.hpp>
 #include <migraphx/kernels/integral_constant.hpp>
 #include <migraphx/kernels/functional.hpp>
+#include <migraphx/kernels/type_traits.hpp>
+#include <migraphx/kernels/debug.hpp>
 
 namespace migraphx {
src/targets/gpu/lowering.cpp

@@ -186,7 +186,6 @@ struct miopen_apply
         add_extend_op("rnn_var_sl_shift_output");
         add_extend_op("rnn_var_sl_shift_sequence");
         add_extend_op("scatter_none");
-        add_extend_op("softmax");
         add_extend_op("topk");
 
         add_batch_norm_inference_op();
@@ -301,7 +300,7 @@ struct miopen_apply
         auto&& op = any_cast<op::deconvolution>(ins->get_operator());
         auto conv = miopen_deconvolution{op, make_deconv(op)};
-        auto ws   = conv.compile(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
+        auto ws   = conv.find(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
 
         auto workspace = insert_allocation(ins, ws);
         auto output    = insert_allocation(ins, ins->get_shape());
@@ -332,7 +331,7 @@ struct miopen_apply
         miopen_quant_convolution conv;
         auto compile_quant_conv_with_format = [&](bool format) {
             conv = miopen_quant_convolution{op, format, make_conv(op)};
-            ws   = conv.compile(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
+            ws   = conv.find(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
         };
         try
src/targets/gpu/mlir.cpp (new file, 0 → 100644, +647 lines)

(This diff is collapsed in the captured page; its contents are not shown.)
src/targets/gpu/mlir_conv.cpp (deleted, 100644 → 0)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/gpu/mlir_conv.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/convolution.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/program.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/program.hpp>
#include <migraphx/gpu/kernel.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/gpu/compile_hip.hpp>
#include <utility>
#include <functional>
#include <algorithm>
#ifdef MIGRAPHX_MLIR_MIOPEN_SUPPORT
#include <Miir.h>
#endif // MIGRAPHX_MLIR_MIOPEN_SUPPORT
#include <cstdio>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

struct mlir_apply
{
    module* mod           = nullptr;
    const mlir_conv* pass = nullptr;

    const char* mlir_kernel_name = "migraphx_conv2d";

    std::unordered_map<uint64_t, instruction_ref> literal_map{};

    struct execution_spec
    {
        migraphx::value::binary binary;
        size_t global_size;
        size_t local_size;
        execution_spec(migraphx::value::binary&& binary_m, size_t global_s, size_t local_s)
            : binary(std::move(binary_m)), global_size(global_s), local_size(local_s)
        {
        }
    };

    std::unordered_map<std::string, std::shared_ptr<execution_spec>> binary_map{};

    context& get_context() const
    {
        assert(pass != nullptr);
        assert(pass->ctx != nullptr);
        return *pass->ctx;
    }

    void init() const
    {
        assert(mod != nullptr);
        assert(pass != nullptr);
    }

    std::shared_ptr<execution_spec> make_mlir_binary(instruction_ref op_r)
    {
        std::shared_ptr<execution_spec> result;
#ifdef MIGRAPHX_MLIR_MIOPEN_SUPPORT
        auto conv = any_cast<op::convolution>(op_r->get_operator());

        auto inp_t = op_r->inputs().at(0)->get_shape();
        auto flt_t = op_r->inputs().at(1)->get_shape();
        auto out_t = op_r->get_shape();

        auto get_type_str = [](const shape& s) -> const char* {
            switch(s.type())
            {
            case shape::float_type: return "f32";
            case shape::half_type: return "f16";
            case shape::bool_type:
            case shape::double_type:
            case shape::uint8_type:
            case shape::int8_type:
            case shape::uint16_type:
            case shape::int16_type:
            case shape::int32_type:
            case shape::int64_type:
            case shape::uint32_type:
            case shape::uint64_type:
            case shape::tuple_type: break;
            }
            return nullptr;
        };
        const auto* inp_t_s = get_type_str(inp_t);
        const auto* flt_t_s = get_type_str(flt_t);
        const auto* out_t_s = get_type_str(out_t);
        if(out_t_s == nullptr || inp_t_s == nullptr || flt_t_s == nullptr)
            return result;

        std::string mlir_options = "--kernel_name " + std::string(mlir_kernel_name);

        // platform spec
        auto& device = get_context().get_current_device();
        char dev_name[64];
        sprintf(dev_name, "gfx%lu%02lu", device.get_device_major(), device.get_device_minor());
        mlir_options += " --arch " + std::string(dev_name) + " --num_cu " +
                        std::to_string(device.get_cu_count()); // ???

        // Conv spec
        mlir_options += " --operation "
                        "conv2d"
                        " --batchsize " +
                        std::to_string(conv.group) + " --groupsize " + std::to_string(1) +
                        " --padding_h " + std::to_string(conv.padding[0]) + " --padding_w " +
                        std::to_string(conv.padding[1]) + " --conv_stride_h " +
                        std::to_string(conv.stride[0]) + " --conv_stride_w " +
                        std::to_string(conv.stride[1]) + " --dilation_h " +
                        std::to_string(conv.dilation[0]) + " --dilation_w " +
                        std::to_string(conv.dilation[1]);

        // Input spec
        mlir_options += " --in_layout "
                        "NCHWG"
                        " --in_type " +
                        std::string(inp_t_s) + " --in_channels " +
                        std::to_string(inp_t.lens()[1]) + " --in_h " +
                        std::to_string(inp_t.lens()[2]) + " --in_w " +
                        std::to_string(inp_t.lens()[3]);

        // Filter spec
        mlir_options += " --fil_layout "
                        "NCHWG"
                        " --fil_type " +
                        std::string(flt_t_s) + " --fil_h " + std::to_string(flt_t.lens()[2]) +
                        " --fil_w " + std::to_string(flt_t.lens()[3]);

        // Output spec
        mlir_options += " --out_layout "
                        "NCHWG"
                        " --out_type " +
                        std::string(out_t_s) + " --out_channels " +
                        std::to_string(out_t.lens()[1]) + " --out_h " +
                        std::to_string(out_t.lens()[2]) + " --out_w " +
                        std::to_string(out_t.lens()[3]);

        auto bin_i = binary_map.find(mlir_options);
        if(bin_i == binary_map.end())
        {
            size_t bin_size   = 0;
            using mlir_handle = MIGRAPHX_MANAGE_PTR(MiirHandle, miirDestroyHandle);
            auto handle       = mlir_handle(miirCreateHandle(mlir_options.c_str()));
            if(miirLowerBin(handle.get()) == MIIR_SUCCESS &&
               miirBufferGet(handle.get(), nullptr, &bin_size) == MIIR_SUCCESS)
            {
                migraphx::value::binary bin(bin_size);
                if(miirBufferGet(handle.get(), reinterpret_cast<char*>(bin.data()), &bin_size) ==
                   MIIR_SUCCESS)
                {
                    size_t global_size;
                    size_t block_size;
                    if(miirGetExecutionDims(handle.get(), &global_size, &block_size) ==
                       MIIR_SUCCESS)
                    {
                        result = std::make_shared<execution_spec>(
                            std::move(bin), global_size, block_size);
                    }
                }
            }
            binary_map[mlir_options] = result;
        }
        else
        {
            result = bin_i->second;
        }
#else // MIGRAPHX_MLIR_MIOPEN_SUPPORT
        (void)op_r;
#endif // MIGRAPHX_MLIR_MIOPEN_SUPPORT
        return result;
    }

    instruction_ref get_literal(uint64_t value)
    {
        auto fi = literal_map.find(value);
        if(fi != literal_map.end())
            return fi->second;
        auto lit = mod->add_literal(value);
        literal_map.emplace(value, lit);
        return lit;
    }

    operation make_code_object_op(instruction_ref op_r,
                                  const std::shared_ptr<execution_spec>& spec)
    {
        // each pointer is expanded out to a MemRefDescriptor
        auto inp_t = op_r->inputs().at(0)->get_shape();
        auto flt_t = op_r->inputs().at(1)->get_shape();
        auto out_t = op_r->get_shape();
        auto i64   = shape(shape::uint64_type);

        std::vector<shape> expected_inputs = {
            flt_t, flt_t, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64,
            inp_t, inp_t, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64,
            out_t, out_t, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64,
            out_t};

        return migraphx::make_op("gpu::code_object",
                                 {
                                     {"code_object", spec->binary},
                                     {"symbol_name", mlir_kernel_name},
                                     {"global", spec->global_size},
                                     {"local", spec->local_size},
                                     {"expected_inputs", migraphx::to_value(expected_inputs)},
                                     {"output", migraphx::to_value(out_t)},
                                 });
    }

    void add_memref_descriptor(std::vector<instruction_ref>& refs, instruction_ref inst)
    {
        const size_t offset = 0;
        auto inst_t         = inst->get_shape();
        refs.push_back(inst);
        refs.push_back(inst);
        refs.push_back(get_literal(offset)); // offset
        // dim sizes
        std::transform(inst_t.lens().begin(),
                       inst_t.lens().end(),
                       std::back_inserter(refs),
                       [&](const auto& lval) { return get_literal(lval); });
        refs.push_back(get_literal(1)); // G
        // dim strides
        std::transform(inst_t.strides().begin(),
                       inst_t.strides().end(),
                       std::back_inserter(refs),
                       [&](const auto& lval) { return get_literal(lval); });
        refs.push_back(get_literal(1)); // G
    }

    instruction_ref insert_allocation(instruction_ref ins, const shape& s) const
    {
        return mod->insert_instruction(ins, hip_allocate{s});
    }

    void replace_conv_op(instruction_ref ins)
    {
        auto conv_bin = make_mlir_binary(ins);
        if(conv_bin)
        {
            auto conv = make_code_object_op(ins, conv_bin);
            auto inp  = ins->inputs().at(0);
            auto flt  = ins->inputs().at(1);
            auto out  = insert_allocation(ins, ins->get_shape());

            std::vector<instruction_ref> refs;
            refs.reserve(3 * 13 + 1);
            add_memref_descriptor(refs, flt);
            add_memref_descriptor(refs, inp);
            add_memref_descriptor(refs, out);
            refs.push_back(out);

            mod->replace_instruction(ins, conv, refs);
        }
    }

    void apply()
    {
        init();
        for(auto it : iterator_for(*mod))
        {
            if(it->name() == "convolution")
            {
                replace_conv_op(it);
            }
        }
    }
};

void mlir_conv::apply(module& m) const { mlir_apply{&m, this}.apply(); }

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
src/targets/gpu/quant_convolution.cpp

@@ -67,9 +67,9 @@ argument miopen_quant_convolution::compute(context& ctx,
     return args[3];
 }
 
-shape miopen_quant_convolution::compile(context& ctx,
-                                        const shape& output_shape,
-                                        std::vector<shape> inputs)
+shape miopen_quant_convolution::find(context& ctx,
+                                     const shape& output_shape,
+                                     std::vector<shape> inputs)
 {
     shape workspace_shape{};
     auto x_desc = make_tensor(inputs[0], int8_x4_format);
@@ -92,18 +92,18 @@ shape miopen_quant_convolution::compile(context& ctx,
         x_shape = pack_int8_shape(x_shape);
         w_shape = pack_int8_shape(w_shape);
     }
-    auto arg_vec4_x = to_gpu(generate_argument(x_shape));
-    auto arg_vec4_w = to_gpu(generate_argument(w_shape));
+    auto x          = to_gpu(generate_argument(x_shape));
+    auto w          = to_gpu(generate_argument(w_shape));
     auto y          = allocate_gpu(output_shape);
     auto workspace  = allocate_gpu(workspace_shape);
 
     int algo_count = 1;
     miopenConvAlgoPerf_t perf;
     auto status = miopenFindConvolutionForwardAlgorithm(ctx.get_stream().get_miopen(),
                                                         x_desc.get(),
-                                                        arg_vec4_x.implicit(),
+                                                        x.implicit(),
                                                         w_desc.get(),
-                                                        arg_vec4_w.implicit(),
+                                                        w.implicit(),
                                                         cd.get(),
                                                         y_desc.get(),
                                                         y.implicit(),
@@ -114,11 +114,35 @@ shape miopen_quant_convolution::compile(context& ctx,
                                                         workspace_size,
                                                         false);
     if(status != miopenStatusSuccess)
-        MIGRAPHX_THROW("MIOpen Quant Convolution: find convolution failed");
-    algo   = perf.fwd_algo;
-    handle = ctx.get_stream().get_miopen();
+    {
+        MIGRAPHX_THROW("QUANT_CONVOLUTION: find convolution failed");
+    }
+
+    size_t solution_count;
+    algo = perf.fwd_algo;
+
+    status = miopenConvolutionForwardGetSolutionCount(ctx.get_stream().get_miopen(),
+                                                      w_desc.get(),
+                                                      x_desc.get(),
+                                                      cd.get(),
+                                                      y_desc.get(),
+                                                      &solution_count);
+    if(status != miopenStatusSuccess)
+        MIGRAPHX_THROW("MIOpen Quant Convolution: get solution count failed");
+
+    std::vector<miopenConvSolution_t> solutions(solution_count);
+
+    status = miopenConvolutionForwardGetSolution(ctx.get_stream().get_miopen(),
+                                                 w_desc.get(),
+                                                 x_desc.get(),
+                                                 cd.get(),
+                                                 y_desc.get(),
+                                                 solution_count,
+                                                 &solution_count,
+                                                 solutions.data());
+    if(status != miopenStatusSuccess)
+        MIGRAPHX_THROW("MIOpen Quant Convolution: get solution failed");
+    solution_id = solutions.front().solution_id;
+
     return shape{shape::int8_type, {perf.memory}};
 }
 
@@ -126,13 +150,29 @@ void miopen_quant_convolution::finalize(context& ctx,
                                         const shape& output_shape,
                                         std::vector<shape> inputs)
 {
-    if(handle == ctx.get_stream().get_miopen())
-        return;
-    // Check that workspace hasn't changed
-    auto size = inputs.at(2).bytes();
-    auto ws   = compile(ctx, output_shape, std::move(inputs));
-    if(ws.bytes() > size)
-        MIGRAPHX_THROW("Workspace has changed during finalization.");
+    if(cd == nullptr)
+        cd = make_conv(op);
+    if(solution_id == 0)
+    {
+        // Check that workspace hasn't changed
+        auto size = inputs.at(2).bytes();
+        auto ws   = find(ctx, output_shape, inputs);
+        if(ws.bytes() > size)
+            MIGRAPHX_THROW(
+                "MIOpen Quant Convolution: workspace has changed during finalization.");
+    }
+
+    auto x_desc = make_tensor(inputs[0], int8_x4_format);
+    auto w_desc = make_tensor(inputs[1], int8_x4_format);
+    auto y_desc = make_tensor(output_shape);
+
+    auto status = miopenConvolutionForwardCompileSolution(ctx.get_stream().get_miopen(),
                                                           w_desc.get(),
                                                           x_desc.get(),
                                                           cd.get(),
                                                           y_desc.get(),
                                                           solution_id);
+    if(status != miopenStatusSuccess)
+        MIGRAPHX_THROW("MIOpen Quant Convolution: compile solution failed");
 }
 
 shape miopen_quant_convolution::pack_int8_shape(const shape& s) const
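Taken together, the changes in this file move quantized convolution from the legacy find step alone to MIOpen's solution-based API: the renamed find() now records a solution_id and finalize() compiles that solution. A condensed sketch of the new control flow, using only the MIOpen calls visible in the diff above (error checks elided; `handle` and the descriptor variables stand in for the values the real code builds):

    // Sketch of the solution-based flow, assuming the call signatures shown in the diff;
    // not a drop-in implementation.
    size_t solution_count = 0;
    miopenConvolutionForwardGetSolutionCount(handle, w_desc, x_desc, cd, y_desc, &solution_count);

    std::vector<miopenConvSolution_t> solutions(solution_count);
    miopenConvolutionForwardGetSolution(
        handle, w_desc, x_desc, cd, y_desc, solution_count, &solution_count, solutions.data());
    solution_id = solutions.front().solution_id; // find(): remember the chosen solution

    // finalize(): compile the remembered solution once, before execution
    miopenConvolutionForwardCompileSolution(handle, w_desc, x_desc, cd, y_desc, solution_id);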
src/targets/gpu/target.cpp

@@ -53,10 +53,10 @@
 #include <migraphx/gpu/compile_ops.hpp>
 #include <migraphx/gpu/concat_gpu_opt.hpp>
 #include <migraphx/gpu/context.hpp>
+#include <migraphx/gpu/fuse_mlir.hpp>
 #include <migraphx/gpu/fuse_ops.hpp>
 #include <migraphx/gpu/prefuse_ops.hpp>
 #include <migraphx/gpu/lowering.hpp>
-#include <migraphx/gpu/mlir_conv.hpp>
 #include <migraphx/gpu/pack_int8_args.hpp>
 #include <migraphx/gpu/schedule_model.hpp>
 #include <migraphx/gpu/sync_device.hpp>
@@ -128,7 +128,8 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
         dead_code_elimination{},
         enable_pass(not enabled(MIGRAPHX_DISABLE_POINTWISE_FUSION{}), fuse_pointwise{}),
         dead_code_elimination{},
-        mlir_conv{&ctx},
+        fuse_mlir{&ctx},
+        dead_code_elimination{},
         lowering{&ctx, options.offload_copy},
         eliminate_contiguous{"gpu::contiguous"},
         dead_code_elimination{},
test/eliminate_contiguous_test.cpp

@@ -205,4 +205,24 @@ TEST_CASE(contiguous_pointwise)
         mm->begin(), mm->end(), [](auto&& ins) { return ins.name() == "contiguous"; }));
 }
 
+TEST_CASE(slice_contiguous)
+{
+    migraphx::module m;
+    migraphx::shape s{migraphx::shape::float_type, {4, 2}};
+    auto x  = m.add_parameter("x", s);
+    auto t  = m.add_instruction(migraphx::make_op("transpose", {{"permutation", {1, 0}}}), x);
+    auto c  = m.add_instruction(migraphx::make_op("contiguous"), t);
+    auto s1 = m.add_instruction(
+        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {0}}, {"ends", {1}}}), c);
+    auto s2 = m.add_instruction(
+        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {1}}, {"ends", {2}}}), c);
+    auto c1 = m.add_instruction(migraphx::make_op("contiguous"), s1);
+    auto c2 = m.add_instruction(migraphx::make_op("contiguous"), s2);
+    m.add_instruction(pass_standard_op{}, c1, c2);
+    run_pass(m);
+
+    EXPECT(std::count_if(
+               m.begin(), m.end(), [](auto&& ins) { return ins.name() == "contiguous"; }) == 1);
+}
+
 int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/get_target_assignments.cpp (new file, 0 → 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "test.hpp"
#include <migraphx/make_op.hpp>
#include <migraphx/program.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/target_assignments.hpp>
migraphx::program create_program()
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {3}};
    auto x    = mm->add_parameter("x", s);
    auto y    = mm->add_parameter("y", s);
    auto z    = mm->add_parameter("z", s);
    auto diff = mm->add_instruction(migraphx::make_op("div"), x, y);
    mm->add_instruction(migraphx::make_op("div"), diff, z);
    return p;
}

TEST_CASE(is_supported)
{
    auto p       = create_program();
    auto targets = migraphx::get_targets();
    EXPECT(!targets.empty());
    auto first_target = targets[0];
    auto t            = migraphx::make_target(first_target);

    const auto assignments = p.get_target_assignments({t});

    for(const auto& [ins, target] : assignments)
    {
        (void)ins;
        EXPECT(target == first_target);
    }
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/gpu/mlir.cpp (new file, 0 → 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/gpu/mlir.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/write_literals.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/module.hpp>
#include <migraphx/program.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/verify_args.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/functional.hpp>
#include <test.hpp>
using migraphx::trim;

// m test_gpu_mlir && ./bin/test_gpu_mlir
struct mlir_gpu_target : migraphx::gpu::target
{
    std::string name() const { return "mlir"; }
    std::vector<migraphx::pass> get_passes(migraphx::context& gctx,
                                           const migraphx::compile_options&) const
    {
        auto& ctx = migraphx::any_cast<migraphx::gpu::context>(gctx);
        return {migraphx::gpu::write_literals{&ctx}};
    }
};

std::string encode(const std::string& s)
{
    std::stringstream ss;
    bool prespace = false;
    for(auto c : s)
    {
        if(std::isspace(c) != 0)
        {
            if(not prespace)
                ss << " ";
            prespace = true;
        }
        else if(std::isprint(c) != 0)
        {
            ss << c;
            prespace = false;
        }
    }
    return migraphx::trim(ss.str());
}

migraphx::program create_program_from_mlir(const migraphx::module& mmlir)
{
    migraphx::program p;
    auto* mm   = p.get_main_module();
    auto names = mmlir.get_parameter_names();
    std::vector<migraphx::instruction_ref> inputs;
    std::transform(names.begin(), names.end(), std::back_inserter(inputs), [&](const auto& name) {
        return mm->add_parameter(name, mmlir.get_parameter_shape(name));
    });
    std::sort(inputs.begin(), inputs.end(), migraphx::by(std::less<>{}, [](auto ins) {
                  return to_string(ins->get_operator());
              }));
    inputs.push_back(mm->add_parameter("output", mmlir.get_output_shapes().front()));

    migraphx::gpu::context ctx;
    migraphx::gpu::insert_mlir(*mm, mm->end(), compile_mlir(ctx, mmlir), inputs);
    return p;
}

migraphx::parameter_map generate_params(const migraphx::program& p)
{
    migraphx::parameter_map m;
    std::size_t i = 0;
    for(auto&& x : p.get_parameter_shapes())
    {
        // m[x.first] = migraphx::fill_argument(x.second, 1);
        m[x.first] = migraphx::generate_argument(x.second, i++);
    }
    return m;
}

migraphx::argument run_gpu(migraphx::program p, const migraphx::parameter_map& inputs)
{
    mlir_gpu_target t;
    p.compile(t);
    migraphx::parameter_map m;
    for(auto&& input : inputs)
    {
        m[input.first] = t.copy_to(input.second);
    }
    for(auto&& x : p.get_parameter_shapes())
    {
        if(m.count(x.first) == 0)
        {
            m[x.first] = t.allocate(x.second);
        }
    }
    return t.copy_from(p.eval(m).front());
}

migraphx::argument run_ref(migraphx::program p, const migraphx::parameter_map& inputs)
{
    p.compile(migraphx::ref::target{});
    return p.eval(inputs).front();
}

bool verify_mlir(const migraphx::module& mmlir)
{
    migraphx::program ref;
    ref.get_main_module()->insert_instructions(ref.get_main_module()->end(), &mmlir);

    auto inputs = generate_params(ref);
    auto mlir   = create_program_from_mlir(mmlir);
    return migraphx::verify_args("mlir", run_ref(ref, inputs), run_gpu(mlir, inputs));
}

TEST_CASE(conv)
{
    const std::string mlir_output = R"__migraphx__(
module {
  func @main(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
    %0 = migraphx.convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
    return %0 : tensor<1x2x2x2xf32>
  }
}
)__migraphx__";
    migraphx::module m;
    auto x    = m.add_parameter("x", {migraphx::shape::float_type, {1, 8, 4, 4}});
    auto w    = m.add_parameter("w", {migraphx::shape::float_type, {2, 8, 3, 3}});
    auto conv = m.add_instruction(migraphx::make_op("convolution"), x, w);
    m.add_return({conv});
    auto s = migraphx::gpu::dump_mlir(m);
    // Skip test if MLIR is not enabled
    if(s.empty())
        return;
    CHECK(encode(s) == encode(mlir_output));
    EXPECT(verify_mlir(m));
}

TEST_CASE(conv_add_relu)
{
    const std::string mlir_output = R"__migraphx__(
module {
  func @main(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
    %0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
    %1 = migraphx.add(%0, %arg0) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
    %2 = migraphx.relu(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
    return %2 : tensor<1x2x2x2xf32>
  }
}
)__migraphx__";
    migraphx::module m;
    auto x    = m.add_parameter("x", {migraphx::shape::float_type, {1, 8, 4, 4}});
    auto w    = m.add_parameter("w", {migraphx::shape::float_type, {2, 8, 3, 3}});
    auto b    = m.add_parameter("b", {migraphx::shape::float_type, {1, 2, 2, 2}});
    auto conv = m.add_instruction(migraphx::make_op("convolution"), x, w);
    auto add  = m.add_instruction(migraphx::make_op("add"), conv, b);
    auto relu = m.add_instruction(migraphx::make_op("relu"), add);
    m.add_return({relu});
    auto s = migraphx::gpu::dump_mlir(m);
    // Skip test if MLIR is not enabled
    if(s.empty())
        return;
    CHECK(encode(s) == encode(mlir_output));
    EXPECT(verify_mlir(m));
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/module_test.cpp

@@ -300,6 +300,96 @@ TEST_CASE(parameter_name_order)
     EXPECT(param_names == names1);
 }
 
+TEST_CASE(insert_instructions_module)
+{
+    migraphx::shape s{migraphx::shape::int32_type, {1}};
+    migraphx::module m1("m1");
+    auto x1   = m1.add_parameter("x1", s);
+    auto sqrt = m1.add_instruction(migraphx::make_op("sqrt"), {x1});
+    m1.add_instruction(migraphx::make_op("add"), {sqrt, x1});
+
+    migraphx::module m2("m2");
+    auto x2 = m2.add_parameter("x2", s);
+    m2.add_instruction(migraphx::make_op("sqrt"), {x2});
+
+    m1.insert_instructions(sqrt, &m2, {{x2, x1}});
+    EXPECT(std::prev(sqrt)->name() == "sqrt");
+    EXPECT(std::count_if(
+               m1.begin(), m1.end(), [](auto&& ins) { return ins.name() == "sqrt"; }) == 2);
+    EXPECT(std::count_if(
+               m1.begin(), m1.end(), [](auto&& ins) { return ins.name() == "@param"; }) == 1);
+    EXPECT(contains(m1.get_parameter_shapes(), "x1"));
+    EXPECT(not contains(m1.get_parameter_shapes(), "x2"));
+}
+
+TEST_CASE(add_instructions_module)
+{
+    migraphx::shape s{migraphx::shape::int32_type, {1}};
+    migraphx::module m1("m1");
+    auto x1 = m1.add_parameter("x1", s);
+    m1.add_instruction(migraphx::make_op("sqrt"), {x1});
+
+    migraphx::module m2("m2");
+    auto x2 = m2.add_parameter("x2", s);
+    m2.add_instruction(migraphx::make_op("sqrt"), {x2});
+
+    m1.add_instructions(&m2, {{x2, x1}});
+    EXPECT(std::count_if(
+               m1.begin(), m1.end(), [](auto&& ins) { return ins.name() == "sqrt"; }) == 2);
+    EXPECT(std::count_if(
+               m1.begin(), m1.end(), [](auto&& ins) { return ins.name() == "@param"; }) == 1);
+    EXPECT(contains(m1.get_parameter_shapes(), "x1"));
+    EXPECT(not contains(m1.get_parameter_shapes(), "x2"));
+}
+
+TEST_CASE(add_instructions_range)
+{
+    migraphx::shape s{migraphx::shape::int32_type, {1}};
+    migraphx::module m1("m1");
+    auto x1 = m1.add_parameter("x1", s);
+    m1.add_instruction(migraphx::make_op("sqrt"), {x1});
+
+    migraphx::module m2("m2");
+    auto x2    = m2.add_parameter("x2", s);
+    auto sqrt2 = m2.add_instruction(migraphx::make_op("sqrt"), {x2});
+
+    m1.add_instructions(sqrt2, m2.end(), {{x2, x1}});
+    EXPECT(std::any_of(m1.begin(), m1.end(), [&](auto&& ins) {
+        return migraphx::contains(ins.inputs(), x1);
+    }));
+    EXPECT(std::count_if(
+               m1.begin(), m1.end(), [](auto&& ins) { return ins.name() == "sqrt"; }) == 2);
+    EXPECT(std::count_if(
+               m1.begin(), m1.end(), [](auto&& ins) { return ins.name() == "@param"; }) == 1);
+    EXPECT(contains(m1.get_parameter_shapes(), "x1"));
+    EXPECT(not contains(m1.get_parameter_shapes(), "x2"));
+}
+
+TEST_CASE(add_instructions_vector)
+{
+    migraphx::shape s{migraphx::shape::int32_type, {1}};
+    migraphx::module m1("m1");
+    auto x1 = m1.add_parameter("x1", s);
+    m1.add_instruction(migraphx::make_op("sqrt"), {x1});
+
+    migraphx::module m2("m2");
+    auto x2    = m2.add_parameter("x2", s);
+    auto sqrt2 = m2.add_instruction(migraphx::make_op("sqrt"), {x2});
+
+    m1.add_instructions({sqrt2}, {{x2, x1}});
+    EXPECT(std::any_of(m1.begin(), m1.end(), [&](auto&& ins) {
+        return migraphx::contains(ins.inputs(), x1);
+    }));
+    EXPECT(std::count_if(
+               m1.begin(), m1.end(), [](auto&& ins) { return ins.name() == "sqrt"; }) == 2);
+    EXPECT(std::count_if(
+               m1.begin(), m1.end(), [](auto&& ins) { return ins.name() == "@param"; }) == 1);
+    EXPECT(contains(m1.get_parameter_shapes(), "x1"));
+    EXPECT(not contains(m1.get_parameter_shapes(), "x2"));
+}
+
 struct check_for_pass_op
 {
     bool* found = nullptr;
test/op_shape_test.cpp

@@ -981,7 +981,8 @@ TEST_CASE(multibroadcast)
     }
     {
         std::vector<std::size_t> lens{4, 1, 3};
-        migraphx::shape input{migraphx::shape::float_type, {}};
+        std::vector<std::size_t> empt = {};
+        migraphx::shape input{migraphx::shape::float_type, empt};
         throws_shape(migraphx::make_op("multibroadcast", {{"out_lens", lens}}), input);
     }
     {
@@ -1533,15 +1534,46 @@ TEST_CASE(test_squeeze_wrong_axis)
 TEST_CASE(test_unsqueeze)
 {
-    migraphx::shape s1{migraphx::shape::float_type, {4, 3, 3}};
-    migraphx::shape s2{migraphx::shape::float_type, {4, 3, 1, 3}};
+    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 3}};
+    migraphx::shape s2{migraphx::shape::float_type, {4, 5, 1, 3}};
     expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}}), s1);
 }
 
+TEST_CASE(test_unsqueeze_step)
+{
+    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 12}};
+    migraphx::shape s2{migraphx::shape::float_type, {4, 5, 2, 6}};
+    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {2}}}), s1);
+}
+
+TEST_CASE(test_unsqueeze_step_non_divisable)
+{
+    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 3}};
+    throws_shape(migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {2}}}), s1);
+}
+
+TEST_CASE(test_unsqueeze_step_zero)
+{
+    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 12}};
+    throws_shape(migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {0}}}), s1);
+}
+
+TEST_CASE(test_unsqueeze_step_at_end)
+{
+    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 12}};
+    throws_shape(migraphx::make_op("unsqueeze", {{"axes", {3}}, {"steps", {2}}}), s1);
+}
+
+TEST_CASE(test_unsqueeze_mismatch_step_axis)
+{
+    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 12}};
+    throws_shape(migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {2, 3}}}), s1);
+}
+
 TEST_CASE(test_unsqueeze_negative_axis)
 {
-    migraphx::shape s1{migraphx::shape::float_type, {4, 3, 3}};
-    migraphx::shape s2{migraphx::shape::float_type, {4, 3, 1, 3}};
+    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 3}};
+    migraphx::shape s2{migraphx::shape::float_type, {4, 5, 1, 3}};
     expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {-2}}}), s1);
 }
@@ -1567,21 +1599,28 @@ TEST_CASE(test_unsqueeze_scalar_tensor2)
 TEST_CASE(test_unsqueeze_transpose)
 {
     migraphx::shape s1{migraphx::shape::float_type, {4, 4, 3}, {12, 1, 4}};
-    migraphx::shape s2{migraphx::shape::float_type, {4, 4, 1, 3}, {12, 1, 1, 4}};
+    migraphx::shape s2{migraphx::shape::float_type, {4, 4, 1, 3}, {12, 1, 12, 4}};
     expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}}), s1);
 }
 
+TEST_CASE(test_unsqueeze_transpose_step)
+{
+    migraphx::shape s1{migraphx::shape::float_type, {4, 4, 6}, {24, 1, 4}};
+    migraphx::shape s2{migraphx::shape::float_type, {4, 4, 2, 3}, {24, 1, 12, 4}};
+    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {2}}}), s1);
+}
+
 TEST_CASE(test_unsqueeze_multibroadcast)
 {
     migraphx::shape s1{migraphx::shape::float_type, {2, 3, 4}, {0, 1, 0}};
-    migraphx::shape s2{migraphx::shape::float_type, {2, 3, 1, 4}, {0, 1, 1, 0}};
+    migraphx::shape s2{migraphx::shape::float_type, {2, 3, 1, 4}, {0, 1, 0, 0}};
     expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}}), s1);
 }
 
 TEST_CASE(test_unsqueeze_slice)
 {
     migraphx::shape s1{migraphx::shape::float_type, {2, 3, 4}, {108, 36, 1}};
-    migraphx::shape s2{migraphx::shape::float_type, {2, 3, 1, 4}, {108, 36, 36, 1}};
+    migraphx::shape s2{migraphx::shape::float_type, {2, 3, 1, 4}, {108, 36, 4, 1}};
     expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}}), s1);
 }
@@ -1613,6 +1652,27 @@ TEST_CASE(test_unsqueeze_multiple_axes_2)
     expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {0, 1}}}), s1);
 }
 
+TEST_CASE(test_unsqueeze_multiple_axes_3)
+{
+    migraphx::shape s1{migraphx::shape::float_type, {3, 4, 5}};
+    migraphx::shape s2{migraphx::shape::float_type, {3, 4, 1, 5, 1, 1}};
+    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2, 4, 5}}}), s1);
+}
+
+TEST_CASE(test_unsqueeze_multiple_axes_4)
+{
+    migraphx::shape s1{migraphx::shape::float_type, {3, 4, 5}};
+    migraphx::shape s2{migraphx::shape::float_type, {3, 4, 1, 5, 1, 1}};
+    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {5, 4, 2}}}), s1);
+}
+
+TEST_CASE(test_unsqueeze_multiple_axes_step)
+{
+    migraphx::shape s1{migraphx::shape::float_type, {3, 4, 10}};
+    migraphx::shape s2{migraphx::shape::float_type, {3, 4, 2, 5, 1, 1}};
+    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2, 4, 5}}, {"steps", {2}}}), s1);
+}
+
 TEST_CASE(transpose_shape)
 {
     migraphx::shape input{migraphx::shape::float_type, {2, 2}};
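Reading the shapes these new tests expect, the `steps` attribute appears to split the unsqueezed dimension rather than just inserting a length-1 axis: `{4, 5, 12}` with axes `{2}` and steps `{2}` becomes `{4, 5, 2, 6}`, while zero, non-divisible, or trailing steps throw. A hypothetical helper (not MIGraphX code) that reproduces the single-axis cases above:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Inferred from the test expectations: the new axis takes the step value and
    // the original dimension is divided by it, so {4, 5, 12} -> {4, 5, 2, 6}.
    std::vector<std::size_t>
    unsqueeze_step(std::vector<std::size_t> lens, std::size_t axis, std::size_t step)
    {
        assert(axis < lens.size());                  // a step on a trailing new axis throws
        assert(step != 0 && lens[axis] % step == 0); // zero or non-divisible steps throw
        lens[axis] /= step;
        lens.insert(lens.begin() + axis, step);
        return lens;
    }
    // unsqueeze_step({4, 5, 12}, 2, 2) == {4, 5, 2, 6}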
test/shape_test.cpp

@@ -38,7 +38,6 @@ TEST_CASE(test_shape_default)
     EXPECT(s.elements() == 0);
     EXPECT(s.bytes() == 0);
 }
-
 TEST_CASE(test_shape_assign)
 {
     migraphx::shape s1{migraphx::shape::float_type, {100, 32, 8, 8}};
@@ -65,6 +64,118 @@ TEST_CASE(test_shape_standard)
     EXPECT(not s.broadcasted());
 }
 
+TEST_CASE(test_shape_min_max_opt)
+{
+    migraphx::shape s{migraphx::shape::float_type, {2, 2, 3}, {6, 3, 1}};
+    EXPECT(s.min_lens() == s.lens());
+    EXPECT(s.max_lens() == s.lens());
+    EXPECT(s.opt_lens() == s.lens());
+}
+
+TEST_CASE(test_shape_dynamic_fixed)
+{
+    migraphx::shape s{migraphx::shape::float_type, {{2, 2, 0}, {2, 2, 0}, {3, 3, 0}}};
+    EXPECT(not s.standard());
+    EXPECT(not s.packed());
+    EXPECT(not s.transposed());
+    EXPECT(not s.broadcasted());
+    EXPECT(s.dynamic());
+    EXPECT(s.dyn_dims().size() == 3);
+    EXPECT(s.dyn_dims().at(0).is_fixed());
+    EXPECT(not s.dyn_dims().at(0).has_optimal());
+    EXPECT(s.min_lens() == std::vector<std::size_t>{2, 2, 3});
+    EXPECT(s.max_lens() == std::vector<std::size_t>{2, 2, 3});
+    EXPECT(s.opt_lens() == std::vector<std::size_t>{0, 0, 0});
+    EXPECT(s.bytes() == 2 * 2 * 3 * sizeof(float));
+}
+
+TEST_CASE(test_shape_dynamic_not_fixed)
+{
+    using migraphx::shape;
+    std::vector<shape::dynamic_dimension> dims = {};
+    dims.push_back(shape::dynamic_dimension{2, 5, 2});
+    dims.push_back(shape::dynamic_dimension{2, 8, 0});
+    migraphx::shape s{migraphx::shape::float_type, dims};
+    EXPECT(not s.standard());
+    EXPECT(not s.packed());
+    EXPECT(not s.transposed());
+    EXPECT(not s.broadcasted());
+    EXPECT(s.dynamic());
+    EXPECT(s.dyn_dims().size() == 2);
+    EXPECT(not s.dyn_dims().at(0).is_fixed());
+    EXPECT(s.dyn_dims().at(0).has_optimal());
+    EXPECT(s.min_lens() == std::vector<std::size_t>{2, 2});
+    EXPECT(s.max_lens() == std::vector<std::size_t>{5, 8});
+    EXPECT(s.opt_lens() == std::vector<std::size_t>{2, 0});
+    EXPECT(s.bytes() == 5 * 8 * sizeof(float));
+}
+
+TEST_CASE(test_shape_dynamic_compares)
+{
+    using migraphx::shape;
+    auto a = shape::dynamic_dimension{2, 5, 2};
+    auto b = a;
+    auto c = shape::dynamic_dimension{2, 5, 2};
+    auto d = shape::dynamic_dimension{3, 8, 4};
+    EXPECT(a == b);
+    EXPECT(a == c);
+    EXPECT(a != d);
+    migraphx::shape s0{shape::float_type, {a, d}};
+    migraphx::shape s1 = s0;
+    migraphx::shape s2{shape::float_type, {a, d}};
+    migraphx::shape s3{shape::int32_type, {a}};
+    EXPECT(s0 == s1);
+    EXPECT(s0 == s2);
+    EXPECT(s0 != s3);
+    std::stringstream ss0;
+    std::stringstream ss1;
+    std::stringstream ss3;
+    ss0 << s0;
+    ss1 << s1;
+    ss3 << s3;
+    EXPECT(ss0.str() == ss1.str());
+    EXPECT(ss0.str() != ss3.str());
+}
+
+TEST_CASE(test_shape_dynamic_errors)
+{
+    using migraphx::shape;
+    std::vector<shape::dynamic_dimension> dims = {};
+    dims.push_back(shape::dynamic_dimension{2, 5, 2});
+    dims.push_back(shape::dynamic_dimension{2, 8, 0});
+    migraphx::shape s{shape::float_type, dims};
+    EXPECT(test::throws([&] { s.elements(); }));
+    EXPECT(test::throws([&] { s.index({0, 1}); }));
+    EXPECT(test::throws([&] { s.index(1); }));
+    EXPECT(test::throws([&] { s.index(std::vector<std::size_t>{0, 1}); }));
+    EXPECT(test::throws([&] { s.with_lens({3, 5}); }));
+    EXPECT(test::throws([&] { s.with_lens(shape::float_type, {3, 5}); }));
+}
+
+TEST_CASE(test_shape_dynamic_serialize)
+{
+    using migraphx::shape;
+    std::vector<shape::dynamic_dimension> dims1 = {};
+    dims1.push_back(shape::dynamic_dimension{2, 5, 2});
+    dims1.push_back(shape::dynamic_dimension{2, 8, 0});
+    migraphx::shape s1{shape::float_type, dims1};
+    auto v1 = migraphx::to_value(s1);
+    std::vector<shape::dynamic_dimension> dims2 = {};
+    dims2.push_back(shape::dynamic_dimension{2, 5, 2});
+    migraphx::shape s2{shape::uint64_type, dims2};
+    auto v2 = migraphx::to_value(s2);
+    EXPECT(v1 != v2);
+
+    auto s3 = migraphx::from_value<shape>(v1);
+    EXPECT(s3 == s1);
+    auto s4 = migraphx::from_value<shape>(v2);
+    EXPECT(s4 == s2);
+    EXPECT(s3 != s4);
+}
+
 TEST_CASE(test_shape_packed)
 {
     migraphx::shape s{migraphx::shape::float_type, {2, 2}, {2, 1}};
test/simplify_reshapes_test.cpp
View file @
fd3252dc
...
@@ -1141,4 +1141,138 @@ TEST_CASE(transpose_contiguous_reshape_binary_broadcast)
...
@@ -1141,4 +1141,138 @@ TEST_CASE(transpose_contiguous_reshape_binary_broadcast)
EXPECT
(
m1
==
m2
);
EXPECT
(
m1
==
m2
);
}
}
TEST_CASE
(
transpose_unsqueeze_concat
)
{
migraphx
::
module
m1
;
{
auto
l0
=
m1
.
add_parameter
(
"0"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
1
,
2
,
1
,
1
}});
auto
lt0
=
m1
.
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
{
0
,
2
,
3
,
1
}}}),
l0
);
auto
l1
=
m1
.
add_parameter
(
"1"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
1
,
2
,
1
,
1
}});
auto
lt1
=
m1
.
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
{
0
,
2
,
3
,
1
}}}),
l1
);
auto
l2
=
m1
.
add_parameter
(
"2"
,
migraphx
::
shape
{
migraphx
::
shape
::
float_type
,
{
1
,
2
,
1
,
1
}});
auto
lt2
=
m1
.
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
{
0
,
2
,
3
,
1
}}}),
l2
);
std
::
vector
<
migraphx
::
instruction_ref
>
args
{
lt0
,
lt1
,
lt2
};
std
::
vector
<
migraphx
::
instruction_ref
>
unsqueezed_args
;
int64_t
axis
=
3
;
std
::
transform
(
args
.
begin
(),
args
.
end
(),
std
::
back_inserter
(
unsqueezed_args
),
[
&
](
migraphx
::
instruction_ref
arg
)
{
return
m1
.
add_instruction
(
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
axis
}}}),
arg
);
});
m1
.
add_instruction
(
migraphx
::
make_op
(
"concat"
,
{{
"axis"
,
axis
}}),
unsqueezed_args
);
}
// TODO: This could be simplified to a single transpose after concat
migraphx
::
module
m2
=
m1
;
run_pass
(
m1
);
EXPECT
(
m1
==
m2
);
}
TEST_CASE
(
transpose_slice
)
{
migraphx
::
module
m1
;
{
auto
x
=
m1
.
add_parameter
(
"x"
,
{
migraphx
::
shape
::
float_type
,
{
1
,
384
,
36
,
64
}});
auto
slice1
=
m1
.
add_instruction
(
migraphx
::
make_op
(
"slice"
,
{{
"axes"
,
{
2
}},
{
"starts"
,
{
0
}},
{
"ends"
,
{
12
}}}),
x
);
auto
transpose1
=
m1
.
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
{
0
,
2
,
1
,
3
}}}),
slice1
);
auto
slice2
=
m1
.
add_instruction
(
migraphx
::
make_op
(
"slice"
,
{{
"axes"
,
{
2
}},
{
"starts"
,
{
12
}},
{
"ends"
,
{
24
}}}),
x
);
auto
transpose2
=
m1
.
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
{
0
,
2
,
1
,
3
}}}),
slice2
);
auto
slice3
=
m1
.
add_instruction
(
migraphx
::
make_op
(
"slice"
,
{{
"axes"
,
{
2
}},
{
"starts"
,
{
24
}},
{
"ends"
,
{
36
}}}),
x
);
auto
transpose3
=
m1
.
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
{
0
,
2
,
1
,
3
}}}),
slice3
);
m1
.
add_return
({
transpose1
,
transpose2
,
transpose3
});
}
run_pass
(
m1
);
migraphx
::
module
m2
;
{
auto
x
=
m2
.
add_parameter
(
"x"
,
{
migraphx
::
shape
::
float_type
,
{
1
,
384
,
36
,
64
}});
auto
transpose
=
m2
.
add_instruction
(
migraphx
::
make_op
(
"transpose"
,
{{
"permutation"
,
{
0
,
2
,
1
,
3
}}}),
x
);
auto
slice1
=
m2
.
add_instruction
(
migraphx
::
make_op
(
"slice"
,
{{
"axes"
,
{
1
}},
{
"starts"
,
{
0
}},
{
"ends"
,
{
12
}}}),
transpose
);
auto
slice2
=
m2
.
add_instruction
(
migraphx
::
make_op
(
"slice"
,
{{
"axes"
,
{
1
}},
{
"starts"
,
{
12
}},
{
"ends"
,
{
24
}}}),
transpose
);
auto
slice3
=
m2
.
add_instruction
(
migraphx
::
make_op
(
"slice"
,
{{
"axes"
,
{
1
}},
{
"starts"
,
{
24
}},
{
"ends"
,
{
36
}}}),
transpose
);
m2
.
add_return
({
slice1
,
slice2
,
slice3
});
}
EXPECT
(
m1
==
m2
);
}
TEST_CASE(transpose_slice_diff_perm)
{
    migraphx::module m1;
    {
        auto x = m1.add_parameter("x", {migraphx::shape::float_type, {1, 384, 36, 64}});
        auto slice1 = m1.add_instruction(
            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {0}}, {"ends", {12}}}), x);
        auto transpose1 = m1.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice1);
        auto slice2 = m1.add_instruction(
            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {12}}, {"ends", {24}}}), x);
        auto transpose2 = m1.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 3, 1}}}), slice2);
        auto slice3 = m1.add_instruction(
            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {24}}, {"ends", {36}}}), x);
        auto transpose3 = m1.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice3);
        m1.add_return({transpose1, transpose2, transpose3});
    }
    run_pass(m1);

    migraphx::module m2;
    {
        auto x = m2.add_parameter("x", {migraphx::shape::float_type, {1, 384, 36, 64}});
        auto transpose = m2.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), x);
        auto slice1 = m2.add_instruction(
            migraphx::make_op("slice", {{"axes", {1}}, {"starts", {0}}, {"ends", {12}}}),
            transpose);
        auto slice2 = m2.add_instruction(
            migraphx::make_op("slice", {{"axes", {1}}, {"starts", {12}}, {"ends", {24}}}),
            transpose);
        auto transpose2 = m2.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 1, 3, 2}}}), slice2);
        auto slice3 = m2.add_instruction(
            migraphx::make_op("slice", {{"axes", {1}}, {"starts", {24}}, {"ends", {36}}}),
            transpose);
        m2.add_return({slice1, transpose2, slice3});
    }
    EXPECT(m1 == m2);
}
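Here only the second branch uses a different permutation, so the expected module keeps the common {0, 2, 1, 3} transpose and appends a fix-up {0, 1, 3, 2} transpose on that branch. The composition rule behind this (a hypothetical sketch, not code from the pass): applying permutation `first` and then permutation `second` is one transpose with out[i] = first[second[i]].

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical helper composing two transpose permutations.
std::vector<std::int64_t> compose_perms(const std::vector<std::int64_t>& first,
                                        const std::vector<std::int64_t>& second)
{
    std::vector<std::int64_t> out(second.size());
    for(std::size_t i = 0; i < second.size(); ++i)
        out[i] = first[second[i]]; // axis i of the result reads axis first[second[i]] of the input
    return out;
}

// compose_perms({0, 2, 1, 3}, {0, 1, 3, 2}) yields {0, 2, 3, 1}, the
// permutation used on the second branch of m1.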
TEST_CASE(transpose_slice_single_transpose)
{
    migraphx::module m1;
    {
        auto x = m1.add_parameter("x", {migraphx::shape::float_type, {1, 384, 36, 64}});
        auto slice1 = m1.add_instruction(
            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {0}}, {"ends", {12}}}), x);
        auto sqrt1 = m1.add_instruction(migraphx::make_op("sqrt"), slice1);
        auto slice2 = m1.add_instruction(
            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {12}}, {"ends", {24}}}), x);
        auto transpose = m1.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice2);
        auto slice3 = m1.add_instruction(
            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {24}}, {"ends", {36}}}), x);
        auto sqrt3 = m1.add_instruction(migraphx::make_op("sqrt"), slice3);
        m1.add_return({sqrt1, transpose, sqrt3});
    }
    migraphx::module m2 = m1;
    run_pass(m1);
    EXPECT(m1 == m2);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/verify/run_verify.cpp
View file @ fd3252dc
...
@@ -30,6 +30,7 @@
 #include <migraphx/ranges.hpp>
 #include <migraphx/generate.hpp>
 #include <migraphx/load_save.hpp>
+#include <migraphx/tmp_dir.hpp>
 #include <migraphx/verify_args.hpp>
 #include <set>
...
@@ -57,6 +58,15 @@ std::future<typename std::result_of<Function()>::type> detach_async(Function&& f
     return std::async(std::launch::deferred, std::forward<Function>(f));
 }

+inline void verify_load_save(const migraphx::program& p)
+{
+    migraphx::tmp_dir td{"migraphx_test"};
+    auto path = td.path / "test.mxr";
+    migraphx::save(p, path.string());
+    auto loaded = migraphx::load(path.string());
+    EXPECT(p == loaded);
+}
+
 inline void compile_check(migraphx::program& p, const migraphx::target& t, bool show_trace = false)
 {
     auto name = t.name();
...
@@ -82,6 +92,8 @@ inline void compile_check(migraphx::program& p, const migraphx::target& t, bool
             throw std::runtime_error("Compiling program with " + name + " alters its shape");
         }
     }
+    if(t.name() != "ref")
+        verify_load_save(p);
 }

 target_info run_verify::get_target_info(const std::string& name) const
...
@@ -152,6 +164,7 @@ void run_verify::verify(const std::string& name, const migraphx::program& p) con
     auto_print::set_terminate_handler(name);
     if(migraphx::enabled(MIGRAPHX_DUMP_TEST{}))
         migraphx::save(p, name + ".mxr");
+    verify_load_save(p);
     std::vector<std::string> target_names;
     for(const auto& tname : migraphx::get_targets())
     {
...
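The new verify_load_save round-trips a program through MXR serialization and asserts the reloaded program compares equal, so every verified program now also exercises the serializer. A minimal standalone sketch of the same idea, assuming only the save/load and tmp_dir APIs visible in the diff above:

#include <migraphx/load_save.hpp>
#include <migraphx/program.hpp>
#include <migraphx/tmp_dir.hpp>

// Sketch: returns true when serialization is lossless for this program.
bool round_trips(const migraphx::program& p)
{
    migraphx::tmp_dir td{"migraphx_example"};       // scratch directory
    auto path = (td.path / "example.mxr").string(); // MXR is the serialized format
    migraphx::save(p, path);
    return migraphx::load(path) == p;
}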
test/verify/test_conv_add_relu.cpp
0 → 100644
View file @
fd3252dc
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>
struct test_conv_add_relu : verify_program<test_conv_add_relu>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        auto input =
            mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
        auto weights =
            mm->add_parameter("w", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
        auto bias_literal = migraphx::literal{migraphx::shape{migraphx::shape::float_type, {4}},
                                              {2.0f, 2.0f, 2.0f, 2.0f}};
        auto bias = mm->add_literal(bias_literal);
        auto conv = mm->add_instruction(migraphx::make_op("convolution"), input, weights);
        auto bcast_bias = mm->add_instruction(
            migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", conv->get_shape().lens()}}),
            bias);
        auto bias_add = mm->add_instruction(migraphx::make_op("add"), conv, bcast_bias);
        mm->add_instruction(migraphx::make_op("relu"), bias_add);
        return p;
    }
};
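The interesting detail in this test is the bias broadcast: a length-4 vector is mapped onto axis 1 of the conv output ({4, 4, 1, 1} for these shapes) before the add. Broadcasting like this is a zero-stride view rather than a copy; a hypothetical sketch of the stride computation (illustrative only, not MIGraphX's implementation):

#include <cstddef>
#include <vector>

// Hypothetical: strides of a 1-D tensor broadcast onto `axis` of out_lens.
// A zero stride makes every other axis reuse the same element.
std::vector<std::size_t> broadcast_strides(std::size_t axis,
                                           const std::vector<std::size_t>& out_lens)
{
    std::vector<std::size_t> strides(out_lens.size(), 0);
    strides[axis] = 1;
    return strides;
}

// For out_lens {4, 4, 1, 1} and axis 1, each bias value is repeated across
// the batch and spatial dimensions without materializing a larger tensor.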
tools/include/target.hpp
View file @
fd3252dc
...
@@ -37,6 +37,8 @@
 #include <migraphx/compile_options.hpp>
 #include <migraphx/argument.hpp>
 #include <migraphx/rank.hpp>
+#include <migraphx/support_metric.hpp>
+#include <migraphx/instruction_ref.hpp>

 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
...
@@ -61,6 +63,13 @@ struct target
      * @return The context to be used during compilation and execution.
      */
     context get_context() const;
+    /**
+     * @brief Check how well an instruction is supported on a target with the given metric
+     * @param ins Instruction to check if it's supported
+     * @param metric Used to define how the return value should be interpreted
+     * @return The value based on the chosen metric. Negative numbers mean unsupported
+     */
+    float is_supported(T&, instruction_ref ins, support_metric m) const;
     /**
      * @brief copy an argument to the current target.
      *
...
@@ -105,11 +114,18 @@ argument copy_from_target(T&, const argument& arg)
     return arg;
 }

+template <class T>
+float target_is_supported(T&, instruction_ref, support_metric)
+{
+    return 0;
+}
+
 <%
 interface('target',
           virtual('name', returns='std::string', const=True),
           virtual('get_passes', ctx='context&', options='const compile_options&', returns='std::vector<pass>', const=True),
           virtual('get_context', returns='context', const=True),
+          virtual('is_supported', returns='float', ins='instruction_ref', m='support_metric', const=True, default='target_is_supported'),
           virtual('copy_to',
                   returns='argument',
                   input='const argument&',
...
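The new is_supported virtual defaults to target_is_supported, which reports neutral support (0) for every instruction. A hypothetical sketch of how a concrete target might override the customization point (the target name and return values are illustrative, not from this commit):

#include <migraphx/instruction.hpp>
#include <migraphx/instruction_ref.hpp>
#include <migraphx/support_metric.hpp>

struct example_target
{
    // Negative means unsupported; higher values indicate better support
    // under the chosen metric.
    float is_supported(migraphx::instruction_ref ins, migraphx::support_metric) const
    {
        if(ins->name() == "convolution")
            return 1.0f;
        return -1.0f;
    }
};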