gaoqiong / MIGraphX

Unverified commit fd3252dc, authored Jul 08, 2022 by Umang Yadav, committed by GitHub on Jul 08, 2022

Merge branch 'develop' into dot-add

Parents: 56615a84 8192f37f

Changes: 61 files total; showing 20 changed files with 1556 additions and 351 deletions (+1556, -351)
src/targets/gpu/kernels/include/migraphx/kernels/functional.hpp   +1    -1
src/targets/gpu/kernels/include/migraphx/kernels/reduce.hpp       +34   -0
src/targets/gpu/kernels/include/migraphx/kernels/shape.hpp        +1    -0
src/targets/gpu/kernels/include/migraphx/kernels/softmax.hpp      +45   -0
src/targets/gpu/kernels/include/migraphx/kernels/vec.hpp          +2    -0
src/targets/gpu/lowering.cpp                                      +2    -3
src/targets/gpu/mlir.cpp                                          +647  -0
src/targets/gpu/mlir_conv.cpp                                     +0    -315
src/targets/gpu/quant_convolution.cpp                             +61   -21
src/targets/gpu/target.cpp                                        +3    -2
test/eliminate_contiguous_test.cpp                                +20   -0
test/get_target_assignments.cpp                                   +61   -0
test/gpu/mlir.cpp                                                 +194  -0
test/module_test.cpp                                              +90   -0
test/op_shape_test.cpp                                            +68   -8
test/shape_test.cpp                                               +112  -1
test/simplify_reshapes_test.cpp                                   +134  -0
test/verify/run_verify.cpp                                        +13   -0
test/verify/test_conv_add_relu.cpp                                +52   -0
tools/include/target.hpp                                          +16   -0
src/targets/gpu/kernels/include/migraphx/kernels/functional.hpp

...
@@ -24,7 +24,7 @@
 #ifndef MIGRAPHX_GUARD_KERNELS_FUNCTIONAL_HPP
 #define MIGRAPHX_GUARD_KERNELS_FUNCTIONAL_HPP
-#include <migraphx/kernels/array.hpp>
+#include <migraphx/kernels/integral_constant.hpp>

 // NOLINTNEXTLINE
 #define MIGRAPHX_RETURNS(...) \
...
src/targets/gpu/kernels/include/migraphx/kernels/reduce.hpp

...
@@ -175,6 +175,21 @@ constexpr auto sliced(Slicer slicer, F f)
     };
 }

+template <class Input, index_int Axis>
+constexpr auto compute_reduce_axis()
+{
+    constexpr auto lens =
+        transform_i(get_shape_c<Input>{}.lens, [](index_int x, index_int i) -> index_int {
+            if(i == Axis)
+                return 1;
+            return x;
+        });
+    return make_shape(lens, get_shape_c<Input>{}.strides);
+}
+
+template <class Input, index_int Axis>
+using with_axis = decltype(compute_reduce_axis<Input, Axis>());
+
 struct block
 {
     template <class Slicer>
...
@@ -201,6 +216,14 @@ struct block
         if(idx.local == 0)
             f();
     }
+
+    template <class F>
+    __device__ auto inner(F f) const
+    {
+        return sliced(slicer, [=](auto x, auto... xs) {
+            idx.local_stride(x.get_shape().elements(), [&](auto j) { f(x[j], xs[j]...); });
+        });
+    }
 };

 template <class Slicer>
...
@@ -247,6 +270,17 @@ struct lane
     {
         f();
     }
+
+    template <class F>
+    __device__ auto inner(F f) const
+    {
+        return sliced(slicer, [=](auto x, auto... xs) {
+            for(index_int j = 0; j < x.get_shape().elements(); j++)
+            {
+                f(x[j], xs[j]...);
+            }
+        });
+    }
 };

 template <class Slicer>
...
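The added compute_reduce_axis builds, at compile time, the shape a block reduction writes to: the extent along the reduced Axis is forced to 1 while every other extent and the original strides are preserved. Below is a minimal standalone sketch of the same lens transform, using std::array in place of MIGraphX's compile-time shape types; reduced_lens and the sample dimensions are illustrative, not library API.

#include <array>
#include <cstddef>
#include <iostream>

// Force the extent along Axis to 1, as compute_reduce_axis does with the
// compile-time lens; strides would be carried over unchanged.
template <std::size_t Axis, std::size_t N>
constexpr std::array<std::size_t, N> reduced_lens(std::array<std::size_t, N> lens)
{
    lens[Axis] = 1;
    return lens;
}

int main()
{
    constexpr std::array<std::size_t, 3> lens{2, 320, 80};
    constexpr auto r = reduced_lens<1>(lens); // reduce over axis 1
    for(auto d : r)
        std::cout << d << ' '; // prints: 2 1 80
}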
src/targets/gpu/kernels/include/migraphx/kernels/shape.hpp

...
@@ -32,6 +32,7 @@ namespace migraphx {
 template <class Lens, class Strides>
 struct shape
 {
     using shape_type  = shape;
+    using index_array = typename Lens::base_array;
     Lens lens         = {};
     Strides strides   = {};
...
src/targets/gpu/kernels/include/migraphx/kernels/softmax.hpp (new file, 0 → 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_KERNELS_SOFTMAX_HPP
#define MIGRAPHX_GUARD_KERNELS_SOFTMAX_HPP
#include <migraphx/kernels/reduce.hpp>
#include <migraphx/kernels/ops.hpp>
namespace migraphx {

template <index_int Axis, class Input, class Output>
__device__ void softmax(Input input, Output output)
{
    reduce::block::run<reduce::with_axis<Input, Axis>>([&](auto, auto r) {
        auto batch_max = r.reduce(op::max{}, lowest{}, op::id{})(input);
        auto batch_sum =
            r.reduce(op::sum{}, 0, [&](auto x) { return migraphx::exp(x - batch_max); })(input);
        r.inner([&](auto& y, auto x) { y = migraphx::exp(x - batch_max) / batch_sum; })(output,
                                                                                        input);
    });
}

} // namespace migraphx
#endif // MIGRAPHX_GUARD_KERNELS_SOFTMAX_HPP
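The new kernel computes softmax in the numerically stable form exp(x - batch_max) / batch_sum: one reduction for the per-slice maximum, one for the sum of shifted exponentials, then an elementwise normalization. Subtracting the maximum cancels between numerator and denominator, so it changes nothing mathematically but keeps exp from overflowing. A host-side sketch of the same three passes over a flat vector (plain C++; nothing from the device API is assumed):

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<float> softmax(const std::vector<float>& x)
{
    assert(!x.empty());
    // Pass 1: maximum (mirrors r.reduce(op::max{}, ...))
    float m = *std::max_element(x.begin(), x.end());
    // Pass 2: sum of shifted exponentials (mirrors r.reduce(op::sum{}, ...))
    float sum = 0.0f;
    std::vector<float> y(x.size());
    for(std::size_t i = 0; i < x.size(); ++i)
    {
        y[i] = std::exp(x[i] - m);
        sum += y[i];
    }
    // Pass 3: normalize (mirrors r.inner(...))
    for(auto& v : y)
        v /= sum;
    return y;
}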
src/targets/gpu/kernels/include/migraphx/kernels/vec.hpp

...
@@ -27,6 +27,8 @@
 #include <migraphx/kernels/types.hpp>
 #include <migraphx/kernels/integral_constant.hpp>
 #include <migraphx/kernels/functional.hpp>
+#include <migraphx/kernels/type_traits.hpp>
+#include <migraphx/kernels/debug.hpp>

 namespace migraphx {
...
src/targets/gpu/lowering.cpp

...
@@ -186,7 +186,6 @@ struct miopen_apply
         add_extend_op("rnn_var_sl_shift_output");
         add_extend_op("rnn_var_sl_shift_sequence");
         add_extend_op("scatter_none");
-        add_extend_op("softmax");
         add_extend_op("topk");

         add_batch_norm_inference_op();
...
@@ -301,7 +300,7 @@ struct miopen_apply
             auto&& op = any_cast<op::deconvolution>(ins->get_operator());
             auto conv = miopen_deconvolution{op, make_deconv(op)};
-            auto ws   = conv.compile(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
+            auto ws   = conv.find(get_context(), ins->get_shape(), to_shapes(ins->inputs()));

             auto workspace = insert_allocation(ins, ws);
             auto output    = insert_allocation(ins, ins->get_shape());
...
@@ -332,7 +331,7 @@ struct miopen_apply
             miopen_quant_convolution conv;
             auto compile_quant_conv_with_format = [&](bool format) {
                 conv = miopen_quant_convolution{op, format, make_conv(op)};
-                ws   = conv.compile(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
+                ws   = conv.find(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
             };

             try
...
src/targets/gpu/mlir.cpp (new file, 0 → 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/gpu/mlir.hpp>
#ifdef MIGRAPHX_MLIR
#include <mlir-c/IR.h>
#include <mlir-c/BuiltinAttributes.h>
#include <mlir-c/BuiltinTypes.h>
#include <mlir-c/Diagnostics.h>
#include <mlir-c/Dialect/MIGraphX.h>
#include <mlir-c/IntegerSet.h>
#include <mlir-c/Pass.h>
#include <mlir-c/Registration.h>
#endif
#include <migraphx/env.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/module.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/config.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/gpu/code_object_op.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/device_name.hpp>
#include <migraphx/iterator_for.hpp>
#include <deque>
#include <variant>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_TRACE_MLIR);

#ifdef MIGRAPHX_MLIR

template <class T, class F, F f> // NOLINT
struct mlir_handle
{
    struct ptr
    {
        ptr() = default;
        ptr(std::nullptr_t) {}
        ptr(T x) : obj(x) {}

        std::intptr_t get_value() const
        {
            static_assert(sizeof(T) == sizeof(std::intptr_t), "MLIR Handle different size");
            return reinterpret_cast<const std::intptr_t&>(obj);
        }

        T get() const { return obj; }

        friend bool operator==(ptr x, ptr y) { return x.get_value() == y.get_value(); }
        friend bool operator!=(ptr x, ptr y) { return !(x == y); }

        T obj{};
    };

    struct deleter
    {
        using pointer = ptr;

        void operator()(pointer x) const
        {
            if(x != nullptr)
            {
                (void)f(x.obj);
            }
        }
    };

    mlir_handle() : handle(nullptr) {}
    mlir_handle(T p) : handle(ptr{p}) {}

    T get() const { return handle.get().get(); }
    T release() { return handle.release().get(); }

    private:
    std::unique_ptr<ptr, deleter> handle;
};

#define MIGRAPHX_MANAGE_MLIR_HANDLE(T, F) migraphx::gpu::mlir_handle<T, decltype(&F), &F> // NOLINT

using mlir_context      = MIGRAPHX_MANAGE_MLIR_HANDLE(MlirContext, mlirContextDestroy);
using mlir_module       = MIGRAPHX_MANAGE_MLIR_HANDLE(MlirModule, mlirModuleDestroy);
using mlir_operation    = MIGRAPHX_MANAGE_MLIR_HANDLE(MlirOperation, mlirOperationDestroy);
using mlir_op_printing_flags =
    MIGRAPHX_MANAGE_MLIR_HANDLE(MlirOpPrintingFlags, mlirOpPrintingFlagsDestroy);
using mlir_region       = MIGRAPHX_MANAGE_MLIR_HANDLE(MlirRegion, mlirRegionDestroy);
using mlir_block        = MIGRAPHX_MANAGE_MLIR_HANDLE(MlirBlock, mlirBlockDestroy);
using mlir_pass_manager = MIGRAPHX_MANAGE_MLIR_HANDLE(MlirPassManager, mlirPassManagerDestroy);

std::string_view to_string_view(MlirStringRef s) { return {s.data, s.length}; }

MlirStringRef make_mlir_string_ref(const std::string_view& s)
{
    return mlirStringRefCreate(s.data(), s.size());
}

template <class F, class T, class Printer>
void mlir_print(F f, T x, Printer printer)
{
    f(x,
      +[](MlirStringRef s, void* data) { (*reinterpret_cast<Printer*>(data))(to_string_view(s)); },
      &printer);
}

template <class F, class T>
void mlir_print(F f, T x, std::ostream& os)
{
    mlir_print(f, x, [&](auto s) { os << s; });
}

template <class F, class T>
std::string mlir_print(F f, T x)
{
    std::stringstream ss;
    mlir_print(f, x, [&](auto s) { ss << s; });
    return ss.str();
}

struct mlir_program
{
    mlir_program()
        : ctx(mlirContextCreate()),
          location(mlirLocationUnknownGet(ctx.get())),
          mmodule(mlirModuleCreateEmpty(location))
    {
        MlirDialectHandle mixr_handle = mlirGetDialectHandle__migraphx__();
        mlirDialectHandleRegisterDialect(mixr_handle, ctx.get());
        mlirRegisterAllDialects(ctx.get());
        mlirContextSetAllowUnregisteredDialects(ctx.get(), true /*allow*/);
    }

    MlirType make_type(shape::type_t t) const
    {
        MlirType result;
        shape::visit(t, [&](auto as) {
            if(as.type_enum() == shape::float_type)
                result = mlirF32TypeGet(ctx.get());
            else if(as.type_enum() == shape::half_type)
                result = mlirF16TypeGet(ctx.get());
            else if(as.type_enum() == shape::double_type)
                result = mlirF64TypeGet(ctx.get());
            else if(as.is_integral())
            {
                if(as.is_signed())
                    result = mlirIntegerTypeSignedGet(ctx.get(), as.size() * 8);
                else
                    result = mlirIntegerTypeGet(ctx.get(), as.size() * 8);
            }
            else
                MIGRAPHX_THROW("Unsupported type: " + std::to_string(as.type_enum()));
        });
        return result;
    }

    MlirType make_tensor(const shape& s) const
    {
        assert(s.standard());
        std::vector<int64_t> lens(s.lens().begin(), s.lens().end());
        return mlirRankedTensorTypeGet(
            lens.size(), lens.data(), make_type(s.type()), mlirAttributeGetNull());
    }

    template <class Range>
    std::vector<MlirType> make_tensors(const Range& r)
    {
        std::vector<MlirType> result;
        std::transform(r.begin(), r.end(), std::back_inserter(result), [&](const auto& s) {
            return make_tensor(s);
        });
        return result;
    }

    MlirType make_function_type(const std::vector<shape>& inputs, const std::vector<shape>& outputs)
    {
        auto in  = make_tensors(inputs);
        auto out = make_tensors(outputs);
        return mlirFunctionTypeGet(ctx.get(), in.size(), in.data(), out.size(), out.data());
    }

    MlirIdentifier id(const std::string_view& s) const
    {
        return mlirIdentifierGet(ctx.get(), make_mlir_string_ref(s));
    }

    MlirAttribute attribute(std::int64_t i) const
    {
        if(i < 0)
            MIGRAPHX_THROW("MLIR cant handle negative values since they are ambiguous");
        return mlirIntegerAttrGet(mlirIntegerTypeGet(ctx.get(), 64), i);
    }
    MlirAttribute attribute(std::uint64_t i) const
    {
        if(i > (std::numeric_limits<std::uint64_t>::max() / 2))
            MIGRAPHX_THROW("MLIR cant handle large integer values since they are ambiguous");
        return mlirIntegerAttrGet(mlirIntegerTypeGet(ctx.get(), 64), i);
    }
    MlirAttribute attribute(unsigned char i) const { return attribute(std::uint64_t(i)); }
    MlirAttribute attribute(bool b) const { return mlirBoolAttrGet(ctx.get(), b ? 1 : 0); }
    MlirAttribute attribute(double d) const
    {
        return mlirFloatAttrDoubleGet(ctx.get(), mlirF64TypeGet(ctx.get()), d);
    }
    MlirAttribute attribute(const std::string& s) const
    {
        return mlirStringAttrGet(ctx.get(), make_mlir_string_ref(s));
    }
    MlirAttribute attribute(std::nullptr_t) const { return {}; }
    template <class T>
    MlirAttribute attribute(const std::vector<T>& v) const
    {
        std::vector<MlirAttribute> attributes;
        attributes.reserve(v.size());
        std::transform(v.begin(), v.end(), std::back_inserter(attributes), [&](auto&& x) {
            return attribute(x);
        });
        return mlirArrayAttrGet(ctx.get(), attributes.size(), attributes.data());
    }
    MlirAttribute attribute(const value& v) const
    {
        MlirAttribute attr;
        v.visit_value([&](auto&& x) { attr = attribute(x); });
        return attr;
    }
    MlirAttribute attribute(const std::vector<value>& v) const
    {
        if(v.empty())
        {
            return mlirArrayAttrGet(ctx.get(), 0, nullptr);
        }
        if(not v.front().get_key().empty())
        {
            std::vector<MlirNamedAttribute> attributes = name_attributes(v);
            return mlirDictionaryAttrGet(ctx.get(), attributes.size(), attributes.data());
        }
        else
        {
            std::vector<MlirAttribute> attributes;
            attributes.reserve(v.size());
            std::transform(v.begin(), v.end(), std::back_inserter(attributes), [&](auto&& x) {
                return attribute(x);
            });
            return mlirArrayAttrGet(ctx.get(), attributes.size(), attributes.data());
        }
    }
    MlirAttribute attribute(MlirType t) const { return mlirTypeAttrGet(t); }
    MlirAttribute attribute(MlirAttribute a) const { return a; }

    template <class T>
    MlirNamedAttribute name_attribute(const std::string_view& key, const T& x) const
    {
        MlirNamedAttribute attr;
        attr.name      = id(key);
        attr.attribute = attribute(x);
        return attr;
    }

    using attribute_t       = std::variant<std::nullptr_t,
                                           std::uint64_t,
                                           unsigned char,
                                           bool,
                                           double,
                                           std::string,
                                           value,
                                           std::vector<value>,
                                           MlirType>;
    using named_attribute_t = std::pair<std::string_view, attribute_t>;

    MlirNamedAttribute name_attribute(const named_attribute_t& na) const
    {
        return name_attribute(na.first,
                              std::visit([&](const auto& x) { return attribute(x); }, na.second));
    }

    std::vector<MlirNamedAttribute>
    name_attributes(const std::vector<named_attribute_t>& named_attrs) const
    {
        std::vector<MlirNamedAttribute> attributes;
        attributes.reserve(named_attrs.size());
        std::transform(named_attrs.begin(),
                       named_attrs.end(),
                       std::back_inserter(attributes),
                       [&](const named_attribute_t& a) { return name_attribute(a); });
        return attributes;
    }

    std::vector<MlirNamedAttribute> name_attributes(const value& v) const
    {
        std::vector<MlirNamedAttribute> attributes;
        attributes.reserve(v.size());
        std::transform(v.begin(), v.end(), std::back_inserter(attributes), [&](const value& x) {
            return name_attribute(x.get_key(), x.without_key());
        });
        return attributes;
    }

    struct mlir_operation_state
    {
        mlir_operation_state(mlir_program& p, const std::string_view& name)
            : prog(&p), op_state(mlirOperationStateGet(make_mlir_string_ref(name), p.location))
        {
        }

        mlir_operation_state& add_attributes(const std::vector<named_attribute_t>& named_attrs)
        {
            auto attributes = prog->name_attributes(named_attrs);
            mlirOperationStateAddAttributes(&op_state, attributes.size(), attributes.data());
            return *this;
        }

        mlir_operation_state& add_attribute_value(const value& v)
        {
            auto attributes = prog->name_attributes(v);
            mlirOperationStateAddAttributes(&op_state, attributes.size(), attributes.data());
            return *this;
        }

        mlir_operation_state& add_regions(std::vector<mlir_region> rs)
        {
            regions = std::move(rs);
            return *this;
        }

        mlir_operation_state& add_region(mlir_region r)
        {
            regions.emplace_back(std::move(r));
            return *this;
        }

        mlir_operation_state& add_results(const std::vector<shape>& outputs)
        {
            auto x = prog->make_tensors(outputs);
            mlirOperationStateAddResults(&op_state, x.size(), x.data());
            return *this;
        }

        mlir_operation_state& add_operands(const std::vector<MlirValue>& inputs)
        {
            mlirOperationStateAddOperands(&op_state, inputs.size(), inputs.data());
            return *this;
        }

        mlir_operation create_operation()
        {
            std::vector<MlirRegion> mregions(regions.size());
            std::transform(regions.begin(), regions.end(), mregions.begin(), [](const auto& r) {
                return r.get();
            });
            mlirOperationStateAddOwnedRegions(&op_state, mregions.size(), mregions.data());
            mlir_operation op(mlirOperationCreate(&op_state));
            // Release memory since mlir_operation owns it
            for(auto& r : regions)
                r.release();
            regions.clear();
            return op;
        }

        mlir_program* prog;
        MlirOperationState op_state;
        std::vector<mlir_region> regions = {};
    };

    mlir_operation_state create_operation_state(const std::string_view& name)
    {
        return {*this, name};
    }

    std::vector<MlirValue> insert(MlirBlock body, mlir_operation_state ops)
    {
        std::vector<MlirValue> result;
        mlir_operation op = ops.create_operation();
        auto weak_op      = op.get();
        mlirBlockAppendOwnedOperation(body, op.release());

        auto n = mlirOperationGetNumResults(weak_op);
        result.reserve(n);
        transform(range(n), std::back_inserter(result), [&](auto i) {
            return mlirOperationGetResult(weak_op, i);
        });
        return result;
    }

    MlirBlock
    insert(MlirBlock body, const module& m, std::unordered_map<instruction_ref, MlirValue>& ins_map)
    {
        auto names = m.get_parameter_names();
        std::sort(names.begin(), names.end());
        std::vector<shape> inputs;
        std::transform(names.begin(),
                       names.end(),
                       std::back_inserter(inputs),
                       [&](const std::string& name) { return m.get_parameter_shape(name); });
        std::vector<shape> outputs = m.get_output_shapes();

        std::vector<MlirLocation> arg_locs(inputs.size(), location);
        auto body_inputs   = make_tensors(inputs);
        mlir_region region = mlirRegionCreate();
        mlir_block fbody = mlirBlockCreate(body_inputs.size(), body_inputs.data(), arg_locs.data());
        MlirBlock result = fbody.get();
        mlirRegionAppendOwnedBlock(region.get(), fbody.release());

        auto ops = create_operation_state("func.func");
        ops.add_attributes({{"function_type", make_function_type(inputs, outputs)},
                            {"sym_name", std::string("main")},
                            {"kernel", std::string("mixr")}});
        ops.add_region(std::move(region));
        insert(body, std::move(ops));

        for(auto i : range(names.size()))
            ins_map[m.get_parameter(names[i])] = mlirBlockGetArgument(result, i);
        return result;
    }

    static std::string get_name(instruction_ref ins)
    {
        if(ins->name() == "@return")
            return "func.return";
        return "migraphx." + ins->name();
    }

    static value get_operator_value(const operation& op)
    {
        auto v = op.to_value();
        if(op.name() == "convolution")
        {
            // Adjust symetrical padding
            if(v.at("padding").size() == v.at("stride").size())
            {
                auto padding = v.at("padding");
                std::copy(padding.begin(), padding.end(), std::back_inserter(v.at("padding")));
            }
        }
        return v;
    }

    static shape get_shape(instruction_ref ins)
    {
        if(ins->name() == "@return")
        {
            assert(ins->inputs().size() == 1);
            return ins->inputs().front()->get_shape();
        }
        return ins->get_shape();
    }

    void parse(const module& m)
    {
        auto mbody = mlirModuleGetBody(mmodule.get());
        std::unordered_map<instruction_ref, MlirValue> ins_map;
        auto fbody = insert(mbody, m, ins_map);
        for(auto ins : iterator_for(m))
        {
            if(ins->name() == "@param")
                continue;
            auto name = get_name(ins);
            auto ops  = create_operation_state(name);
            ops.add_attribute_value(get_operator_value(ins->get_operator()));
            if(ins->name() != "@return")
                ops.add_results({get_shape(ins)});

            std::vector<MlirValue> inputs;
            transform(
                ins->inputs(), std::back_inserter(inputs), [&](auto i) { return ins_map.at(i); });
            ops.add_operands(inputs);

            auto outputs = insert(fbody, std::move(ops));
            if(ins->name() != "@return")
            {
                assert(outputs.size() == 1);
                ins_map[ins] = outputs.front();
            }
        }
    }

    code_object_op compile() MIGRAPHX_TIDY_CONST
    {
        mlir_pass_manager pm{mlirPassManagerCreate(ctx.get())};
        // 1st pipeline to call
        mlirMIGraphXAddHighLevelPipeline(pm.get());
        // 2nd pipeline to call
        std::string tname = get_device_name();
        // HACK: Since MLIR can't handle the full target name
        auto hacked_tname = tname.substr(0, tname.find(':'));
        if(tname.size() != hacked_tname.size())
            std::cout
                << "*************** WARNING: MLIR may not compile the correct target features for: "
                << tname << std::endl;
        mlirMIGraphXAddBackendPipeline(pm.get(), hacked_tname.c_str(), "amdgcn-amd-amdhsa", "");
        mlirPassManagerRun(pm.get(), mmodule.get());

        code_object_op op{};
        op.symbol_name                = "main";
        op.code_object                = get_binary();
        std::tie(op.global, op.local) = get_launch_params();
        return op;
    }

    std::pair<std::size_t, std::size_t> get_launch_params() const
    {
        uint32_t attrs[2];
        // returns block and grid sizes
        mlirGetKernelAttrs(mmodule.get(), attrs);
        std::size_t local  = attrs[0];
        std::size_t global = local * attrs[1];
        return {global, local};
    }

    value::binary get_binary() const
    {
        int size = 0;
        mlirGetBinary(mmodule.get(), &size, nullptr);
        value::binary result(size);
        if(mlirGetBinary(mmodule.get(), &size, reinterpret_cast<char*>(result.data())))
            return result;
        MIGRAPHX_THROW("Failed to compile mlir program");
    }

    mlir_context ctx;
    MlirLocation location;
    mlir_module mmodule;
    std::deque<std::string> strings{};
};

std::string dump_mlir(const module& m)
{
    mlir_program mp;
    mp.parse(m);
    auto mod_op = mlirModuleGetOperation(mp.mmodule.get());
    return mlir_print(&mlirOperationPrint, mod_op);
}

code_object_op compile_mlir(const context&, const module& m)
{
    const bool trace = enabled(MIGRAPHX_TRACE_MLIR{});
    if(trace)
        std::cout << m << std::endl;
    mlir_program mp;
    mp.parse(m);
    auto mod_op = mlirModuleGetOperation(mp.mmodule.get());
    if(trace)
        std::cout << mlir_print(&mlirOperationPrint, mod_op) << std::endl;
    auto co   = mp.compile();
    co.output = m.get_output_shapes().front();
    return co;
}

instruction_ref insert_mlir(module& m,
                            instruction_ref ins,
                            code_object_op co,
                            const std::vector<instruction_ref>& inputs)
{
    std::vector<instruction_ref> refs;
    refs.reserve(inputs.size() * 15);
    std::unordered_map<uint64_t, instruction_ref> literal_map{};
    auto get_literal = [&](uint64_t value) {
        auto fi = literal_map.find(value);
        if(fi != literal_map.end())
            return fi->second;
        auto lit = m.add_literal(value);
        literal_map.emplace(value, lit);
        return lit;
    };

    std::size_t last = 0;
    for(auto input : inputs)
    {
        const size_t offset = 0;
        auto s = input->get_shape();
        last   = refs.size();
        refs.push_back(input);
        refs.push_back(input);
        refs.push_back(get_literal(offset)); // offset
        // dim sizes
        std::transform(s.lens().begin(),
                       s.lens().end(),
                       std::back_inserter(refs),
                       [&](const auto& lval) { return get_literal(lval); });
        // refs.push_back(get_literal(1)); // G
        // dim strides
        std::transform(s.strides().begin(),
                       s.strides().end(),
                       std::back_inserter(refs),
                       [&](const auto& lval) { return get_literal(lval); });
        // refs.push_back(get_literal(1)); // G
    }
    co.expected_inputs = to_shapes(refs);
    co.output_arg      = last;
    return m.insert_instruction(ins, co, refs);
}

#else

std::string dump_mlir(const module&) { return {}; }

code_object_op compile_mlir(const context&, const module&) { return {}; }

template <class T>
void use(T&)
{
}

instruction_ref
// cppcheck-suppress funcArgNamesDifferent
insert_mlir(module& m, instruction_ref, code_object_op co, const std::vector<instruction_ref>&)
{
    use(co);
    return m.end();
}

#endif

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
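A note on the mlir_handle template at the top of this file: MLIR's C API passes handle types by value rather than as raw pointers, so they cannot go into std::unique_ptr directly. Defining a custom deleter::pointer type lets unique_ptr store the handle itself, and MIGRAPHX_MANAGE_MLIR_HANDLE binds the matching destroy function as a template argument. A stripped-down sketch of the same idiom for a made-up by-value handle (FooHandle and fooDestroy are hypothetical stand-ins, not MLIR API; the valid flag here replaces the bit-level comparison the real code uses):

#include <cstdio>
#include <memory>

struct FooHandle { int id; };              // stand-in for e.g. MlirContext
void fooDestroy(FooHandle h) { std::printf("destroyed %d\n", h.id); }

template <class T, class F, F f>
struct handle_wrapper
{
    // unique_ptr normally stores T*; defining deleter::pointer lets it hold
    // the by-value handle directly, so no heap allocation is involved.
    struct ptr
    {
        ptr() = default;
        ptr(std::nullptr_t) {}
        ptr(T x) : obj(x), valid(true) {}
        friend bool operator==(ptr a, ptr b) { return a.valid == b.valid; }
        friend bool operator!=(ptr a, ptr b) { return not(a == b); }
        T obj{};
        bool valid = false;
    };
    struct deleter
    {
        using pointer = ptr;
        void operator()(ptr x) const
        {
            if(x != nullptr)
                f(x.obj); // call the bound destroy function exactly once
        }
    };
    handle_wrapper(T x) : handle(ptr{x}) {}
    T get() const { return handle.get().obj; }
    std::unique_ptr<ptr, deleter> handle;
};

using foo_handle = handle_wrapper<FooHandle, decltype(&fooDestroy), &fooDestroy>;

int main()
{
    foo_handle h(FooHandle{42}); // fooDestroy(42) runs when h leaves scope
}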
src/targets/gpu/mlir_conv.cpp (deleted, 100644 → 0)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/gpu/mlir_conv.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/convolution.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/program.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/program.hpp>
#include <migraphx/gpu/kernel.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/gpu/compile_hip.hpp>
#include <utility>
#include <functional>
#include <algorithm>
#ifdef MIGRAPHX_MLIR_MIOPEN_SUPPORT
#include <Miir.h>
#endif // MIGRAPHX_MLIR_MIOPEN_SUPPORT
#include <cstdio>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

struct mlir_apply
{
    module* mod           = nullptr;
    const mlir_conv* pass = nullptr;

    const char* mlir_kernel_name = "migraphx_conv2d";

    std::unordered_map<uint64_t, instruction_ref> literal_map{};

    struct execution_spec
    {
        migraphx::value::binary binary;
        size_t global_size;
        size_t local_size;
        execution_spec(migraphx::value::binary&& binary_m, size_t global_s, size_t local_s)
            : binary(std::move(binary_m)), global_size(global_s), local_size(local_s)
        {
        }
    };

    std::unordered_map<std::string, std::shared_ptr<execution_spec>> binary_map{};

    context& get_context() const
    {
        assert(pass != nullptr);
        assert(pass->ctx != nullptr);
        return *pass->ctx;
    }

    void init() const
    {
        assert(mod != nullptr);
        assert(pass != nullptr);
    }

    std::shared_ptr<execution_spec> make_mlir_binary(instruction_ref op_r)
    {
        std::shared_ptr<execution_spec> result;
#ifdef MIGRAPHX_MLIR_MIOPEN_SUPPORT
        auto conv  = any_cast<op::convolution>(op_r->get_operator());
        auto inp_t = op_r->inputs().at(0)->get_shape();
        auto flt_t = op_r->inputs().at(1)->get_shape();
        auto out_t = op_r->get_shape();

        auto get_type_str = [](const shape& s) -> const char* {
            switch(s.type())
            {
            case shape::float_type: return "f32";
            case shape::half_type: return "f16";
            case shape::bool_type:
            case shape::double_type:
            case shape::uint8_type:
            case shape::int8_type:
            case shape::uint16_type:
            case shape::int16_type:
            case shape::int32_type:
            case shape::int64_type:
            case shape::uint32_type:
            case shape::uint64_type:
            case shape::tuple_type: break;
            }
            return nullptr;
        };

        const auto* inp_t_s = get_type_str(inp_t);
        const auto* flt_t_s = get_type_str(flt_t);
        const auto* out_t_s = get_type_str(out_t);
        if(out_t_s == nullptr || inp_t_s == nullptr || flt_t_s == nullptr)
            return result;

        std::string mlir_options = "--kernel_name " + std::string(mlir_kernel_name);

        // platform spec
        auto& device = get_context().get_current_device();
        char dev_name[64];
        sprintf(dev_name, "gfx%lu%02lu", device.get_device_major(), device.get_device_minor());
        mlir_options +=
            " --arch " + std::string(dev_name) + " --num_cu " + std::to_string(device.get_cu_count());
        // ???
        // Conv spec
        mlir_options += " --operation "
                        "conv2d"
                        " --batchsize " +
                        std::to_string(conv.group) + " --groupsize " + std::to_string(1) +
                        " --padding_h " + std::to_string(conv.padding[0]) + " --padding_w " +
                        std::to_string(conv.padding[1]) + " --conv_stride_h " +
                        std::to_string(conv.stride[0]) + " --conv_stride_w " +
                        std::to_string(conv.stride[1]) + " --dilation_h " +
                        std::to_string(conv.dilation[0]) + " --dilation_w " +
                        std::to_string(conv.dilation[1]);
        // Input spec
        mlir_options += " --in_layout "
                        "NCHWG"
                        " --in_type " +
                        std::string(inp_t_s) + " --in_channels " +
                        std::to_string(inp_t.lens()[1]) + " --in_h " +
                        std::to_string(inp_t.lens()[2]) + " --in_w " +
                        std::to_string(inp_t.lens()[3]);
        // Filter spec
        mlir_options += " --fil_layout "
                        "NCHWG"
                        " --fil_type " +
                        std::string(flt_t_s) + " --fil_h " + std::to_string(flt_t.lens()[2]) +
                        " --fil_w " + std::to_string(flt_t.lens()[3]);
        // Output spec
        mlir_options += " --out_layout "
                        "NCHWG"
                        " --out_type " +
                        std::string(out_t_s) + " --out_channels " +
                        std::to_string(out_t.lens()[1]) + " --out_h " +
                        std::to_string(out_t.lens()[2]) + " --out_w " +
                        std::to_string(out_t.lens()[3]);

        auto bin_i = binary_map.find(mlir_options);
        if(bin_i == binary_map.end())
        {
            size_t bin_size   = 0;
            using mlir_handle = MIGRAPHX_MANAGE_PTR(MiirHandle, miirDestroyHandle);
            auto handle       = mlir_handle(miirCreateHandle(mlir_options.c_str()));
            if(miirLowerBin(handle.get()) == MIIR_SUCCESS &&
               miirBufferGet(handle.get(), nullptr, &bin_size) == MIIR_SUCCESS)
            {
                migraphx::value::binary bin(bin_size);
                if(miirBufferGet(handle.get(), reinterpret_cast<char*>(bin.data()), &bin_size) ==
                   MIIR_SUCCESS)
                {
                    size_t global_size;
                    size_t block_size;
                    if(miirGetExecutionDims(handle.get(), &global_size, &block_size) ==
                       MIIR_SUCCESS)
                    {
                        result = std::make_shared<execution_spec>(
                            std::move(bin), global_size, block_size);
                    }
                }
            }
            binary_map[mlir_options] = result;
        }
        else
        {
            result = bin_i->second;
        }
#else // MIGRAPHX_MLIR_MIOPEN_SUPPORT
        (void)op_r;
#endif // MIGRAPHX_MLIR_MIOPEN_SUPPORT
        return result;
    }

    instruction_ref get_literal(uint64_t value)
    {
        auto fi = literal_map.find(value);
        if(fi != literal_map.end())
            return fi->second;
        auto lit = mod->add_literal(value);
        literal_map.emplace(value, lit);
        return lit;
    }

    operation make_code_object_op(instruction_ref op_r, const std::shared_ptr<execution_spec>& spec)
    {
        // each pointer is expanded out to a MemRefDescriptor
        auto inp_t = op_r->inputs().at(0)->get_shape();
        auto flt_t = op_r->inputs().at(1)->get_shape();
        auto out_t = op_r->get_shape();
        auto i64   = shape(shape::uint64_type);
        std::vector<shape> expected_inputs = {
            flt_t, flt_t, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64,
            inp_t, inp_t, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64,
            out_t, out_t, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64,
            out_t};
        return migraphx::make_op("gpu::code_object",
                                 {
                                     {"code_object", spec->binary},
                                     {"symbol_name", mlir_kernel_name},
                                     {"global", spec->global_size},
                                     {"local", spec->local_size},
                                     {"expected_inputs", migraphx::to_value(expected_inputs)},
                                     {"output", migraphx::to_value(out_t)},
                                 });
    }

    void add_memref_descriptor(std::vector<instruction_ref>& refs, instruction_ref inst)
    {
        const size_t offset = 0;
        auto inst_t = inst->get_shape();
        refs.push_back(inst);
        refs.push_back(inst);
        refs.push_back(get_literal(offset)); // offset
        // dim sizes
        std::transform(inst_t.lens().begin(),
                       inst_t.lens().end(),
                       std::back_inserter(refs),
                       [&](const auto& lval) { return get_literal(lval); });
        refs.push_back(get_literal(1)); // G
        // dim strides
        std::transform(inst_t.strides().begin(),
                       inst_t.strides().end(),
                       std::back_inserter(refs),
                       [&](const auto& lval) { return get_literal(lval); });
        refs.push_back(get_literal(1)); // G
    }

    instruction_ref insert_allocation(instruction_ref ins, const shape& s) const
    {
        return mod->insert_instruction(ins, hip_allocate{s});
    }

    void replace_conv_op(instruction_ref ins)
    {
        auto conv_bin = make_mlir_binary(ins);
        if(conv_bin)
        {
            auto conv = make_code_object_op(ins, conv_bin);
            auto inp  = ins->inputs().at(0);
            auto flt  = ins->inputs().at(1);
            auto out  = insert_allocation(ins, ins->get_shape());

            std::vector<instruction_ref> refs;
            refs.reserve(3 * 13 + 1);
            add_memref_descriptor(refs, flt);
            add_memref_descriptor(refs, inp);
            add_memref_descriptor(refs, out);
            refs.push_back(out);

            mod->replace_instruction(ins, conv, refs);
        }
    }

    void apply()
    {
        init();
        for(auto it : iterator_for(*mod))
        {
            if(it->name() == "convolution")
            {
                replace_conv_op(it);
            }
        }
    }
};

void mlir_conv::apply(module& m) const { mlir_apply{&m, this}.apply(); }

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
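Both the deleted pass's add_memref_descriptor and the new insert_mlir in mlir.cpp flatten each tensor argument into a MemRef-style descriptor before launch: the buffer reference twice (allocated and aligned), a zero offset, the dimension sizes, then the strides; the old conv-only path additionally appended a group dimension G with extent and stride 1. A schematic sketch of that layout for a 1x8x4x4 NCHW input; the struct and the concrete numbers are illustrative only, since the real code passes these values as literal instructions:

#include <cstdint>
#include <vector>

// Flattened argument order per tensor, mirroring add_memref_descriptor:
//   allocated_ptr, aligned_ptr, offset, sizes..., strides...
struct memref_descriptor
{
    void* allocated;
    void* aligned;
    std::int64_t offset;
    std::vector<std::int64_t> sizes;
    std::vector<std::int64_t> strides;
};

memref_descriptor describe_nchwg(void* buf)
{
    return {buf, buf, 0,
            {1, 8, 4, 4, 1},     // N, C, H, W, plus trailing group dim G = 1
            {128, 16, 4, 1, 1}}; // row-major strides for 1x8x4x4, G stride 1
}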
src/targets/gpu/quant_convolution.cpp

...
@@ -67,7 +67,7 @@ argument miopen_quant_convolution::compute(context& ctx,
     return args[3];
 }

-shape miopen_quant_convolution::compile(context& ctx,
+shape miopen_quant_convolution::find(context& ctx,
                                         const shape& output_shape,
                                         std::vector<shape> inputs)
 {
...
@@ -92,8 +92,8 @@ shape miopen_quant_convolution::compile(context& ctx,
         x_shape = pack_int8_shape(x_shape);
         w_shape = pack_int8_shape(w_shape);
     }
-    auto arg_vec4_x = to_gpu(generate_argument(x_shape));
-    auto arg_vec4_w = to_gpu(generate_argument(w_shape));
+    auto x = to_gpu(generate_argument(x_shape));
+    auto w = to_gpu(generate_argument(w_shape));
     auto y         = allocate_gpu(output_shape);
     auto workspace = allocate_gpu(workspace_shape);
...
@@ -101,9 +101,9 @@ shape miopen_quant_convolution::compile(context& ctx,
     miopenConvAlgoPerf_t perf;
     auto status = miopenFindConvolutionForwardAlgorithm(ctx.get_stream().get_miopen(),
                                                         x_desc.get(),
-                                                        arg_vec4_x.implicit(),
+                                                        x.implicit(),
                                                         w_desc.get(),
-                                                        arg_vec4_w.implicit(),
+                                                        w.implicit(),
                                                         cd.get(),
                                                         y_desc.get(),
                                                         y.implicit(),
...
@@ -114,11 +114,35 @@ shape miopen_quant_convolution::compile(context& ctx,
                                                         workspace_size,
                                                         false);
     if(status != miopenStatusSuccess)
-    {
-        MIGRAPHX_THROW("QUANT_CONVOLUTION: find convolution failed");
-    }
-    handle = ctx.get_stream().get_miopen();
+        MIGRAPHX_THROW("MIOpen Quant Convolution: find convolution failed");
     algo = perf.fwd_algo;
+
+    size_t solution_count;
+
+    status = miopenConvolutionForwardGetSolutionCount(ctx.get_stream().get_miopen(),
+                                                      w_desc.get(),
+                                                      x_desc.get(),
+                                                      cd.get(),
+                                                      y_desc.get(),
+                                                      &solution_count);
+    if(status != miopenStatusSuccess)
+        MIGRAPHX_THROW("MIOpen Quant Convolution: get solution count failed");
+
+    std::vector<miopenConvSolution_t> solutions(solution_count);
+
+    status = miopenConvolutionForwardGetSolution(ctx.get_stream().get_miopen(),
+                                                 w_desc.get(),
+                                                 x_desc.get(),
+                                                 cd.get(),
+                                                 y_desc.get(),
+                                                 solution_count,
+                                                 &solution_count,
+                                                 solutions.data());
+    if(status != miopenStatusSuccess)
+        MIGRAPHX_THROW("MIOpen Quant Convolution: get solution failed");
+    solution_id = solutions.front().solution_id;
+
     return shape{shape::int8_type, {perf.memory}};
 }
...
@@ -126,13 +150,29 @@ void miopen_quant_convolution::finalize(context& ctx,
                                         const shape& output_shape,
                                         std::vector<shape> inputs)
 {
-    if(handle == ctx.get_stream().get_miopen())
-        return;
     if(cd == nullptr)
         cd = make_conv(op);
+    if(solution_id == 0)
     {
         // Check that workspace hasn't changed
         auto size = inputs.at(2).bytes();
-        auto ws   = compile(ctx, output_shape, std::move(inputs));
+        auto ws   = find(ctx, output_shape, inputs);
         if(ws.bytes() > size)
-            MIGRAPHX_THROW("Workspace has changed during finalization.");
+            MIGRAPHX_THROW("MIOpen Quant Convolution: workspace has changed during finalization.");
     }
+
+    auto x_desc = make_tensor(inputs[0], int8_x4_format);
+    auto w_desc = make_tensor(inputs[1], int8_x4_format);
+    auto y_desc = make_tensor(output_shape);
+
+    auto status = miopenConvolutionForwardCompileSolution(ctx.get_stream().get_miopen(),
+                                                          w_desc.get(),
+                                                          x_desc.get(),
+                                                          cd.get(),
+                                                          y_desc.get(),
+                                                          solution_id);
+
+    if(status != miopenStatusSuccess)
+        MIGRAPHX_THROW("MIOpen Quant Convolution: compile solution failed");
 }

 shape miopen_quant_convolution::pack_int8_shape(const shape& s) const
...
src/targets/gpu/target.cpp

...
@@ -53,10 +53,10 @@
 #include <migraphx/gpu/compile_ops.hpp>
 #include <migraphx/gpu/concat_gpu_opt.hpp>
 #include <migraphx/gpu/context.hpp>
+#include <migraphx/gpu/fuse_mlir.hpp>
 #include <migraphx/gpu/fuse_ops.hpp>
 #include <migraphx/gpu/prefuse_ops.hpp>
 #include <migraphx/gpu/lowering.hpp>
-#include <migraphx/gpu/mlir_conv.hpp>
 #include <migraphx/gpu/pack_int8_args.hpp>
 #include <migraphx/gpu/schedule_model.hpp>
 #include <migraphx/gpu/sync_device.hpp>
...
@@ -128,7 +128,8 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
         dead_code_elimination{},
         enable_pass(not enabled(MIGRAPHX_DISABLE_POINTWISE_FUSION{}), fuse_pointwise{}),
         dead_code_elimination{},
-        mlir_conv{&ctx},
+        fuse_mlir{&ctx},
+        dead_code_elimination{},
         lowering{&ctx, options.offload_copy},
         eliminate_contiguous{"gpu::contiguous"},
         dead_code_elimination{},
...
test/eliminate_contiguous_test.cpp

...
@@ -205,4 +205,24 @@ TEST_CASE(contiguous_pointwise)
         mm->begin(), mm->end(), [](auto&& ins) { return ins.name() == "contiguous"; }));
 }

+TEST_CASE(slice_contiguous)
+{
+    migraphx::module m;
+    migraphx::shape s{migraphx::shape::float_type, {4, 2}};
+    auto x  = m.add_parameter("x", s);
+    auto t  = m.add_instruction(migraphx::make_op("transpose", {{"permutation", {1, 0}}}), x);
+    auto c  = m.add_instruction(migraphx::make_op("contiguous"), t);
+    auto s1 = m.add_instruction(
+        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {0}}, {"ends", {1}}}), c);
+    auto s2 = m.add_instruction(
+        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {1}}, {"ends", {2}}}), c);
+    auto c1 = m.add_instruction(migraphx::make_op("contiguous"), s1);
+    auto c2 = m.add_instruction(migraphx::make_op("contiguous"), s2);
+    m.add_instruction(pass_standard_op{}, c1, c2);
+    run_pass(m);
+    EXPECT(std::count_if(m.begin(), m.end(), [](auto&& ins) {
+               return ins.name() == "contiguous";
+           }) == 1);
+}
+
 int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/get_target_assignments.cpp (new file, 0 → 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "test.hpp"
#include <migraphx/make_op.hpp>
#include <migraphx/program.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/target_assignments.hpp>
migraphx::program create_program()
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {3}};

    auto x = mm->add_parameter("x", s);
    auto y = mm->add_parameter("y", s);
    auto z = mm->add_parameter("z", s);

    auto diff = mm->add_instruction(migraphx::make_op("div"), x, y);
    mm->add_instruction(migraphx::make_op("div"), diff, z);
    return p;
}

TEST_CASE(is_supported)
{
    auto p = create_program();

    auto targets = migraphx::get_targets();
    EXPECT(!targets.empty());
    auto first_target = targets[0];

    auto t = migraphx::make_target(first_target);
    const auto assignments = p.get_target_assignments({t});

    for(const auto& [ins, target] : assignments)
    {
        (void)ins;
        EXPECT(target == first_target);
    }
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/gpu/mlir.cpp (new file, 0 → 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/gpu/mlir.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/write_literals.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/module.hpp>
#include <migraphx/program.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/verify_args.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/functional.hpp>
#include <test.hpp>
using migraphx::trim;

// m test_gpu_mlir && ./bin/test_gpu_mlir
struct mlir_gpu_target : migraphx::gpu::target
{
    std::string name() const { return "mlir"; }
    std::vector<migraphx::pass> get_passes(migraphx::context& gctx,
                                           const migraphx::compile_options&) const
    {
        auto& ctx = migraphx::any_cast<migraphx::gpu::context>(gctx);
        return {migraphx::gpu::write_literals{&ctx}};
    }
};

std::string encode(const std::string& s)
{
    std::stringstream ss;
    bool prespace = false;
    for(auto c : s)
    {
        if(std::isspace(c) != 0)
        {
            if(not prespace)
                ss << " ";
            prespace = true;
        }
        else if(std::isprint(c) != 0)
        {
            ss << c;
            prespace = false;
        }
    }
    return migraphx::trim(ss.str());
}

migraphx::program create_program_from_mlir(const migraphx::module& mmlir)
{
    migraphx::program p;
    auto* mm   = p.get_main_module();
    auto names = mmlir.get_parameter_names();
    std::vector<migraphx::instruction_ref> inputs;
    std::transform(names.begin(), names.end(), std::back_inserter(inputs), [&](const auto& name) {
        return mm->add_parameter(name, mmlir.get_parameter_shape(name));
    });
    std::sort(inputs.begin(), inputs.end(), migraphx::by(std::less<>{}, [](auto ins) {
                  return to_string(ins->get_operator());
              }));
    inputs.push_back(mm->add_parameter("output", mmlir.get_output_shapes().front()));

    migraphx::gpu::context ctx;
    migraphx::gpu::insert_mlir(*mm, mm->end(), compile_mlir(ctx, mmlir), inputs);
    return p;
}

migraphx::parameter_map generate_params(const migraphx::program& p)
{
    migraphx::parameter_map m;
    std::size_t i = 0;
    for(auto&& x : p.get_parameter_shapes())
    {
        // m[x.first] = migraphx::fill_argument(x.second, 1);
        m[x.first] = migraphx::generate_argument(x.second, i++);
    }
    return m;
}

migraphx::argument run_gpu(migraphx::program p, const migraphx::parameter_map& inputs)
{
    mlir_gpu_target t;
    p.compile(t);
    migraphx::parameter_map m;
    for(auto&& input : inputs)
    {
        m[input.first] = t.copy_to(input.second);
    }
    for(auto&& x : p.get_parameter_shapes())
    {
        if(m.count(x.first) == 0)
        {
            m[x.first] = t.allocate(x.second);
        }
    }
    return t.copy_from(p.eval(m).front());
}

migraphx::argument run_ref(migraphx::program p, const migraphx::parameter_map& inputs)
{
    p.compile(migraphx::ref::target{});
    return p.eval(inputs).front();
}

bool verify_mlir(const migraphx::module& mmlir)
{
    migraphx::program ref;
    ref.get_main_module()->insert_instructions(ref.get_main_module()->end(), &mmlir);

    auto inputs = generate_params(ref);
    auto mlir   = create_program_from_mlir(mmlir);

    return migraphx::verify_args("mlir", run_ref(ref, inputs), run_gpu(mlir, inputs));
}

TEST_CASE(conv)
{
    const std::string mlir_output = R"__migraphx__(
module {
  func @main(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
    %0 = migraphx.convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
    return %0 : tensor<1x2x2x2xf32>
  }
}
)__migraphx__";
    migraphx::module m;
    auto x    = m.add_parameter("x", {migraphx::shape::float_type, {1, 8, 4, 4}});
    auto w    = m.add_parameter("w", {migraphx::shape::float_type, {2, 8, 3, 3}});
    auto conv = m.add_instruction(migraphx::make_op("convolution"), x, w);
    m.add_return({conv});
    auto s = migraphx::gpu::dump_mlir(m);
    // Skip test if MLIR is not enabled
    if(s.empty())
        return;
    CHECK(encode(s) == encode(mlir_output));
    EXPECT(verify_mlir(m));
}

TEST_CASE(conv_add_relu)
{
    const std::string mlir_output = R"__migraphx__(
module {
  func @main(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
    %0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
    %1 = migraphx.add(%0, %arg0) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
    %2 = migraphx.relu(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
    return %2 : tensor<1x2x2x2xf32>
  }
}
)__migraphx__";
    migraphx::module m;
    auto x    = m.add_parameter("x", {migraphx::shape::float_type, {1, 8, 4, 4}});
    auto w    = m.add_parameter("w", {migraphx::shape::float_type, {2, 8, 3, 3}});
    auto b    = m.add_parameter("b", {migraphx::shape::float_type, {1, 2, 2, 2}});
    auto conv = m.add_instruction(migraphx::make_op("convolution"), x, w);
    auto add  = m.add_instruction(migraphx::make_op("add"), conv, b);
    auto relu = m.add_instruction(migraphx::make_op("relu"), add);
    m.add_return({relu});
    auto s = migraphx::gpu::dump_mlir(m);
    // Skip test if MLIR is not enabled
    if(s.empty())
        return;
    CHECK(encode(s) == encode(mlir_output));
    EXPECT(verify_mlir(m));
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/module_test.cpp

...
@@ -300,6 +300,96 @@ TEST_CASE(parameter_name_order)
     EXPECT(param_names == names1);
 }

+TEST_CASE(insert_instructions_module)
+{
+    migraphx::shape s{migraphx::shape::int32_type, {1}};
+    migraphx::module m1("m1");
+    auto x1   = m1.add_parameter("x1", s);
+    auto sqrt = m1.add_instruction(migraphx::make_op("sqrt"), {x1});
+    m1.add_instruction(migraphx::make_op("add"), {sqrt, x1});
+
+    migraphx::module m2("m2");
+    auto x2 = m2.add_parameter("x2", s);
+    m2.add_instruction(migraphx::make_op("sqrt"), {x2});
+
+    m1.insert_instructions(sqrt, &m2, {{x2, x1}});
+    EXPECT(std::prev(sqrt)->name() == "sqrt");
+    EXPECT(std::count_if(
+               m1.begin(), m1.end(), [](auto&& ins) { return ins.name() == "sqrt"; }) == 2);
+    EXPECT(std::count_if(
+               m1.begin(), m1.end(), [](auto&& ins) { return ins.name() == "@param"; }) == 1);
+    EXPECT(contains(m1.get_parameter_shapes(), "x1"));
+    EXPECT(not contains(m1.get_parameter_shapes(), "x2"));
+}
+
+TEST_CASE(add_instructions_module)
+{
+    migraphx::shape s{migraphx::shape::int32_type, {1}};
+    migraphx::module m1("m1");
+    auto x1 = m1.add_parameter("x1", s);
+    m1.add_instruction(migraphx::make_op("sqrt"), {x1});
+
+    migraphx::module m2("m2");
+    auto x2 = m2.add_parameter("x2", s);
+    m2.add_instruction(migraphx::make_op("sqrt"), {x2});
+
+    m1.add_instructions(&m2, {{x2, x1}});
+    EXPECT(std::count_if(
+               m1.begin(), m1.end(), [](auto&& ins) { return ins.name() == "sqrt"; }) == 2);
+    EXPECT(std::count_if(
+               m1.begin(), m1.end(), [](auto&& ins) { return ins.name() == "@param"; }) == 1);
+    EXPECT(contains(m1.get_parameter_shapes(), "x1"));
+    EXPECT(not contains(m1.get_parameter_shapes(), "x2"));
+}
+
+TEST_CASE(add_instructions_range)
+{
+    migraphx::shape s{migraphx::shape::int32_type, {1}};
+    migraphx::module m1("m1");
+    auto x1 = m1.add_parameter("x1", s);
+    m1.add_instruction(migraphx::make_op("sqrt"), {x1});
+
+    migraphx::module m2("m2");
+    auto x2    = m2.add_parameter("x2", s);
+    auto sqrt2 = m2.add_instruction(migraphx::make_op("sqrt"), {x2});
+
+    m1.add_instructions(sqrt2, m2.end(), {{x2, x1}});
+    EXPECT(std::any_of(m1.begin(), m1.end(), [&](auto&& ins) {
+        return migraphx::contains(ins.inputs(), x1);
+    }));
+    EXPECT(std::count_if(
+               m1.begin(), m1.end(), [](auto&& ins) { return ins.name() == "sqrt"; }) == 2);
+    EXPECT(std::count_if(
+               m1.begin(), m1.end(), [](auto&& ins) { return ins.name() == "@param"; }) == 1);
+    EXPECT(contains(m1.get_parameter_shapes(), "x1"));
+    EXPECT(not contains(m1.get_parameter_shapes(), "x2"));
+}
+
+TEST_CASE(add_instructions_vector)
+{
+    migraphx::shape s{migraphx::shape::int32_type, {1}};
+    migraphx::module m1("m1");
+    auto x1 = m1.add_parameter("x1", s);
+    m1.add_instruction(migraphx::make_op("sqrt"), {x1});
+
+    migraphx::module m2("m2");
+    auto x2    = m2.add_parameter("x2", s);
+    auto sqrt2 = m2.add_instruction(migraphx::make_op("sqrt"), {x2});
+
+    m1.add_instructions({sqrt2}, {{x2, x1}});
+    EXPECT(std::any_of(m1.begin(), m1.end(), [&](auto&& ins) {
+        return migraphx::contains(ins.inputs(), x1);
+    }));
+    EXPECT(std::count_if(
+               m1.begin(), m1.end(), [](auto&& ins) { return ins.name() == "sqrt"; }) == 2);
+    EXPECT(std::count_if(
+               m1.begin(), m1.end(), [](auto&& ins) { return ins.name() == "@param"; }) == 1);
+    EXPECT(contains(m1.get_parameter_shapes(), "x1"));
+    EXPECT(not contains(m1.get_parameter_shapes(), "x2"));
+}
+
 struct check_for_pass_op
 {
     bool* found = nullptr;
...
...
test/op_shape_test.cpp
View file @
fd3252dc
...
...
@@ -981,7 +981,8 @@ TEST_CASE(multibroadcast)
}
{
std
::
vector
<
std
::
size_t
>
lens
{
4
,
1
,
3
};
migraphx
::
shape
input
{
migraphx
::
shape
::
float_type
,
{}};
std
::
vector
<
std
::
size_t
>
empt
=
{};
migraphx
::
shape
input
{
migraphx
::
shape
::
float_type
,
empt
};
throws_shape
(
migraphx
::
make_op
(
"multibroadcast"
,
{{
"out_lens"
,
lens
}}),
input
);
}
{
...
...
@@ -1533,15 +1534,46 @@ TEST_CASE(test_squeeze_wrong_axis)
TEST_CASE
(
test_unsqueeze
)
{
migraphx
::
shape
s1
{
migraphx
::
shape
::
float_type
,
{
4
,
3
,
3
}};
migraphx
::
shape
s2
{
migraphx
::
shape
::
float_type
,
{
4
,
3
,
1
,
3
}};
migraphx
::
shape
s1
{
migraphx
::
shape
::
float_type
,
{
4
,
5
,
3
}};
migraphx
::
shape
s2
{
migraphx
::
shape
::
float_type
,
{
4
,
5
,
1
,
3
}};
expect_shape
(
s2
,
migraphx
::
make_op
(
"unsqueeze"
,
{{
"axes"
,
{
2
}}}),
s1
);
}
TEST_CASE(test_unsqueeze_step)
{
    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 12}};
    migraphx::shape s2{migraphx::shape::float_type, {4, 5, 2, 6}};
    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {2}}}), s1);
}
TEST_CASE(test_unsqueeze_step_non_divisable)
{
    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 3}};
    throws_shape(migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {2}}}), s1);
}
TEST_CASE(test_unsqueeze_step_zero)
{
    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 12}};
    throws_shape(migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {0}}}), s1);
}
TEST_CASE(test_unsqueeze_step_at_end)
{
    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 12}};
    throws_shape(migraphx::make_op("unsqueeze", {{"axes", {3}}, {"steps", {2}}}), s1);
}
TEST_CASE(test_unsqueeze_mismatch_step_axis)
{
    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 12}};
    throws_shape(migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {2, 3}}}), s1);
}
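A worked reading of the steps attribute, as the expectations above imply: unsqueeze with axes {2} and steps {2} on lens {4, 5, 12} splits axis 2 into an outer length equal to the step and an inner length of 12 / 2 = 6, giving {4, 5, 2, 6}. The four throwing cases reject a step that does not divide the axis length, a zero step, a step aimed at an appended trailing axis, and a steps list longer than the axes list. A minimal sketch of that rule (a hypothetical helper, not the operator's actual implementation):

#include <cstddef>
#include <stdexcept>
#include <vector>

// split_axis is a hypothetical helper mirroring the unsqueeze step rule
std::vector<std::size_t>
split_axis(std::vector<std::size_t> lens, std::size_t axis, std::size_t step)
{
    if(step == 0 or axis >= lens.size() or lens[axis] % step != 0)
        throw std::runtime_error("invalid unsqueeze step");
    std::size_t inner = lens[axis] / step;
    lens[axis]        = step;
    lens.insert(lens.begin() + axis + 1, inner);
    return lens; // split_axis({4, 5, 12}, 2, 2) == {4, 5, 2, 6}
}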
TEST_CASE(test_unsqueeze_negative_axis)
{
-   migraphx::shape s1{migraphx::shape::float_type, {4, 3, 3}};
-   migraphx::shape s2{migraphx::shape::float_type, {4, 3, 1, 3}};
+   migraphx::shape s1{migraphx::shape::float_type, {4, 5, 3}};
+   migraphx::shape s2{migraphx::shape::float_type, {4, 5, 1, 3}};
    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {-2}}}), s1);
}
...
...
@@ -1567,21 +1599,28 @@ TEST_CASE(test_unsqueeze_scalar_tensor2)
TEST_CASE(test_unsqueeze_transpose)
{
    migraphx::shape s1{migraphx::shape::float_type, {4, 4, 3}, {12, 1, 4}};
-   migraphx::shape s2{migraphx::shape::float_type, {4, 4, 1, 3}, {12, 1, 1, 4}};
+   migraphx::shape s2{migraphx::shape::float_type, {4, 4, 1, 3}, {12, 1, 12, 4}};
    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}}), s1);
}
TEST_CASE(test_unsqueeze_transpose_step)
{
    migraphx::shape s1{migraphx::shape::float_type, {4, 4, 6}, {24, 1, 4}};
    migraphx::shape s2{migraphx::shape::float_type, {4, 4, 2, 3}, {24, 1, 12, 4}};
    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {2}}}), s1);
}
TEST_CASE(test_unsqueeze_multibroadcast)
{
    migraphx::shape s1{migraphx::shape::float_type, {2, 3, 4}, {0, 1, 0}};
-   migraphx::shape s2{migraphx::shape::float_type, {2, 3, 1, 4}, {0, 1, 1, 0}};
+   migraphx::shape s2{migraphx::shape::float_type, {2, 3, 1, 4}, {0, 1, 0, 0}};
    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}}), s1);
}
TEST_CASE(test_unsqueeze_slice)
{
    migraphx::shape s1{migraphx::shape::float_type, {2, 3, 4}, {108, 36, 1}};
-   migraphx::shape s2{migraphx::shape::float_type, {2, 3, 1, 4}, {108, 36, 36, 1}};
+   migraphx::shape s2{migraphx::shape::float_type, {2, 3, 1, 4}, {108, 36, 4, 1}};
    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}}), s1);
}
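Across the three non-standard-stride cases above, the corrected expectations follow one pattern: the inserted length-1 axis takes the stride lens[axis] * strides[axis] of the dimension it displaces, that is 3 * 4 = 12 for the transposed shape, 4 * 0 = 0 for the broadcast shape, and 4 * 1 = 4 for the sliced shape. Stated as a sketch (the pattern these tests imply, not the library's code):

#include <cstddef>
#include <vector>

// Hypothetical illustration of the stride assigned to the new length-1 axis
std::size_t unsqueezed_stride(const std::vector<std::size_t>& lens,
                              const std::vector<std::size_t>& strides,
                              std::size_t axis)
{
    // e.g. lens {4, 4, 3}, strides {12, 1, 4}, axis 2 -> 3 * 4 = 12
    return lens[axis] * strides[axis];
}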
...
...
@@ -1613,6 +1652,27 @@ TEST_CASE(test_unsqueeze_multiple_axes_2)
    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {0, 1}}}), s1);
}
TEST_CASE(test_unsqueeze_multiple_axes_3)
{
    migraphx::shape s1{migraphx::shape::float_type, {3, 4, 5}};
    migraphx::shape s2{migraphx::shape::float_type, {3, 4, 1, 5, 1, 1}};
    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2, 4, 5}}}), s1);
}
TEST_CASE(test_unsqueeze_multiple_axes_4)
{
    migraphx::shape s1{migraphx::shape::float_type, {3, 4, 5}};
    migraphx::shape s2{migraphx::shape::float_type, {3, 4, 1, 5, 1, 1}};
    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {5, 4, 2}}}), s1);
}
TEST_CASE(test_unsqueeze_multiple_axes_step)
{
    migraphx::shape s1{migraphx::shape::float_type, {3, 4, 10}};
    migraphx::shape s2{migraphx::shape::float_type, {3, 4, 2, 5, 1, 1}};
    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2, 4, 5}}, {"steps", {2}}}), s1);
}
TEST_CASE(transpose_shape)
{
    migraphx::shape input{migraphx::shape::float_type, {2, 2}};
...
...
test/shape_test.cpp
...
...
@@ -38,7 +38,6 @@ TEST_CASE(test_shape_default)
    EXPECT(s.elements() == 0);
    EXPECT(s.bytes() == 0);
}
TEST_CASE(test_shape_assign)
{
    migraphx::shape s1{migraphx::shape::float_type, {100, 32, 8, 8}};
...
...
@@ -65,6 +64,118 @@ TEST_CASE(test_shape_standard)
    EXPECT(not s.broadcasted());
}
TEST_CASE(test_shape_min_max_opt)
{
    migraphx::shape s{migraphx::shape::float_type, {2, 2, 3}, {6, 3, 1}};
    EXPECT(s.min_lens() == s.lens());
    EXPECT(s.max_lens() == s.lens());
    EXPECT(s.opt_lens() == s.lens());
}
TEST_CASE(test_shape_dynamic_fixed)
{
    migraphx::shape s{migraphx::shape::float_type, {{2, 2, 0}, {2, 2, 0}, {3, 3, 0}}};
    EXPECT(not s.standard());
    EXPECT(not s.packed());
    EXPECT(not s.transposed());
    EXPECT(not s.broadcasted());
    EXPECT(s.dynamic());
    EXPECT(s.dyn_dims().size() == 3);
    EXPECT(s.dyn_dims().at(0).is_fixed());
    EXPECT(not s.dyn_dims().at(0).has_optimal());
    EXPECT(s.min_lens() == std::vector<std::size_t>{2, 2, 3});
    EXPECT(s.max_lens() == std::vector<std::size_t>{2, 2, 3});
    EXPECT(s.opt_lens() == std::vector<std::size_t>{0, 0, 0});
    EXPECT(s.bytes() == 2 * 2 * 3 * sizeof(float));
}
TEST_CASE(test_shape_dynamic_not_fixed)
{
    using migraphx::shape;
    std::vector<shape::dynamic_dimension> dims = {};
    dims.push_back(shape::dynamic_dimension{2, 5, 2});
    dims.push_back(shape::dynamic_dimension{2, 8, 0});
    migraphx::shape s{migraphx::shape::float_type, dims};
    EXPECT(not s.standard());
    EXPECT(not s.packed());
    EXPECT(not s.transposed());
    EXPECT(not s.broadcasted());
    EXPECT(s.dynamic());
    EXPECT(s.dyn_dims().size() == 2);
    EXPECT(not s.dyn_dims().at(0).is_fixed());
    EXPECT(s.dyn_dims().at(0).has_optimal());
    EXPECT(s.min_lens() == std::vector<std::size_t>{2, 2});
    EXPECT(s.max_lens() == std::vector<std::size_t>{5, 8});
    EXPECT(s.opt_lens() == std::vector<std::size_t>{2, 0});
    EXPECT(s.bytes() == 5 * 8 * sizeof(float));
}
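These dynamic-shape tests pin down the dynamic_dimension triple {min, max, optimal}: min_lens(), max_lens(), and opt_lens() project one field per axis, with 0 standing in for no optimal value, and bytes() is sized from the maximum lengths so a single allocation fits every admissible shape, hence 5 * 8 * sizeof(float) above. A minimal sketch of that worst-case sizing, assuming it generalizes the way the test suggests:

#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>

// Worst-case byte count for a dynamic shape (illustration, not library code)
std::size_t worst_case_bytes(const std::vector<std::size_t>& max_lens,
                             std::size_t type_size)
{
    // worst_case_bytes({5, 8}, sizeof(float)) == 160
    return std::accumulate(
        max_lens.begin(), max_lens.end(), type_size, std::multiplies<>{});
}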
TEST_CASE(test_shape_dynamic_compares)
{
    using migraphx::shape;
    auto a = shape::dynamic_dimension{2, 5, 2};
    auto b = a;
    auto c = shape::dynamic_dimension{2, 5, 2};
    auto d = shape::dynamic_dimension{3, 8, 4};
    EXPECT(a == b);
    EXPECT(a == c);
    EXPECT(a != d);
    migraphx::shape s0{shape::float_type, {a, d}};
    migraphx::shape s1 = s0;
    migraphx::shape s2{shape::float_type, {a, d}};
    migraphx::shape s3{shape::int32_type, {a}};
    EXPECT(s0 == s1);
    EXPECT(s0 == s2);
    EXPECT(s0 != s3);
    std::stringstream ss0;
    std::stringstream ss1;
    std::stringstream ss3;
    ss0 << s0;
    ss1 << s1;
    ss3 << s3;
    EXPECT(ss0.str() == ss1.str());
    EXPECT(ss0.str() != ss3.str());
}
TEST_CASE(test_shape_dynamic_errors)
{
    using migraphx::shape;
    std::vector<shape::dynamic_dimension> dims = {};
    dims.push_back(shape::dynamic_dimension{2, 5, 2});
    dims.push_back(shape::dynamic_dimension{2, 8, 0});
    migraphx::shape s{shape::float_type, dims};
    EXPECT(test::throws([&] { s.elements(); }));
    EXPECT(test::throws([&] { s.index({0, 1}); }));
    EXPECT(test::throws([&] { s.index(1); }));
    EXPECT(test::throws([&] { s.index(std::vector<std::size_t>{0, 1}); }));
    EXPECT(test::throws([&] { s.with_lens({3, 5}); }));
    EXPECT(test::throws([&] { s.with_lens(shape::float_type, {3, 5}); }));
}
TEST_CASE(test_shape_dynamic_serialize)
{
    using migraphx::shape;
    std::vector<shape::dynamic_dimension> dims1 = {};
    dims1.push_back(shape::dynamic_dimension{2, 5, 2});
    dims1.push_back(shape::dynamic_dimension{2, 8, 0});
    migraphx::shape s1{shape::float_type, dims1};
    auto v1 = migraphx::to_value(s1);
    std::vector<shape::dynamic_dimension> dims2 = {};
    dims2.push_back(shape::dynamic_dimension{2, 5, 2});
    migraphx::shape s2{shape::uint64_type, dims2};
    auto v2 = migraphx::to_value(s2);
    EXPECT(v1 != v2);
    auto s3 = migraphx::from_value<shape>(v1);
    EXPECT(s3 == s1);
    auto s4 = migraphx::from_value<shape>(v2);
    EXPECT(s4 == s2);
    EXPECT(s3 != s4);
}
TEST_CASE(test_shape_packed)
{
    migraphx::shape s{migraphx::shape::float_type, {2, 2}, {2, 1}};
...
...
test/simplify_reshapes_test.cpp
...
...
@@ -1141,4 +1141,138 @@ TEST_CASE(transpose_contiguous_reshape_binary_broadcast)
    EXPECT(m1 == m2);
}
TEST_CASE(transpose_unsqueeze_concat)
{
    migraphx::module m1;
    {
        auto l0 = m1.add_parameter(
            "0", migraphx::shape{migraphx::shape::float_type, {1, 2, 1, 1}});
        auto lt0 = m1.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 3, 1}}}), l0);
        auto l1 = m1.add_parameter(
            "1", migraphx::shape{migraphx::shape::float_type, {1, 2, 1, 1}});
        auto lt1 = m1.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 3, 1}}}), l1);
        auto l2 = m1.add_parameter(
            "2", migraphx::shape{migraphx::shape::float_type, {1, 2, 1, 1}});
        auto lt2 = m1.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 3, 1}}}), l2);
        std::vector<migraphx::instruction_ref> args{lt0, lt1, lt2};
        std::vector<migraphx::instruction_ref> unsqueezed_args;
        int64_t axis = 3;
        std::transform(args.begin(),
                       args.end(),
                       std::back_inserter(unsqueezed_args),
                       [&](migraphx::instruction_ref arg) {
                           return m1.add_instruction(
                               migraphx::make_op("unsqueeze", {{"axes", {axis}}}), arg);
                       });
        m1.add_instruction(migraphx::make_op("concat", {{"axis", axis}}), unsqueezed_args);
    }
    // TODO: This could be simplified to a single transpose after concat
    migraphx::module m2 = m1;
    run_pass(m1);
    EXPECT(m1 == m2);
}
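For reference, the single-transpose form the TODO above points at would unsqueeze each input at the end, concat once, and transpose once; on the {1, 2, 1, 1} inputs this also produces a {1, 1, 1, 3, 2} result. A hypothetical sketch of that target module (m and l0..l2 reuse the test's names; the pass does not emit this today):

auto u0  = m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {4}}}), l0);
auto u1  = m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {4}}}), l1);
auto u2  = m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {4}}}), l2);
auto cat = m.add_instruction(migraphx::make_op("concat", {{"axis", 4}}), u0, u1, u2);
// one transpose after the concat: {1, 2, 1, 1, 3} -> {1, 1, 1, 3, 2}
m.add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 2, 3, 4, 1}}}), cat);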
TEST_CASE(transpose_slice)
{
    migraphx::module m1;
    {
        auto x = m1.add_parameter("x", {migraphx::shape::float_type, {1, 384, 36, 64}});
        auto slice1 = m1.add_instruction(
            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {0}}, {"ends", {12}}}), x);
        auto transpose1 = m1.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice1);
        auto slice2 = m1.add_instruction(
            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {12}}, {"ends", {24}}}), x);
        auto transpose2 = m1.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice2);
        auto slice3 = m1.add_instruction(
            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {24}}, {"ends", {36}}}), x);
        auto transpose3 = m1.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice3);
        m1.add_return({transpose1, transpose2, transpose3});
    }
    run_pass(m1);
    migraphx::module m2;
    {
        auto x = m2.add_parameter("x", {migraphx::shape::float_type, {1, 384, 36, 64}});
        // After the pass the common transpose is hoisted above the slices, and the
        // slice axis is remapped through the permutation (axis 2 becomes axis 1)
        auto transpose = m2.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), x);
        auto slice1 = m2.add_instruction(
            migraphx::make_op("slice", {{"axes", {1}}, {"starts", {0}}, {"ends", {12}}}), transpose);
        auto slice2 = m2.add_instruction(
            migraphx::make_op("slice", {{"axes", {1}}, {"starts", {12}}, {"ends", {24}}}), transpose);
        auto slice3 = m2.add_instruction(
            migraphx::make_op("slice", {{"axes", {1}}, {"starts", {24}}, {"ends", {36}}}), transpose);
        m2.add_return({slice1, slice2, slice3});
    }
    EXPECT(m1 == m2);
}
TEST_CASE(transpose_slice_diff_perm)
{
    migraphx::module m1;
    {
        auto x = m1.add_parameter("x", {migraphx::shape::float_type, {1, 384, 36, 64}});
        auto slice1 = m1.add_instruction(
            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {0}}, {"ends", {12}}}), x);
        auto transpose1 = m1.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice1);
        auto slice2 = m1.add_instruction(
            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {12}}, {"ends", {24}}}), x);
        auto transpose2 = m1.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 3, 1}}}), slice2);
        auto slice3 = m1.add_instruction(
            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {24}}, {"ends", {36}}}), x);
        auto transpose3 = m1.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice3);
        m1.add_return({transpose1, transpose2, transpose3});
    }
    run_pass(m1);
    migraphx::module m2;
    {
        auto x = m2.add_parameter("x", {migraphx::shape::float_type, {1, 384, 36, 64}});
        // The majority permutation {0, 2, 1, 3} is hoisted above the slices; the odd
        // branch keeps a fix-up transpose, since {0, 2, 3, 1} equals {0, 2, 1, 3}
        // followed by {0, 1, 3, 2}
        auto transpose = m2.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), x);
        auto slice1 = m2.add_instruction(
            migraphx::make_op("slice", {{"axes", {1}}, {"starts", {0}}, {"ends", {12}}}), transpose);
        auto slice2 = m2.add_instruction(
            migraphx::make_op("slice", {{"axes", {1}}, {"starts", {12}}, {"ends", {24}}}), transpose);
        auto transpose2 = m2.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 1, 3, 2}}}), slice2);
        auto slice3 = m2.add_instruction(
            migraphx::make_op("slice", {{"axes", {1}}, {"starts", {24}}, {"ends", {36}}}), transpose);
        m2.add_return({slice1, transpose2, slice3});
    }
    EXPECT(m1 == m2);
}
TEST_CASE(transpose_slice_single_transpose)
{
    migraphx::module m1;
    {
        auto x = m1.add_parameter("x", {migraphx::shape::float_type, {1, 384, 36, 64}});
        auto slice1 = m1.add_instruction(
            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {0}}, {"ends", {12}}}), x);
        auto sqrt1 = m1.add_instruction(migraphx::make_op("sqrt"), slice1);
        auto slice2 = m1.add_instruction(
            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {12}}, {"ends", {24}}}), x);
        auto transpose = m1.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice2);
        auto slice3 = m1.add_instruction(
            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {24}}, {"ends", {36}}}), x);
        auto sqrt3 = m1.add_instruction(migraphx::make_op("sqrt"), slice3);
        m1.add_return({sqrt1, transpose, sqrt3});
    }
    // Only one of the three slices feeds a transpose, so the pass should not rewrite anything
    migraphx::module m2 = m1;
    run_pass(m1);
    EXPECT(m1 == m2);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/verify/run_verify.cpp
...
...
@@ -30,6 +30,7 @@
#include <migraphx/ranges.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/load_save.hpp>
#include <migraphx/tmp_dir.hpp>
#include <migraphx/verify_args.hpp>
#include <set>
...
...
@@ -57,6 +58,15 @@ std::future<typename std::result_of<Function()>::type> detach_async(Function&& f
    return std::async(std::launch::deferred, std::forward<Function>(f));
}
inline void verify_load_save(const migraphx::program& p)
{
    // Round-trip the program through the on-disk MXR format and check that
    // it reloads identically
    migraphx::tmp_dir td{"migraphx_test"};
    auto path = td.path / "test.mxr";
    migraphx::save(p, path.string());
    auto loaded = migraphx::load(path.string());
    EXPECT(p == loaded);
}
inline void compile_check(migraphx::program& p, const migraphx::target& t, bool show_trace = false)
{
    auto name = t.name();
...
...
@@ -82,6 +92,8 @@ inline void compile_check(migraphx::program& p, const migraphx::target& t, bool
            throw std::runtime_error("Compiling program with " + name + " alters its shape");
        }
    }
    if(t.name() != "ref")
        verify_load_save(p);
}
target_info run_verify::get_target_info(const std::string& name) const
...
...
@@ -152,6 +164,7 @@ void run_verify::verify(const std::string& name, const migraphx::program& p) con
    auto_print::set_terminate_handler(name);
    if(migraphx::enabled(MIGRAPHX_DUMP_TEST{}))
        migraphx::save(p, name + ".mxr");
    verify_load_save(p);
    std::vector<std::string> target_names;
    for(const auto& tname : migraphx::get_targets())
    {
...
...
test/verify/test_conv_add_relu.cpp
0 → 100644
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>
struct test_conv_add_relu : verify_program<test_conv_add_relu>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        auto input =
            mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
        auto weights =
            mm->add_parameter("w", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
        auto bias_literal = migraphx::literal{migraphx::shape{migraphx::shape::float_type, {4}},
                                              {2.0f, 2.0f, 2.0f, 2.0f}};
        auto bias = mm->add_literal(bias_literal);
        auto conv = mm->add_instruction(migraphx::make_op("convolution"), input, weights);
        // Broadcast the per-channel bias across the convolution output before the add
        auto bcast_bias = mm->add_instruction(
            migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", conv->get_shape().lens()}}),
            bias);
        auto bias_add = mm->add_instruction(migraphx::make_op("add"), conv, bcast_bias);
        mm->add_instruction(migraphx::make_op("relu"), bias_add);
        return p;
    }
};
tools/include/target.hpp
...
...
@@ -37,6 +37,8 @@
#include <migraphx/compile_options.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/rank.hpp>
#include <migraphx/support_metric.hpp>
#include <migraphx/instruction_ref.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
...
...
@@ -61,6 +63,13 @@ struct target
* @return The context to be used during compilation and execution.
*/
context get_context() const;
/**
* @brief Check how well an instruction is supported on a target with the given metric
* @param ins Instruction to check if it's supported
* @param metric Used to define how the return value should be interpreted
* @return The value based on the chosen metric. Negative numbers mean unsupported
*/
float is_supported(T&, instruction_ref ins, support_metric m) const;
/**
* @brief copy an argument to the current target.
*
...
...
@@ -105,11 +114,18 @@ argument copy_from_target(T&, const argument& arg)
    return arg;
}

template <class T>
float target_is_supported(T&, instruction_ref, support_metric)
{
    return 0;
}
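Because the interface entry below registers target_is_supported as the default for is_supported, a concrete target opts in by providing an overload that argument-dependent lookup prefers to this template. A hedged sketch, where custom_target and its namespace are invented for illustration:

#include <migraphx/instruction.hpp>
#include <migraphx/instruction_ref.hpp>
#include <migraphx/support_metric.hpp>

namespace mylib {
struct custom_target;

// Picked over the generic template for custom_target; per the doc comment
// above, a negative value means the instruction is unsupported
float target_is_supported(custom_target&, migraphx::instruction_ref ins, migraphx::support_metric)
{
    return ins->name() == "convolution" ? 1.0f : -1.0f;
}
} // namespace mylib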
<%
interface('target',
          virtual('name', returns='std::string', const=True),
          virtual('get_passes', ctx='context&', options='const compile_options&', returns='std::vector<pass>', const=True),
          virtual('get_context', returns='context', const=True),
          virtual('is_supported', returns='float', ins='instruction_ref', m='support_metric', const=True, default='target_is_supported'),
          virtual('copy_to', returns='argument', input='const argument&',
...
...