gaoqiong/MIGraphX, commit a0edd061
Authored Jul 11, 2022 by Paul
Merge branch 'develop' into jit-improve
Parents: 6deee23b, 2781ccd8
Changes: 59

Showing 19 changed files with 1466 additions and 351 deletions (+1466 -351)
src/targets/gpu/kernels/include/migraphx/kernels/reduce.hpp   +34   -0
src/targets/gpu/kernels/include/migraphx/kernels/shape.hpp    +1    -0
src/targets/gpu/kernels/include/migraphx/kernels/softmax.hpp  +45   -0
src/targets/gpu/kernels/include/migraphx/kernels/vec.hpp      +2    -0
src/targets/gpu/lowering.cpp                                  +2    -3
src/targets/gpu/mlir.cpp                                      +647  -0
src/targets/gpu/mlir_conv.cpp                                 +0    -315
src/targets/gpu/quant_convolution.cpp                         +61   -21
src/targets/gpu/target.cpp                                    +3    -2
test/eliminate_contiguous_test.cpp                            +20   -0
test/get_target_assignments.cpp                               +61   -0
test/gpu/mlir.cpp                                             +194  -0
test/op_shape_test.cpp                                        +68   -8
test/shape_test.cpp                                           +112  -1
test/simplify_reshapes_test.cpp                               +134  -0
test/verify/run_verify.cpp                                    +13   -0
test/verify/test_conv_add_relu.cpp                            +52   -0
tools/include/target.hpp                                      +16   -0
tools/te.py                                                   +1    -1
src/targets/gpu/kernels/include/migraphx/kernels/reduce.hpp

@@ -175,6 +175,21 @@ constexpr auto sliced(Slicer slicer, F f)
     };
 }
 
+template <class Input, index_int Axis>
+constexpr auto compute_reduce_axis()
+{
+    constexpr auto lens =
+        transform_i(get_shape_c<Input>{}.lens, [](index_int x, index_int i) -> index_int {
+            if(i == Axis)
+                return 1;
+            return x;
+        });
+    return make_shape(lens, get_shape_c<Input>{}.strides);
+}
+
+template <class Input, index_int Axis>
+using with_axis = decltype(compute_reduce_axis<Input, Axis>());
+
 struct block
 {
     template <class Slicer>
@@ -201,6 +216,14 @@ struct block
         if(idx.local == 0)
             f();
     }
+
+    template <class F>
+    __device__ auto inner(F f) const
+    {
+        return sliced(slicer, [=](auto x, auto... xs) {
+            idx.local_stride(x.get_shape().elements(), [&](auto j) { f(x[j], xs[j]...); });
+        });
+    }
 };
 
 template <class Slicer>
@@ -247,6 +270,17 @@ struct lane
     {
         f();
     }
+
+    template <class F>
+    __device__ auto inner(F f) const
+    {
+        return sliced(slicer, [=](auto x, auto... xs) {
+            for(index_int j = 0; j < x.get_shape().elements(); j++)
+            {
+                f(x[j], xs[j]...);
+            }
+        });
+    }
 };
 
 template <class Slicer>
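A note on the hunk above: compute_reduce_axis builds the shape used for a reduction by collapsing the reduced axis to length 1 while keeping the input's strides. A standalone host-side sketch of that lens transform (illustrative values only, not from the commit):

#include <array>
#include <cstdio>

// Mirrors the lens transform in compute_reduce_axis on a plain std::array:
// the reduced axis's length becomes 1; strides are carried over unchanged,
// as make_shape(lens, get_shape_c<Input>{}.strides) does in the kernel header.
int main()
{
    std::array<int, 3> lens{2, 4, 8};
    const std::array<int, 3> strides{32, 8, 1}; // kept as-is
    const int axis = 1;
    for(int i = 0; i < 3; i++)
        if(i == axis)
            lens[i] = 1;
    std::printf("reduced lens: {%d, %d, %d}, strides: {%d, %d, %d}\n",
                lens[0], lens[1], lens[2], strides[0], strides[1], strides[2]);
    return 0;
}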
src/targets/gpu/kernels/include/migraphx/kernels/shape.hpp

@@ -32,6 +32,7 @@ namespace migraphx {
 template <class Lens, class Strides>
 struct shape
 {
     using shape_type  = shape;
+    using index_array = typename Lens::base_array;
     Lens lens         = {};
     Strides strides   = {};
src/targets/gpu/kernels/include/migraphx/kernels/softmax.hpp (new file, 0 → 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_KERNELS_SOFTMAX_HPP
#define MIGRAPHX_GUARD_KERNELS_SOFTMAX_HPP
#include <migraphx/kernels/reduce.hpp>
#include <migraphx/kernels/ops.hpp>

namespace migraphx {

template <index_int Axis, class Input, class Output>
__device__ void softmax(Input input, Output output)
{
    reduce::block::run<reduce::with_axis<Input, Axis>>([&](auto, auto r) {
        auto batch_max = r.reduce(op::max{}, lowest{}, op::id{})(input);
        auto batch_sum =
            r.reduce(op::sum{}, 0, [&](auto x) { return migraphx::exp(x - batch_max); })(input);
        r.inner([&](auto& y, auto x) { y = migraphx::exp(x - batch_max) / batch_sum; })(output, input);
    });
}

} // namespace migraphx
#endif // MIGRAPHX_GUARD_KERNELS_SOFTMAX_HPP
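For readers skimming the kernel above: it computes a numerically stable softmax over the reduced axis, first reducing to the batch max, then summing exp(x - max), then normalizing. A host-side analogue of the same arithmetic, as a sketch (plain C++, not the device code):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
    std::vector<float> x{1.0f, 2.0f, 3.0f};
    // Step 1: batch max (op::max over the reduced axis in the kernel)
    float batch_max = *std::max_element(x.begin(), x.end());
    // Step 2: sum of exp(x - max); the shift keeps exp() from overflowing
    float batch_sum = 0.0f;
    for(float v : x)
        batch_sum += std::exp(v - batch_max);
    // Step 3: normalize each element, matching the r.inner lambda
    for(float& v : x)
        v = std::exp(v - batch_max) / batch_sum;
    std::printf("%.3f %.3f %.3f\n", x[0], x[1], x[2]); // ~0.090 0.245 0.665
    return 0;
}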
src/targets/gpu/kernels/include/migraphx/kernels/vec.hpp

@@ -27,6 +27,8 @@
 #include <migraphx/kernels/types.hpp>
 #include <migraphx/kernels/integral_constant.hpp>
 #include <migraphx/kernels/functional.hpp>
+#include <migraphx/kernels/type_traits.hpp>
+#include <migraphx/kernels/debug.hpp>
 
 namespace migraphx {
src/targets/gpu/lowering.cpp

@@ -186,7 +186,6 @@ struct miopen_apply
         add_extend_op("rnn_var_sl_shift_output");
         add_extend_op("rnn_var_sl_shift_sequence");
         add_extend_op("scatter_none");
-        add_extend_op("softmax");
         add_extend_op("topk");
 
         add_batch_norm_inference_op();
@@ -301,7 +300,7 @@ struct miopen_apply
         auto&& op = any_cast<op::deconvolution>(ins->get_operator());
         auto conv = miopen_deconvolution{op, make_deconv(op)};
-        auto ws   = conv.compile(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
+        auto ws   = conv.find(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
 
         auto workspace = insert_allocation(ins, ws);
         auto output    = insert_allocation(ins, ins->get_shape());
@@ -332,7 +331,7 @@ struct miopen_apply
         miopen_quant_convolution conv;
         auto compile_quant_conv_with_format = [&](bool format) {
             conv = miopen_quant_convolution{op, format, make_conv(op)};
-            ws   = conv.compile(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
+            ws   = conv.find(get_context(), ins->get_shape(), to_shapes(ins->inputs()));
         };
 
         try
src/targets/gpu/mlir.cpp (new file, 0 → 100644)

This diff is collapsed in the page capture (647 additions, not shown).
src/targets/gpu/mlir_conv.cpp (deleted, 100644 → 0)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/gpu/mlir_conv.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/convolution.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/program.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/program.hpp>
#include <migraphx/gpu/kernel.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/gpu/compile_hip.hpp>
#include <utility>
#include <functional>
#include <algorithm>
#ifdef MIGRAPHX_MLIR_MIOPEN_SUPPORT
#include <Miir.h>
#endif // MIGRAPHX_MLIR_MIOPEN_SUPPORT
#include <cstdio>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

struct mlir_apply
{
    module* mod                  = nullptr;
    const mlir_conv* pass        = nullptr;
    const char* mlir_kernel_name = "migraphx_conv2d";
    std::unordered_map<uint64_t, instruction_ref> literal_map{};

    struct execution_spec
    {
        migraphx::value::binary binary;
        size_t global_size;
        size_t local_size;
        execution_spec(migraphx::value::binary&& binary_m, size_t global_s, size_t local_s)
            : binary(std::move(binary_m)), global_size(global_s), local_size(local_s)
        {
        }
    };
    std::unordered_map<std::string, std::shared_ptr<execution_spec>> binary_map{};

    context& get_context() const
    {
        assert(pass != nullptr);
        assert(pass->ctx != nullptr);
        return *pass->ctx;
    }

    void init() const
    {
        assert(mod != nullptr);
        assert(pass != nullptr);
    }

    std::shared_ptr<execution_spec> make_mlir_binary(instruction_ref op_r)
    {
        std::shared_ptr<execution_spec> result;
#ifdef MIGRAPHX_MLIR_MIOPEN_SUPPORT
        auto conv  = any_cast<op::convolution>(op_r->get_operator());
        auto inp_t = op_r->inputs().at(0)->get_shape();
        auto flt_t = op_r->inputs().at(1)->get_shape();
        auto out_t = op_r->get_shape();

        auto get_type_str = [](const shape& s) -> const char* {
            switch(s.type())
            {
            case shape::float_type: return "f32";
            case shape::half_type: return "f16";
            case shape::bool_type:
            case shape::double_type:
            case shape::uint8_type:
            case shape::int8_type:
            case shape::uint16_type:
            case shape::int16_type:
            case shape::int32_type:
            case shape::int64_type:
            case shape::uint32_type:
            case shape::uint64_type:
            case shape::tuple_type: break;
            }
            return nullptr;
        };
        const auto* inp_t_s = get_type_str(inp_t);
        const auto* flt_t_s = get_type_str(flt_t);
        const auto* out_t_s = get_type_str(out_t);
        if(out_t_s == nullptr || inp_t_s == nullptr || flt_t_s == nullptr)
            return result;

        std::string mlir_options = "--kernel_name " + std::string(mlir_kernel_name);

        // platform spec
        auto& device = get_context().get_current_device();
        char dev_name[64];
        sprintf(dev_name, "gfx%lu%02lu", device.get_device_major(), device.get_device_minor());
        mlir_options += " --arch " + std::string(dev_name) + " --num_cu " +
                        std::to_string(device.get_cu_count()); // ???

        // Conv spec
        mlir_options += " --operation conv2d"
                        " --batchsize " + std::to_string(conv.group) +
                        " --groupsize " + std::to_string(1) +
                        " --padding_h " + std::to_string(conv.padding[0]) +
                        " --padding_w " + std::to_string(conv.padding[1]) +
                        " --conv_stride_h " + std::to_string(conv.stride[0]) +
                        " --conv_stride_w " + std::to_string(conv.stride[1]) +
                        " --dilation_h " + std::to_string(conv.dilation[0]) +
                        " --dilation_w " + std::to_string(conv.dilation[1]);
        // Input spec
        mlir_options += " --in_layout NCHWG"
                        " --in_type " + std::string(inp_t_s) +
                        " --in_channels " + std::to_string(inp_t.lens()[1]) +
                        " --in_h " + std::to_string(inp_t.lens()[2]) +
                        " --in_w " + std::to_string(inp_t.lens()[3]);
        // Filter spec
        mlir_options += " --fil_layout NCHWG"
                        " --fil_type " + std::string(flt_t_s) +
                        " --fil_h " + std::to_string(flt_t.lens()[2]) +
                        " --fil_w " + std::to_string(flt_t.lens()[3]);
        // Output spec
        mlir_options += " --out_layout NCHWG"
                        " --out_type " + std::string(out_t_s) +
                        " --out_channels " + std::to_string(out_t.lens()[1]) +
                        " --out_h " + std::to_string(out_t.lens()[2]) +
                        " --out_w " + std::to_string(out_t.lens()[3]);

        auto bin_i = binary_map.find(mlir_options);
        if(bin_i == binary_map.end())
        {
            size_t bin_size   = 0;
            using mlir_handle = MIGRAPHX_MANAGE_PTR(MiirHandle, miirDestroyHandle);
            auto handle       = mlir_handle(miirCreateHandle(mlir_options.c_str()));
            if(miirLowerBin(handle.get()) == MIIR_SUCCESS &&
               miirBufferGet(handle.get(), nullptr, &bin_size) == MIIR_SUCCESS)
            {
                migraphx::value::binary bin(bin_size);
                if(miirBufferGet(handle.get(), reinterpret_cast<char*>(bin.data()), &bin_size) ==
                   MIIR_SUCCESS)
                {
                    size_t global_size;
                    size_t block_size;
                    if(miirGetExecutionDims(handle.get(), &global_size, &block_size) ==
                       MIIR_SUCCESS)
                    {
                        result = std::make_shared<execution_spec>(
                            std::move(bin), global_size, block_size);
                    }
                }
            }
            binary_map[mlir_options] = result;
        }
        else
        {
            result = bin_i->second;
        }
#else // MIGRAPHX_MLIR_MIOPEN_SUPPORT
        (void)op_r;
#endif // MIGRAPHX_MLIR_MIOPEN_SUPPORT
        return result;
    }

    instruction_ref get_literal(uint64_t value)
    {
        auto fi = literal_map.find(value);
        if(fi != literal_map.end())
            return fi->second;
        auto lit = mod->add_literal(value);
        literal_map.emplace(value, lit);
        return lit;
    }

    operation make_code_object_op(instruction_ref op_r,
                                  const std::shared_ptr<execution_spec>& spec)
    {
        // each pointer is expanded out to a MemRefDescriptor
        auto inp_t = op_r->inputs().at(0)->get_shape();
        auto flt_t = op_r->inputs().at(1)->get_shape();
        auto out_t = op_r->get_shape();
        auto i64   = shape(shape::uint64_type);
        std::vector<shape> expected_inputs = {
            flt_t, flt_t, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64,
            inp_t, inp_t, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64,
            out_t, out_t, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64,
            out_t};
        return migraphx::make_op("gpu::code_object",
                                 {
                                     {"code_object", spec->binary},
                                     {"symbol_name", mlir_kernel_name},
                                     {"global", spec->global_size},
                                     {"local", spec->local_size},
                                     {"expected_inputs", migraphx::to_value(expected_inputs)},
                                     {"output", migraphx::to_value(out_t)},
                                 });
    }

    void add_memref_descriptor(std::vector<instruction_ref>& refs, instruction_ref inst)
    {
        const size_t offset = 0;
        auto inst_t         = inst->get_shape();
        refs.push_back(inst);
        refs.push_back(inst);
        refs.push_back(get_literal(offset)); // offset
        // dim sizes
        std::transform(inst_t.lens().begin(),
                       inst_t.lens().end(),
                       std::back_inserter(refs),
                       [&](const auto& lval) { return get_literal(lval); });
        refs.push_back(get_literal(1)); // G
        // dim strides
        std::transform(inst_t.strides().begin(),
                       inst_t.strides().end(),
                       std::back_inserter(refs),
                       [&](const auto& lval) { return get_literal(lval); });
        refs.push_back(get_literal(1)); // G
    }

    instruction_ref insert_allocation(instruction_ref ins, const shape& s) const
    {
        return mod->insert_instruction(ins, hip_allocate{s});
    }

    void replace_conv_op(instruction_ref ins)
    {
        auto conv_bin = make_mlir_binary(ins);
        if(conv_bin)
        {
            auto conv = make_code_object_op(ins, conv_bin);
            auto inp  = ins->inputs().at(0);
            auto flt  = ins->inputs().at(1);
            auto out  = insert_allocation(ins, ins->get_shape());

            std::vector<instruction_ref> refs;
            refs.reserve(3 * 13 + 1);
            add_memref_descriptor(refs, flt);
            add_memref_descriptor(refs, inp);
            add_memref_descriptor(refs, out);
            refs.push_back(out);

            mod->replace_instruction(ins, conv, refs);
        }
    }

    void apply()
    {
        init();
        for(auto it : iterator_for(*mod))
        {
            if(it->name() == "convolution")
            {
                replace_conv_op(it);
            }
        }
    }
};

void mlir_conv::apply(module& m) const { mlir_apply{&m, this}.apply(); }

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
src/targets/gpu/quant_convolution.cpp

@@ -67,9 +67,9 @@ argument miopen_quant_convolution::compute(context& ctx,
     return args[3];
 }
 
-shape miopen_quant_convolution::compile(context& ctx,
-                                        const shape& output_shape,
-                                        std::vector<shape> inputs)
+shape miopen_quant_convolution::find(context& ctx,
+                                     const shape& output_shape,
+                                     std::vector<shape> inputs)
 {
     shape workspace_shape{};
 
     auto x_desc = make_tensor(inputs[0], int8_x4_format);
@@ -92,18 +92,18 @@ shape miopen_quant_convolution::compile(context& ctx,
         x_shape = pack_int8_shape(x_shape);
         w_shape = pack_int8_shape(w_shape);
     }
-    auto arg_vec4_x = to_gpu(generate_argument(x_shape));
-    auto arg_vec4_w = to_gpu(generate_argument(w_shape));
-    auto y          = allocate_gpu(output_shape);
-    auto workspace  = allocate_gpu(workspace_shape);
+    auto x         = to_gpu(generate_argument(x_shape));
+    auto w         = to_gpu(generate_argument(w_shape));
+    auto y         = allocate_gpu(output_shape);
+    auto workspace = allocate_gpu(workspace_shape);
 
     int algo_count = 1;
     miopenConvAlgoPerf_t perf;
     auto status = miopenFindConvolutionForwardAlgorithm(ctx.get_stream().get_miopen(),
                                                         x_desc.get(),
-                                                        arg_vec4_x.implicit(),
+                                                        x.implicit(),
                                                         w_desc.get(),
-                                                        arg_vec4_w.implicit(),
+                                                        w.implicit(),
                                                         cd.get(),
                                                         y_desc.get(),
                                                         y.implicit(),
@@ -114,11 +114,35 @@ shape miopen_quant_convolution::compile(context& ctx,
                                                         workspace_size,
                                                         false);
     if(status != miopenStatusSuccess)
-    {
-        MIGRAPHX_THROW("QUANT_CONVOLUTION: find convolution failed");
-    }
-    handle = ctx.get_stream().get_miopen();
-    algo   = perf.fwd_algo;
+        MIGRAPHX_THROW("MIOpen Quant Convolution: find convolution failed");
+    algo = perf.fwd_algo;
+
+    size_t solution_count;
+
+    status = miopenConvolutionForwardGetSolutionCount(ctx.get_stream().get_miopen(),
+                                                      w_desc.get(),
+                                                      x_desc.get(),
+                                                      cd.get(),
+                                                      y_desc.get(),
+                                                      &solution_count);
+    if(status != miopenStatusSuccess)
+        MIGRAPHX_THROW("MIOpen Quant Convolution: get solution count failed");
+
+    std::vector<miopenConvSolution_t> solutions(solution_count);
+
+    status = miopenConvolutionForwardGetSolution(ctx.get_stream().get_miopen(),
+                                                 w_desc.get(),
+                                                 x_desc.get(),
+                                                 cd.get(),
+                                                 y_desc.get(),
+                                                 solution_count,
+                                                 &solution_count,
+                                                 solutions.data());
+    if(status != miopenStatusSuccess)
+        MIGRAPHX_THROW("MIOpen Quant Convolution: get solution failed");
+    solution_id = solutions.front().solution_id;
+
     return shape{shape::int8_type, {perf.memory}};
 }
@@ -126,13 +150,29 @@ void miopen_quant_convolution::finalize(context& ctx,
                                         const shape& output_shape,
                                         std::vector<shape> inputs)
 {
-    if(handle == ctx.get_stream().get_miopen())
-        return;
-    // Check that workspace hasn't changed
-    auto size = inputs.at(2).bytes();
-    auto ws   = compile(ctx, output_shape, std::move(inputs));
-    if(ws.bytes() > size)
-        MIGRAPHX_THROW("Workspace has changed during finalization.");
+    if(cd == nullptr)
+        cd = make_conv(op);
+    if(solution_id == 0)
+    {
+        // Check that workspace hasn't changed
+        auto size = inputs.at(2).bytes();
+        auto ws   = find(ctx, output_shape, inputs);
+        if(ws.bytes() > size)
+            MIGRAPHX_THROW("MIOpen Quant Convolution: workspace has changed during finalization.");
+    }
+
+    auto x_desc = make_tensor(inputs[0], int8_x4_format);
+    auto w_desc = make_tensor(inputs[1], int8_x4_format);
+    auto y_desc = make_tensor(output_shape);
+
+    auto status = miopenConvolutionForwardCompileSolution(ctx.get_stream().get_miopen(),
+                                                          w_desc.get(),
+                                                          x_desc.get(),
+                                                          cd.get(),
+                                                          y_desc.get(),
+                                                          solution_id);
+
+    if(status != miopenStatusSuccess)
+        MIGRAPHX_THROW("MIOpen Quant Convolution: compile solution failed");
 }
 
 shape miopen_quant_convolution::pack_int8_shape(const shape& s) const
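For context on these hunks: find() now records a MIOpen solution_id instead of a handle/algo pair, and finalize() compiles that solution. The matching run-time hunk is not shown on this page; presumably compute() dispatches the stored solution through MIOpen's immediate-mode entry point, roughly like the hedged sketch below (the variable names and argument layout are assumptions based on the rest of this file, not code from the commit):

// Sketch only: executing a compiled solution at run time with
// miopenConvolutionForwardImmediate, using the solution_id recorded by find().
auto status = miopenConvolutionForwardImmediate(ctx.get_stream().get_miopen(),
                                                w_desc.get(),
                                                args[1].implicit(), // weights
                                                x_desc.get(),
                                                args[0].implicit(), // input
                                                cd.get(),
                                                y_desc.get(),
                                                args[3].implicit(), // output
                                                args[2].implicit(), // workspace
                                                args[2].get_shape().bytes(),
                                                solution_id);
if(status != miopenStatusSuccess)
    MIGRAPHX_THROW("MIOpen Quant Convolution: running convolution failed");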
src/targets/gpu/target.cpp

@@ -53,10 +53,10 @@
 #include <migraphx/gpu/compile_ops.hpp>
 #include <migraphx/gpu/concat_gpu_opt.hpp>
 #include <migraphx/gpu/context.hpp>
+#include <migraphx/gpu/fuse_mlir.hpp>
 #include <migraphx/gpu/fuse_ops.hpp>
 #include <migraphx/gpu/prefuse_ops.hpp>
 #include <migraphx/gpu/lowering.hpp>
-#include <migraphx/gpu/mlir_conv.hpp>
 #include <migraphx/gpu/pack_int8_args.hpp>
 #include <migraphx/gpu/schedule_model.hpp>
 #include <migraphx/gpu/sync_device.hpp>
@@ -128,7 +128,8 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
         dead_code_elimination{},
         enable_pass(not enabled(MIGRAPHX_DISABLE_POINTWISE_FUSION{}), fuse_pointwise{}),
         dead_code_elimination{},
-        mlir_conv{&ctx},
+        fuse_mlir{&ctx},
+        dead_code_elimination{},
         lowering{&ctx, options.offload_copy},
         eliminate_contiguous{"gpu::contiguous"},
         dead_code_elimination{},
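The enable_pass(...) call in the hunk above gates fuse_pointwise behind an environment flag. A hypothetical sketch of what such a wrapper can look like (conditional_pass is an invented name; MIGraphX's actual helper may be implemented differently):

#include <string>

// Invented illustration of a conditional pass wrapper: when enabled is false,
// apply() becomes a no-op, so the pass list can be built unconditionally.
template <class Pass>
struct conditional_pass
{
    bool enabled;
    Pass p;
    std::string name() const { return p.name(); }
    template <class Module>
    void apply(Module& m) const
    {
        if(enabled)
            p.apply(m);
    }
};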
test/eliminate_contiguous_test.cpp

@@ -205,4 +205,24 @@ TEST_CASE(contiguous_pointwise)
         mm->begin(), mm->end(), [](auto&& ins) { return ins.name() == "contiguous"; }));
 }
 
+TEST_CASE(slice_contiguous)
+{
+    migraphx::module m;
+    migraphx::shape s{migraphx::shape::float_type, {4, 2}};
+    auto x  = m.add_parameter("x", s);
+    auto t  = m.add_instruction(migraphx::make_op("transpose", {{"permutation", {1, 0}}}), x);
+    auto c  = m.add_instruction(migraphx::make_op("contiguous"), t);
+    auto s1 = m.add_instruction(
+        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {0}}, {"ends", {1}}}), c);
+    auto s2 = m.add_instruction(
+        migraphx::make_op("slice", {{"axes", {0}}, {"starts", {1}}, {"ends", {2}}}), c);
+    auto c1 = m.add_instruction(migraphx::make_op("contiguous"), s1);
+    auto c2 = m.add_instruction(migraphx::make_op("contiguous"), s2);
+    m.add_instruction(pass_standard_op{}, c1, c2);
+    run_pass(m);
+    EXPECT(std::count_if(
+               m.begin(), m.end(), [](auto&& ins) { return ins.name() == "contiguous"; }) == 1);
+}
+
 int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/get_target_assignments.cpp (new file, 0 → 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "test.hpp"
#include <migraphx/make_op.hpp>
#include <migraphx/program.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/target_assignments.hpp>

migraphx::program create_program()
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape s{migraphx::shape::float_type, {3}};
    auto x    = mm->add_parameter("x", s);
    auto y    = mm->add_parameter("y", s);
    auto z    = mm->add_parameter("z", s);
    auto diff = mm->add_instruction(migraphx::make_op("div"), x, y);
    mm->add_instruction(migraphx::make_op("div"), diff, z);
    return p;
}

TEST_CASE(is_supported)
{
    auto p       = create_program();
    auto targets = migraphx::get_targets();
    EXPECT(!targets.empty());
    auto first_target = targets[0];
    auto t            = migraphx::make_target(first_target);

    const auto assignments = p.get_target_assignments({t});

    for(const auto& [ins, target] : assignments)
    {
        (void)ins;
        EXPECT(target == first_target);
    }
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
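The loop in this test checks that every instruction lands on the only registered target. As a sketch of the kind of per-instruction selection get_target_assignments could perform (a hypothetical helper; the real logic lives inside the library and may differ), following the "negative numbers mean unsupported" convention documented in tools/include/target.hpp below:

#include <string>
#include <vector>

// Hypothetical selection loop: per instruction, keep the target whose
// is_supported() score is best; negative scores mean "unsupported".
template <class Instruction, class Target, class Metric>
std::string pick_target(const Instruction& ins, const std::vector<Target>& targets, Metric m)
{
    float best = -1.0f;
    std::string chosen;
    for(const auto& t : targets)
    {
        float score = t.is_supported(ins, m);
        if(score >= 0 and score > best)
        {
            best   = score;
            chosen = t.name();
        }
    }
    return chosen; // empty when nothing supports the instruction
}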
test/gpu/mlir.cpp (new file, 0 → 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/gpu/mlir.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/write_literals.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/module.hpp>
#include <migraphx/program.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/verify_args.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/functional.hpp>
#include <test.hpp>

using migraphx::trim;

// m test_gpu_mlir && ./bin/test_gpu_mlir

struct mlir_gpu_target : migraphx::gpu::target
{
    std::string name() const { return "mlir"; }
    std::vector<migraphx::pass> get_passes(migraphx::context& gctx,
                                           const migraphx::compile_options&) const
    {
        auto& ctx = migraphx::any_cast<migraphx::gpu::context>(gctx);
        return {migraphx::gpu::write_literals{&ctx}};
    }
};

std::string encode(const std::string& s)
{
    std::stringstream ss;
    bool prespace = false;
    for(auto c : s)
    {
        if(std::isspace(c) != 0)
        {
            if(not prespace)
                ss << " ";
            prespace = true;
        }
        else if(std::isprint(c) != 0)
        {
            ss << c;
            prespace = false;
        }
    }
    return migraphx::trim(ss.str());
}

migraphx::program create_program_from_mlir(const migraphx::module& mmlir)
{
    migraphx::program p;
    auto* mm   = p.get_main_module();
    auto names = mmlir.get_parameter_names();
    std::vector<migraphx::instruction_ref> inputs;
    std::transform(names.begin(), names.end(), std::back_inserter(inputs), [&](const auto& name) {
        return mm->add_parameter(name, mmlir.get_parameter_shape(name));
    });
    std::sort(inputs.begin(), inputs.end(), migraphx::by(std::less<>{}, [](auto ins) {
                  return to_string(ins->get_operator());
              }));
    inputs.push_back(mm->add_parameter("output", mmlir.get_output_shapes().front()));

    migraphx::gpu::context ctx;
    migraphx::gpu::insert_mlir(*mm, mm->end(), compile_mlir(ctx, mmlir), inputs);
    return p;
}

migraphx::parameter_map generate_params(const migraphx::program& p)
{
    migraphx::parameter_map m;
    std::size_t i = 0;
    for(auto&& x : p.get_parameter_shapes())
    {
        // m[x.first] = migraphx::fill_argument(x.second, 1);
        m[x.first] = migraphx::generate_argument(x.second, i++);
    }
    return m;
}

migraphx::argument run_gpu(migraphx::program p, const migraphx::parameter_map& inputs)
{
    mlir_gpu_target t;
    p.compile(t);
    migraphx::parameter_map m;
    for(auto&& input : inputs)
    {
        m[input.first] = t.copy_to(input.second);
    }
    for(auto&& x : p.get_parameter_shapes())
    {
        if(m.count(x.first) == 0)
        {
            m[x.first] = t.allocate(x.second);
        }
    }
    return t.copy_from(p.eval(m).front());
}

migraphx::argument run_ref(migraphx::program p, const migraphx::parameter_map& inputs)
{
    p.compile(migraphx::ref::target{});
    return p.eval(inputs).front();
}

bool verify_mlir(const migraphx::module& mmlir)
{
    migraphx::program ref;
    ref.get_main_module()->insert_instructions(ref.get_main_module()->end(), &mmlir);

    auto inputs = generate_params(ref);

    auto mlir = create_program_from_mlir(mmlir);
    return migraphx::verify_args("mlir", run_ref(ref, inputs), run_gpu(mlir, inputs));
}

TEST_CASE(conv)
{
    const std::string mlir_output = R"__migraphx__(
module {
  func @main(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
    %0 = migraphx.convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
    return %0 : tensor<1x2x2x2xf32>
  }
}
)__migraphx__";
    migraphx::module m;
    auto x    = m.add_parameter("x", {migraphx::shape::float_type, {1, 8, 4, 4}});
    auto w    = m.add_parameter("w", {migraphx::shape::float_type, {2, 8, 3, 3}});
    auto conv = m.add_instruction(migraphx::make_op("convolution"), x, w);
    m.add_return({conv});
    auto s = migraphx::gpu::dump_mlir(m);
    // Skip test if MLIR is not enabled
    if(s.empty())
        return;
    CHECK(encode(s) == encode(mlir_output));
    EXPECT(verify_mlir(m));
}

TEST_CASE(conv_add_relu)
{
    const std::string mlir_output = R"__migraphx__(
module {
  func @main(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
    %0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
    %1 = migraphx.add(%0, %arg0) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
    %2 = migraphx.relu(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
    return %2 : tensor<1x2x2x2xf32>
  }
}
)__migraphx__";
    migraphx::module m;
    auto x    = m.add_parameter("x", {migraphx::shape::float_type, {1, 8, 4, 4}});
    auto w    = m.add_parameter("w", {migraphx::shape::float_type, {2, 8, 3, 3}});
    auto b    = m.add_parameter("b", {migraphx::shape::float_type, {1, 2, 2, 2}});
    auto conv = m.add_instruction(migraphx::make_op("convolution"), x, w);
    auto add  = m.add_instruction(migraphx::make_op("add"), conv, b);
    auto relu = m.add_instruction(migraphx::make_op("relu"), add);
    m.add_return({relu});
    auto s = migraphx::gpu::dump_mlir(m);
    // Skip test if MLIR is not enabled
    if(s.empty())
        return;
    CHECK(encode(s) == encode(mlir_output));
    EXPECT(verify_mlir(m));
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/op_shape_test.cpp

@@ -981,7 +981,8 @@ TEST_CASE(multibroadcast)
     }
     {
         std::vector<std::size_t> lens{4, 1, 3};
-        migraphx::shape input{migraphx::shape::float_type, {}};
+        std::vector<std::size_t> empt = {};
+        migraphx::shape input{migraphx::shape::float_type, empt};
         throws_shape(migraphx::make_op("multibroadcast", {{"out_lens", lens}}), input);
     }
     {
@@ -1533,15 +1534,46 @@ TEST_CASE(test_squeeze_wrong_axis)
 TEST_CASE(test_unsqueeze)
 {
-    migraphx::shape s1{migraphx::shape::float_type, {4, 3, 3}};
-    migraphx::shape s2{migraphx::shape::float_type, {4, 3, 1, 3}};
+    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 3}};
+    migraphx::shape s2{migraphx::shape::float_type, {4, 5, 1, 3}};
     expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}}), s1);
 }
 
+TEST_CASE(test_unsqueeze_step)
+{
+    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 12}};
+    migraphx::shape s2{migraphx::shape::float_type, {4, 5, 2, 6}};
+    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {2}}}), s1);
+}
+
+TEST_CASE(test_unsqueeze_step_non_divisable)
+{
+    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 3}};
+    throws_shape(migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {2}}}), s1);
+}
+
+TEST_CASE(test_unsqueeze_step_zero)
+{
+    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 12}};
+    throws_shape(migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {0}}}), s1);
+}
+
+TEST_CASE(test_unsqueeze_step_at_end)
+{
+    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 12}};
+    throws_shape(migraphx::make_op("unsqueeze", {{"axes", {3}}, {"steps", {2}}}), s1);
+}
+
+TEST_CASE(test_unsqueeze_mismatch_step_axis)
+{
+    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 12}};
+    throws_shape(migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {2, 3}}}), s1);
+}
+
 TEST_CASE(test_unsqueeze_negative_axis)
 {
-    migraphx::shape s1{migraphx::shape::float_type, {4, 3, 3}};
-    migraphx::shape s2{migraphx::shape::float_type, {4, 3, 1, 3}};
+    migraphx::shape s1{migraphx::shape::float_type, {4, 5, 3}};
+    migraphx::shape s2{migraphx::shape::float_type, {4, 5, 1, 3}};
     expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {-2}}}), s1);
 }
@@ -1567,21 +1599,28 @@ TEST_CASE(test_unsqueeze_scalar_tensor2)
 TEST_CASE(test_unsqueeze_transpose)
 {
     migraphx::shape s1{migraphx::shape::float_type, {4, 4, 3}, {12, 1, 4}};
-    migraphx::shape s2{migraphx::shape::float_type, {4, 4, 1, 3}, {12, 1, 1, 4}};
+    migraphx::shape s2{migraphx::shape::float_type, {4, 4, 1, 3}, {12, 1, 12, 4}};
     expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}}), s1);
 }
 
+TEST_CASE(test_unsqueeze_transpose_step)
+{
+    migraphx::shape s1{migraphx::shape::float_type, {4, 4, 6}, {24, 1, 4}};
+    migraphx::shape s2{migraphx::shape::float_type, {4, 4, 2, 3}, {24, 1, 12, 4}};
+    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}, {"steps", {2}}}), s1);
+}
+
 TEST_CASE(test_unsqueeze_multibroadcast)
 {
     migraphx::shape s1{migraphx::shape::float_type, {2, 3, 4}, {0, 1, 0}};
-    migraphx::shape s2{migraphx::shape::float_type, {2, 3, 1, 4}, {0, 1, 1, 0}};
+    migraphx::shape s2{migraphx::shape::float_type, {2, 3, 1, 4}, {0, 1, 0, 0}};
     expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}}), s1);
 }
 
 TEST_CASE(test_unsqueeze_slice)
 {
     migraphx::shape s1{migraphx::shape::float_type, {2, 3, 4}, {108, 36, 1}};
-    migraphx::shape s2{migraphx::shape::float_type, {2, 3, 1, 4}, {108, 36, 36, 1}};
+    migraphx::shape s2{migraphx::shape::float_type, {2, 3, 1, 4}, {108, 36, 4, 1}};
     expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2}}}), s1);
 }
@@ -1613,6 +1652,27 @@ TEST_CASE(test_unsqueeze_multiple_axes_2)
     expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {0, 1}}}), s1);
 }
 
+TEST_CASE(test_unsqueeze_multiple_axes_3)
+{
+    migraphx::shape s1{migraphx::shape::float_type, {3, 4, 5}};
+    migraphx::shape s2{migraphx::shape::float_type, {3, 4, 1, 5, 1, 1}};
+    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2, 4, 5}}}), s1);
+}
+
+TEST_CASE(test_unsqueeze_multiple_axes_4)
+{
+    migraphx::shape s1{migraphx::shape::float_type, {3, 4, 5}};
+    migraphx::shape s2{migraphx::shape::float_type, {3, 4, 1, 5, 1, 1}};
+    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {5, 4, 2}}}), s1);
+}
+
+TEST_CASE(test_unsqueeze_multiple_axes_step)
+{
+    migraphx::shape s1{migraphx::shape::float_type, {3, 4, 10}};
+    migraphx::shape s2{migraphx::shape::float_type, {3, 4, 2, 5, 1, 1}};
+    expect_shape(s2, migraphx::make_op("unsqueeze", {{"axes", {2, 4, 5}}, {"steps", {2}}}), s1);
+}
+
 TEST_CASE(transpose_shape)
 {
     migraphx::shape input{migraphx::shape::float_type, {2, 2}};
test/shape_test.cpp

@@ -38,7 +38,6 @@ TEST_CASE(test_shape_default)
     EXPECT(s.elements() == 0);
     EXPECT(s.bytes() == 0);
 }
 
 TEST_CASE(test_shape_assign)
 {
     migraphx::shape s1{migraphx::shape::float_type, {100, 32, 8, 8}};
@@ -65,6 +64,118 @@ TEST_CASE(test_shape_standard)
     EXPECT(not s.broadcasted());
 }
 
+TEST_CASE(test_shape_min_max_opt)
+{
+    migraphx::shape s{migraphx::shape::float_type, {2, 2, 3}, {6, 3, 1}};
+    EXPECT(s.min_lens() == s.lens());
+    EXPECT(s.max_lens() == s.lens());
+    EXPECT(s.opt_lens() == s.lens());
+}
+
+TEST_CASE(test_shape_dynamic_fixed)
+{
+    migraphx::shape s{migraphx::shape::float_type, {{2, 2, 0}, {2, 2, 0}, {3, 3, 0}}};
+    EXPECT(not s.standard());
+    EXPECT(not s.packed());
+    EXPECT(not s.transposed());
+    EXPECT(not s.broadcasted());
+    EXPECT(s.dynamic());
+    EXPECT(s.dyn_dims().size() == 3);
+    EXPECT(s.dyn_dims().at(0).is_fixed());
+    EXPECT(not s.dyn_dims().at(0).has_optimal());
+    EXPECT(s.min_lens() == std::vector<std::size_t>{2, 2, 3});
+    EXPECT(s.max_lens() == std::vector<std::size_t>{2, 2, 3});
+    EXPECT(s.opt_lens() == std::vector<std::size_t>{0, 0, 0});
+    EXPECT(s.bytes() == 2 * 2 * 3 * sizeof(float));
+}
+
+TEST_CASE(test_shape_dynamic_not_fixed)
+{
+    using migraphx::shape;
+    std::vector<shape::dynamic_dimension> dims = {};
+    dims.push_back(shape::dynamic_dimension{2, 5, 2});
+    dims.push_back(shape::dynamic_dimension{2, 8, 0});
+    migraphx::shape s{migraphx::shape::float_type, dims};
+    EXPECT(not s.standard());
+    EXPECT(not s.packed());
+    EXPECT(not s.transposed());
+    EXPECT(not s.broadcasted());
+    EXPECT(s.dynamic());
+    EXPECT(s.dyn_dims().size() == 2);
+    EXPECT(not s.dyn_dims().at(0).is_fixed());
+    EXPECT(s.dyn_dims().at(0).has_optimal());
+    EXPECT(s.min_lens() == std::vector<std::size_t>{2, 2});
+    EXPECT(s.max_lens() == std::vector<std::size_t>{5, 8});
+    EXPECT(s.opt_lens() == std::vector<std::size_t>{2, 0});
+    EXPECT(s.bytes() == 5 * 8 * sizeof(float));
+}
+
+TEST_CASE(test_shape_dynamic_compares)
+{
+    using migraphx::shape;
+    auto a = shape::dynamic_dimension{2, 5, 2};
+    auto b = a;
+    auto c = shape::dynamic_dimension{2, 5, 2};
+    auto d = shape::dynamic_dimension{3, 8, 4};
+    EXPECT(a == b);
+    EXPECT(a == c);
+    EXPECT(a != d);
+    migraphx::shape s0{shape::float_type, {a, d}};
+    migraphx::shape s1 = s0;
+    migraphx::shape s2{shape::float_type, {a, d}};
+    migraphx::shape s3{shape::int32_type, {a}};
+    EXPECT(s0 == s1);
+    EXPECT(s0 == s2);
+    EXPECT(s0 != s3);
+    std::stringstream ss0;
+    std::stringstream ss1;
+    std::stringstream ss3;
+    ss0 << s0;
+    ss1 << s1;
+    ss3 << s3;
+    EXPECT(ss0.str() == ss1.str());
+    EXPECT(ss0.str() != ss3.str());
+}
+
+TEST_CASE(test_shape_dynamic_errors)
+{
+    using migraphx::shape;
+    std::vector<shape::dynamic_dimension> dims = {};
+    dims.push_back(shape::dynamic_dimension{2, 5, 2});
+    dims.push_back(shape::dynamic_dimension{2, 8, 0});
+    migraphx::shape s{shape::float_type, dims};
+    EXPECT(test::throws([&] { s.elements(); }));
+    EXPECT(test::throws([&] { s.index({0, 1}); }));
+    EXPECT(test::throws([&] { s.index(1); }));
+    EXPECT(test::throws([&] { s.index(std::vector<std::size_t>{0, 1}); }));
+    EXPECT(test::throws([&] { s.with_lens({3, 5}); }));
+    EXPECT(test::throws([&] { s.with_lens(shape::float_type, {3, 5}); }));
+}
+
+TEST_CASE(test_shape_dynamic_serialize)
+{
+    using migraphx::shape;
+    std::vector<shape::dynamic_dimension> dims1 = {};
+    dims1.push_back(shape::dynamic_dimension{2, 5, 2});
+    dims1.push_back(shape::dynamic_dimension{2, 8, 0});
+    migraphx::shape s1{shape::float_type, dims1};
+    auto v1 = migraphx::to_value(s1);
+    std::vector<shape::dynamic_dimension> dims2 = {};
+    dims2.push_back(shape::dynamic_dimension{2, 5, 2});
+    migraphx::shape s2{shape::uint64_type, dims2};
+    auto v2 = migraphx::to_value(s2);
+    EXPECT(v1 != v2);
+
+    auto s3 = migraphx::from_value<shape>(v1);
+    EXPECT(s3 == s1);
+    auto s4 = migraphx::from_value<shape>(v2);
+    EXPECT(s4 == s2);
+    EXPECT(s3 != s4);
+}
+
 TEST_CASE(test_shape_packed)
 {
     migraphx::shape s{migraphx::shape::float_type, {2, 2}, {2, 1}};
test/simplify_reshapes_test.cpp

@@ -1141,4 +1141,138 @@ TEST_CASE(transpose_contiguous_reshape_binary_broadcast)
     EXPECT(m1 == m2);
 }
 
+TEST_CASE(transpose_unsqueeze_concat)
+{
+    migraphx::module m1;
+    {
+        auto l0  = m1.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 2, 1, 1}});
+        auto lt0 = m1.add_instruction(
+            migraphx::make_op("transpose", {{"permutation", {0, 2, 3, 1}}}), l0);
+        auto l1  = m1.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {1, 2, 1, 1}});
+        auto lt1 = m1.add_instruction(
+            migraphx::make_op("transpose", {{"permutation", {0, 2, 3, 1}}}), l1);
+        auto l2  = m1.add_parameter("2", migraphx::shape{migraphx::shape::float_type, {1, 2, 1, 1}});
+        auto lt2 = m1.add_instruction(
+            migraphx::make_op("transpose", {{"permutation", {0, 2, 3, 1}}}), l2);
+        std::vector<migraphx::instruction_ref> args{lt0, lt1, lt2};
+        std::vector<migraphx::instruction_ref> unsqueezed_args;
+        int64_t axis = 3;
+        std::transform(args.begin(),
+                       args.end(),
+                       std::back_inserter(unsqueezed_args),
+                       [&](migraphx::instruction_ref arg) {
+                           return m1.add_instruction(
+                               migraphx::make_op("unsqueeze", {{"axes", {axis}}}), arg);
+                       });
+        m1.add_instruction(migraphx::make_op("concat", {{"axis", axis}}), unsqueezed_args);
+    }
+    // TODO: This could be simplified to a single transpose after concat
+    migraphx::module m2 = m1;
+    run_pass(m1);
+    EXPECT(m1 == m2);
+}
+
+TEST_CASE(transpose_slice)
+{
+    migraphx::module m1;
+    {
+        auto x      = m1.add_parameter("x", {migraphx::shape::float_type, {1, 384, 36, 64}});
+        auto slice1 = m1.add_instruction(
+            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {0}}, {"ends", {12}}}), x);
+        auto transpose1 = m1.add_instruction(
+            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice1);
+        auto slice2 = m1.add_instruction(
+            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {12}}, {"ends", {24}}}), x);
+        auto transpose2 = m1.add_instruction(
+            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice2);
+        auto slice3 = m1.add_instruction(
+            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {24}}, {"ends", {36}}}), x);
+        auto transpose3 = m1.add_instruction(
+            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice3);
+        m1.add_return({transpose1, transpose2, transpose3});
+    }
+    run_pass(m1);
+
+    migraphx::module m2;
+    {
+        auto x         = m2.add_parameter("x", {migraphx::shape::float_type, {1, 384, 36, 64}});
+        auto transpose = m2.add_instruction(
+            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), x);
+        auto slice1 = m2.add_instruction(
+            migraphx::make_op("slice", {{"axes", {1}}, {"starts", {0}}, {"ends", {12}}}), transpose);
+        auto slice2 = m2.add_instruction(
+            migraphx::make_op("slice", {{"axes", {1}}, {"starts", {12}}, {"ends", {24}}}), transpose);
+        auto slice3 = m2.add_instruction(
+            migraphx::make_op("slice", {{"axes", {1}}, {"starts", {24}}, {"ends", {36}}}), transpose);
+        m2.add_return({slice1, slice2, slice3});
+    }
+    EXPECT(m1 == m2);
+}
+
+TEST_CASE(transpose_slice_diff_perm)
+{
+    migraphx::module m1;
+    {
+        auto x      = m1.add_parameter("x", {migraphx::shape::float_type, {1, 384, 36, 64}});
+        auto slice1 = m1.add_instruction(
+            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {0}}, {"ends", {12}}}), x);
+        auto transpose1 = m1.add_instruction(
+            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice1);
+        auto slice2 = m1.add_instruction(
+            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {12}}, {"ends", {24}}}), x);
+        auto transpose2 = m1.add_instruction(
+            migraphx::make_op("transpose", {{"permutation", {0, 2, 3, 1}}}), slice2);
+        auto slice3 = m1.add_instruction(
+            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {24}}, {"ends", {36}}}), x);
+        auto transpose3 = m1.add_instruction(
+            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice3);
+        m1.add_return({transpose1, transpose2, transpose3});
+    }
+    run_pass(m1);
+
+    migraphx::module m2;
+    {
+        auto x         = m2.add_parameter("x", {migraphx::shape::float_type, {1, 384, 36, 64}});
+        auto transpose = m2.add_instruction(
+            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), x);
+        auto slice1 = m2.add_instruction(
+            migraphx::make_op("slice", {{"axes", {1}}, {"starts", {0}}, {"ends", {12}}}), transpose);
+        auto slice2 = m2.add_instruction(
+            migraphx::make_op("slice", {{"axes", {1}}, {"starts", {12}}, {"ends", {24}}}), transpose);
+        auto transpose2 = m2.add_instruction(
+            migraphx::make_op("transpose", {{"permutation", {0, 1, 3, 2}}}), slice2);
+        auto slice3 = m2.add_instruction(
+            migraphx::make_op("slice", {{"axes", {1}}, {"starts", {24}}, {"ends", {36}}}), transpose);
+        m2.add_return({slice1, transpose2, slice3});
+    }
+    EXPECT(m1 == m2);
+}
+
+TEST_CASE(transpose_slice_single_transpose)
+{
+    migraphx::module m1;
+    {
+        auto x      = m1.add_parameter("x", {migraphx::shape::float_type, {1, 384, 36, 64}});
+        auto slice1 = m1.add_instruction(
+            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {0}}, {"ends", {12}}}), x);
+        auto sqrt1  = m1.add_instruction(migraphx::make_op("sqrt"), slice1);
+        auto slice2 = m1.add_instruction(
+            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {12}}, {"ends", {24}}}), x);
+        auto transpose = m1.add_instruction(
+            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), slice2);
+        auto slice3 = m1.add_instruction(
+            migraphx::make_op("slice", {{"axes", {2}}, {"starts", {24}}, {"ends", {36}}}), x);
+        auto sqrt3 = m1.add_instruction(migraphx::make_op("sqrt"), slice3);
+        m1.add_return({sqrt1, transpose, sqrt3});
+    }
+    migraphx::module m2 = m1;
+    run_pass(m1);
+    EXPECT(m1 == m2);
+}
+
 int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/verify/run_verify.cpp

@@ -30,6 +30,7 @@
 #include <migraphx/ranges.hpp>
 #include <migraphx/generate.hpp>
 #include <migraphx/load_save.hpp>
+#include <migraphx/tmp_dir.hpp>
 #include <migraphx/verify_args.hpp>
 #include <set>
@@ -57,6 +58,15 @@ std::future<typename std::result_of<Function()>::type> detach_async(Function&& f
     return std::async(std::launch::deferred, std::forward<Function>(f));
 }
 
+inline void verify_load_save(const migraphx::program& p)
+{
+    migraphx::tmp_dir td{"migraphx_test"};
+    auto path = td.path / "test.mxr";
+    migraphx::save(p, path.string());
+    auto loaded = migraphx::load(path.string());
+    EXPECT(p == loaded);
+}
+
 inline void compile_check(migraphx::program& p, const migraphx::target& t, bool show_trace = false)
 {
     auto name = t.name();
@@ -82,6 +92,8 @@ inline void compile_check(migraphx::program& p, const migraphx::target& t, bool
             throw std::runtime_error("Compiling program with " + name + " alters its shape");
         }
     }
+    if(t.name() != "ref")
+        verify_load_save(p);
 }
 
 target_info run_verify::get_target_info(const std::string& name) const
@@ -152,6 +164,7 @@ void run_verify::verify(const std::string& name, const migraphx::program& p) con
     auto_print::set_terminate_handler(name);
     if(migraphx::enabled(MIGRAPHX_DUMP_TEST{}))
         migraphx::save(p, name + ".mxr");
+    verify_load_save(p);
     std::vector<std::string> target_names;
     for(const auto& tname : migraphx::get_targets())
     {
test/verify/test_conv_add_relu.cpp (new file, 0 → 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>

struct test_conv_add_relu : verify_program<test_conv_add_relu>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        auto input =
            mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
        auto weights =
            mm->add_parameter("w", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
        auto bias_literal = migraphx::literal{migraphx::shape{migraphx::shape::float_type, {4}},
                                              {2.0f, 2.0f, 2.0f, 2.0f}};
        auto bias = mm->add_literal(bias_literal);
        auto conv = mm->add_instruction(migraphx::make_op("convolution"), input, weights);
        auto bcast_bias = mm->add_instruction(
            migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", conv->get_shape().lens()}}),
            bias);
        auto bias_add = mm->add_instruction(migraphx::make_op("add"), conv, bcast_bias);
        mm->add_instruction(migraphx::make_op("relu"), bias_add);
        return p;
    }
};
tools/include/target.hpp

@@ -37,6 +37,8 @@
 #include <migraphx/compile_options.hpp>
 #include <migraphx/argument.hpp>
 #include <migraphx/rank.hpp>
+#include <migraphx/support_metric.hpp>
+#include <migraphx/instruction_ref.hpp>
 
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
@@ -61,6 +63,13 @@ struct target
      * @return The context to be used during compilation and execution.
      */
     context get_context() const;
+    /**
+     * @brief Check how well an instruction is supported on a target with the given metric
+     * @param ins Instruction to check if it's supported
+     * @param metric Used to define how the return value should be interpreted
+     * @return The value based on the chosen metric. Negative numbers mean unsupported
+     */
+    float is_supported(T&, instruction_ref ins, support_metric m) const;
     /**
      * @brief copy an argument to the current target.
      *
@@ -105,11 +114,18 @@ argument copy_from_target(T&, const argument& arg)
     return arg;
 }
 
+template <class T>
+float target_is_supported(T&, instruction_ref, support_metric)
+{
+    return 0;
+}
+
 <%
 interface('target',
           virtual('name', returns='std::string', const=True),
           virtual('get_passes', ctx='context&', options='const compile_options&', returns='std::vector<pass>', const=True),
           virtual('get_context', returns='context', const=True),
+          virtual('is_supported', returns='float', ins='instruction_ref', m='support_metric', const=True, default='target_is_supported'),
           virtual('copy_to', returns='argument', input='const argument&',
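To make the new hook concrete, here is a hedged sketch of a target opting in (my_target is an invented name; only the is_supported signature and the target_is_supported default above come from the diff):

#include <migraphx/instruction.hpp>
#include <migraphx/instruction_ref.hpp>
#include <migraphx/support_metric.hpp>
#include <string>

// Invented example target: claims convolutions with a positive score and
// reports everything else as unsupported (negative), per the docs above.
struct my_target
{
    std::string name() const { return "my_target"; }
    float is_supported(migraphx::instruction_ref ins, migraphx::support_metric) const
    {
        return ins->name() == "convolution" ? 1.0f : -1.0f;
    }
    // get_passes, get_context, copy_to, copy_from, etc. omitted for brevity
};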
tools/te.py

@@ -23,7 +23,7 @@
 #####################################################################################
 import string, sys, re
 
-trivial = ['std::size_t', 'instruction_ref']
+trivial = ['std::size_t', 'instruction_ref', 'support_metric']
 
 headers = '''
 #include <algorithm>