gaoqiong / MIGraphX / Commits

Commit ee80cee9 (unverified), authored Nov 07, 2018 by Paul Fultz II, committed by GitHub on Nov 07, 2018

Merge branch 'master' into gpu_slice_test

Parents: 6d06226d, f958d56f
Showing 19 changed files with 1156 additions and 118 deletions (+1156, -118)
Changed files:

  src/targets/gpu/relu.cpp                    +2    -1
  src/targets/gpu/rocblas.cpp                 +2    -1
  src/targets/gpu/softmax.cpp                 +2    -1
  src/targets/gpu/target.cpp                  +6    -0
  src/targets/gpu/write_literals.cpp          +3    -1
  test/cpu_ops_test.cpp                       +84   -42
  test/eliminate_concat_test.cpp              +174  -0
  test/eval_test.cpp                          +16   -0
  test/fwd_conv_batchnorm_rewrite_test.cpp    +3    -3
  test/gpu/miopen.cpp                         +109  -9
  test/include/basic_ops.hpp                  +2    -0
  test/include/test.hpp                       +1    -1
  test/memory_coloring_test.cpp               +574  -55
  test/onnx/globalavgpool_test.onnx           +15   -0
  test/onnx/globalmaxpool_test.onnx           +15   -0
  test/onnx/onnx_test.cpp                     +34   -4
  test/output_alias.cpp                       +42   -0
  tools/include/concat_opt.hpp                +45   -0
  tools/include/operation.hpp                 +27   -0
src/targets/gpu/relu.cpp

@@ -5,6 +5,7 @@
 #include <utility>

 namespace migraph {
+inline namespace MIGRAPH_INLINE_NS {
 namespace gpu {

 shape miopen_relu::compute_shape(const std::vector<shape>& inputs) const
@@ -33,5 +34,5 @@ argument miopen_relu::compute(context& ctx,
 }

 } // namespace gpu
+} // namespace MIGRAPH_INLINE_NS
 } // namespace migraph
src/targets/gpu/rocblas.cpp

 #include <migraph/gpu/rocblas.hpp>

 namespace migraph {
+inline namespace MIGRAPH_INLINE_NS {
 namespace gpu {

 rocblas_handle_ptr create_rocblas_handle_ptr()
@@ -18,5 +19,5 @@ rocblas_handle_ptr create_rocblas_handle_ptr(hipStream_t s)
 }

 } // namespace gpu
+} // namespace MIGRAPH_INLINE_NS
 } // namespace migraph
src/targets/gpu/softmax.cpp

@@ -5,6 +5,7 @@
 #include <utility>

 namespace migraph {
+inline namespace MIGRAPH_INLINE_NS {
 namespace gpu {

 shape miopen_softmax::compute_shape(const std::vector<shape>& inputs) const
@@ -32,5 +33,5 @@ argument miopen_softmax::compute(context& ctx,
 }

 } // namespace gpu
+} // namespace MIGRAPH_INLINE_NS
 } // namespace migraph
src/targets/gpu/target.cpp

@@ -15,8 +15,11 @@
 #include <migraph/eliminate_contiguous.hpp>
 #include <migraph/common_subexpression_elimination.hpp>
 #include <migraph/fwd_conv_batchnorm_rewrite.hpp>
+#include <migraph/eliminate_concat.hpp>
+#include <migraph/gpu/concat_gpu_opt.hpp>

 namespace migraph {
+inline namespace MIGRAPH_INLINE_NS {
 namespace gpu {

 std::vector<pass> target::get_passes(migraph::context& gctx) const
@@ -38,6 +41,8 @@ std::vector<pass> target::get_passes(migraph::context& gctx) const
         simplify_reshapes{},
         dead_code_elimination{},
         lowering{ctx},
+        eliminate_concat{concat_gpu_optimization{}},
+        dead_code_elimination{},
         eliminate_contiguous{},
         dead_code_elimination{},
         fuse_ops{&ctx},
@@ -56,4 +61,5 @@ std::string target::name() const { return "miopen"; }
 migraph::context target::get_context() const { return context{}; }

 } // namespace gpu
+} // namespace MIGRAPH_INLINE_NS
 } // namespace migraph
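Neither tools/include/concat_opt.hpp nor the body of concat_gpu_opt.hpp appears in this diff, so the exact shape of concat_gpu_optimization is not visible here; the concat_test_optimization struct added in test/eliminate_concat_test.cpp further down shows what eliminate_concat expects from such a policy. A minimal hypothetical sketch along those lines (member names follow the test; the operator type and string values are placeholders, not MIGraphX's real GPU names):

    // Hypothetical lowered-concat operator; it only needs to carry the original
    // op so the policy can recover the concat axis.
    struct example_lowered_concat
    {
        migraph::op::concat op;
        std::string name() const { return "example::concat"; }
    };

    // Hypothetical policy passed to migraph::eliminate_concat, mirroring
    // concat_test_optimization from this commit's test file.
    struct example_concat_optimization
    {
        // Name of the lowered concat operator to look for
        std::string name() const { return "example::concat"; }
        // Name of the allocation operator whose buffers can be merged
        std::string allocate() const { return "example::allocate"; }
        // Recover the front-end concat (and its axis) from the lowered operation
        migraph::op::concat get_concat(const migraph::operation& op) const
        {
            return migraph::any_cast<example_lowered_concat>(op).op;
        }
    };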
src/targets/gpu/write_literals.cpp

@@ -5,7 +5,7 @@
 #include <migraph/env.hpp>

 namespace migraph {
+inline namespace MIGRAPH_INLINE_NS {
 namespace gpu {

 MIGRAPH_DECLARE_ENV_VAR(MIGRAPH_COPY_LITERALS)
@@ -51,5 +51,7 @@ void write_literals::apply(program& p) const
         }
     }
 }

 } // namespace gpu
+} // namespace MIGRAPH_INLINE_NS
 } // namespace migraph
test/cpu_ops_test.cpp

@@ -3,7 +3,7 @@
 #include <migraph/literal.hpp>
 #include <migraph/operators.hpp>
 #include <migraph/instruction.hpp>
-#include <migraph/cpu/cpu_target.hpp>
+#include <migraph/cpu/target.hpp>
 #include <migraph/verify.hpp>
 #include "test.hpp"
@@ -18,7 +18,7 @@ void slice_test()
     p.add_instruction(migraph::op::slice{{2}, {1}, {3}}, l0);
     migraph::shape s2{migraph::shape::int32_type, {2, 2, 2}, {6, 3, 1}};
     EXPECT(p.get_shape() == s2);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     migraph::shape sresult{migraph::shape::int32_type, {2, 2, 2}, {4, 2, 1}};
     auto result = p.eval({});
     std::vector<int> gold = {1, 2, 4, 5, 7, 8, 10, 11};
@@ -36,7 +36,7 @@ void slice_test()
     p.add_instruction(migraph::op::slice{{0, 1, 2}, {0, 0, 0}, {2, 2, 2}}, l0);
     migraph::shape s2{migraph::shape::int32_type, {2, 2, 2}, {6, 3, 1}};
     EXPECT(p.get_shape() == s2);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     migraph::shape sresult{migraph::shape::int32_type, {2, 2, 2}, {4, 2, 1}};
     auto result = p.eval({});
     std::vector<int> gold = {0, 1, 3, 4, 6, 7, 9, 10};
@@ -62,7 +62,7 @@ void concat_test()
     auto l1 = p.add_literal(migraph::literal{s1, data1});
     auto l2 = p.add_literal(migraph::literal{s2, data2});
     p.add_instruction(migraph::op::concat{axis}, l0, l1, l2);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<int> gold = {0, 1, 2, 3, 4, 10, 5, 6, 7, 8, 9, 20};
     std::vector<int> results_vector(2 * 6);
@@ -85,7 +85,7 @@ void concat_test()
     auto l1 = p.add_literal(migraph::literal{s1, data1});
     auto l2 = p.add_literal(migraph::literal{s2, data2});
     p.add_instruction(migraph::op::concat{axis}, l0, l1, l2);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<int> gold = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
     std::vector<int> results_vector(6 * 2);
@@ -106,7 +106,7 @@ void squeeze_test()
     migraph::shape s2{migraph::shape::float_type, {4, 3, 1, 3}};
     auto l0 = p.add_literal(migraph::literal{s1, data});
     p.add_instruction(migraph::op::squeeze{{1}}, l0);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     EXPECT(result.get_shape() == s2);
 }
@@ -117,7 +117,7 @@ void squeeze_test()
     migraph::shape s2{migraph::shape::float_type, {4, 1, 3, 3}};
     auto l0 = p.add_literal(migraph::literal{s1, data});
     p.add_instruction(migraph::op::squeeze{{3}}, l0);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     EXPECT(result.get_shape() == s2);
 }
@@ -128,7 +128,7 @@ void squeeze_test()
     migraph::shape s2{migraph::shape::float_type, {4, 3, 3}};
     auto l0 = p.add_literal(migraph::literal{s1, data});
     p.add_instruction(migraph::op::squeeze{}, l0);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     EXPECT(result.get_shape() == s2);
 }
@@ -143,7 +143,7 @@ void unsqueeze_test()
     migraph::shape s2{migraph::shape::float_type, {4, 1, 3, 3}};
     auto l0 = p.add_literal(migraph::literal{s1, data});
     p.add_instruction(migraph::op::unsqueeze{{1}}, l0);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     EXPECT(result.get_shape() == s2);
 }
@@ -154,12 +154,52 @@ void unsqueeze_test()
     migraph::shape s2{migraph::shape::float_type, {4, 3, 1, 3}};
     auto l0 = p.add_literal(migraph::literal{s1, data});
     p.add_instruction(migraph::op::unsqueeze{{2}}, l0);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     EXPECT(result.get_shape() == s2);
 }
 }

+void globalavgpool_test()
+{
+    migraph::program p;
+    auto s  = migraph::shape{migraph::shape::float_type, {1, 3, 2, 2}};
+    auto op = migraph::op::pooling{"average"};
+    auto lens  = s.lens();
+    op.lengths = {lens[2], lens[3]};
+    std::vector<float> data{0.3, 0.2, 0.4, 0.1, 0.8, 0.5, 0.9, 0.1, 0.1, 0.7, 0.1, 0.6};
+    auto l0 = p.add_literal(migraph::literal{s, data});
+    p.add_instruction(op, l0);
+    p.compile(migraph::cpu::target{});
+    auto result = p.eval({});
+    std::vector<float> results_vector(3);
+    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold{0.25, 0.575, 0.375};
+    EXPECT(migraph::verify_range(results_vector, gold));
+}
+
+void globalmaxpool_test()
+{
+    migraph::program p;
+    auto s  = migraph::shape{migraph::shape::float_type, {1, 3, 2, 2}};
+    auto op = migraph::op::pooling{"max"};
+    auto lens  = s.lens();
+    op.lengths = {lens[2], lens[3]};
+    std::vector<float> data{0.3, 0.2, 0.4, 0.1, 0.8, 0.5, 0.9, 0.1, 0.1, 0.7, 0.1, 0.6};
+    auto l0 = p.add_literal(migraph::literal{s, data});
+    p.add_instruction(op, l0);
+    p.compile(migraph::cpu::target{});
+    auto result = p.eval({});
+    std::vector<float> results_vector(3);
+    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold{0.4, 0.9, 0.7};
+    EXPECT(migraph::verify_range(results_vector, gold));
+}
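For reference (not part of the diff), the gold vectors of the two new pooling tests can be checked by hand: global pooling reduces each 2x2 channel of the 1x3x2x2 input to a single value.

    channel 0: (0.3 + 0.2 + 0.4 + 0.1) / 4 = 0.25     max = 0.4
    channel 1: (0.8 + 0.5 + 0.9 + 0.1) / 4 = 0.575    max = 0.9
    channel 2: (0.1 + 0.7 + 0.1 + 0.6) / 4 = 0.375    max = 0.7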
 void im2col_3x3_no_pad_identity_test()
 {
     std::size_t f[2] = {3, 3};
@@ -179,7 +219,7 @@ void im2col_3x3_no_pad_identity_test()
     auto l_image   = p.add_literal(migraph::literal{s_image, input});
     auto l_weights = p.add_literal(migraph::literal{s_weights, weights});
     p.add_instruction(migraph::op::im2col{padding, stride, dilation}, l_image, l_weights);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::size_t col_height = (size[0] - f[0] + 2 * padding[0]) / stride[0] + 1;
@@ -208,7 +248,7 @@ void im2col_3x3_no_pad_test()
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -240,7 +280,7 @@ void im2col_3x3_stride_2_no_pad_test()
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -273,7 +313,7 @@ void im2col_3x3_with_padding_test()
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -315,7 +355,7 @@ void batch_norm_inference_test()
     p.add_instruction(migraph::op::batch_norm_inference{}, x, scale, bias, mean, variance);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> result_vector(width * height * channels * batches);
@@ -345,7 +385,7 @@ void im2col_3x3_with_channels_identity_test()
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -361,7 +401,7 @@ void exp_test()
     p.add_instruction(migraph::op::exp{}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -375,7 +415,7 @@ void sin_test()
     p.add_instruction(migraph::op::sin{}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -389,7 +429,7 @@ void cos_test()
     p.add_instruction(migraph::op::cos{}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -403,7 +443,7 @@ void tan_test()
     p.add_instruction(migraph::op::tan{}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -418,7 +458,7 @@ void add_test()
     p.add_instruction(migraph::op::add{}, l1, l2);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -437,7 +477,7 @@ void broadcast_test()
     p.add_instruction(migraph::op::broadcast{axis, l1->get_shape()}, l2);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     auto output = result.get<int32_t>();
     EXPECT(output(0, 0) == -2);
@@ -457,7 +497,7 @@ void add_broadcast_test()
     auto l3 = p.add_instruction(migraph::op::broadcast{axis, l1->get_shape()}, l2);
     p.add_instruction(migraph::op::add{}, l1, l3);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     EXPECT(result.get_shape().packed());
@@ -473,7 +513,7 @@ void sub_test()
     p.add_instruction(migraph::op::sub{}, l1, l2);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -488,7 +528,7 @@ void mul_test()
     p.add_instruction(migraph::op::mul{}, l1, l2);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -503,7 +543,7 @@ void div_test()
     p.add_instruction(migraph::op::div{}, l1, l2);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -516,8 +556,8 @@ void relu_test()
     migraph::program p;
     migraph::shape s{migraph::shape::float_type, {3}};
     auto l = p.add_literal(migraph::literal{s, {-1.f, 0.f, 1.f}});
-    p.add_instruction(migraph::op::activation{"relu"}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.add_instruction(migraph::op::relu{}, l);
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(3);
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
@@ -531,7 +571,7 @@ void leaky_relu_test()
     p.add_instruction(migraph::op::leaky_relu{0.01}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -565,7 +605,7 @@ void imagescaler_test()
     auto bias_bcast = p.add_instruction(migraph::op::broadcast{1, s}, bias_vals);
     p.add_instruction(migraph::op::add{}, img_scaled, bias_bcast);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -596,7 +636,7 @@ void reshape_test()
     std::vector<int64_t> new_shape = {8, 3, 1, 1};
     p.add_instruction(migraph::op::reshape{new_shape}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -607,7 +647,7 @@ void reshape_test()
     std::vector<int64_t> new_shape = {1, 3, 4, 2};
     p.add_instruction(migraph::op::reshape{new_shape}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -618,7 +658,7 @@ void reshape_test()
     std::vector<int64_t> new_shape = {1, 3, 4, 2};
     p.add_instruction(migraph::op::reshape{new_shape}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -666,7 +706,7 @@ void gemm_test()
     migraph::shape b_shape{migraph::shape::get_type<T>{}, {5, 3}};
     auto bl = p.add_literal(migraph::literal{b_shape, b});
     p.add_instruction(migraph::op::dot{}, al, bl);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -721,7 +761,7 @@ void maxpool_test()
     p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{3, 2}}}, al);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::cout << result.get_shape() << std::endl;
     std::vector<float> results_vector(36);
@@ -786,7 +826,7 @@ void softmax_test()
     p.add_instruction(migraph::op::softmax{}, al);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -848,7 +888,7 @@ void conv2d_test()
     p.add_instruction(migraph::op::convolution{}, al, cl);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -904,7 +944,7 @@ void conv2d_padding_test()
     p.add_instruction(migraph::op::convolution{{{1, 1}}, {{1, 1}}}, al, cl);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -965,7 +1005,7 @@ void conv2d_padding_stride_test()
     p.add_instruction(migraph::op::convolution{{{1, 1}}, {{2, 2}}}, al, cl);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -984,7 +1024,7 @@ void transpose_test()
     std::vector<int64_t> perm = {0, 3, 1, 2};
     p.add_instruction(migraph::op::transpose{perm}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -998,7 +1038,7 @@ void transpose_test()
     auto result = p.add_instruction(migraph::op::transpose{perm}, l);
     p.add_instruction(migraph::op::contiguous{}, result);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result2 = p.eval({});
@@ -1017,7 +1057,7 @@ void contiguous_test()
     p.add_instruction(migraph::op::contiguous{}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
@@ -1058,6 +1098,8 @@ int main()
     conv2d_padding_test();
     conv2d_padding_stride_test();
     batch_norm_inference_test();
+    globalavgpool_test();
+    globalmaxpool_test();
     im2col_3x3_no_pad_identity_test();
     im2col_3x3_no_pad_test();
     im2col_3x3_stride_2_no_pad_test();
test/eliminate_concat_test.cpp  (new file)

#include <migraph/eliminate_concat.hpp>
#include <migraph/dead_code_elimination.hpp>
#include <migraph/operators.hpp>
#include <basic_ops.hpp>
#include <test.hpp>

struct concat
{
    concat(std::size_t axis) { op.axis = axis; }

    migraph::op::concat op;

    std::string name() const { return "eliminate_concat::concat"; }
    migraph::shape compute_shape(std::vector<migraph::shape> inputs) const
    {
        return op.compute_shape(std::move(inputs));
    }
    migraph::argument compute(migraph::context&,
                              const migraph::shape& output_shape,
                              const std::vector<migraph::argument>&) const
    {
        return {output_shape};
    }
};

struct concat_test_optimization
{
    /// A unique name used to identify the concat optimization
    std::string name() const { return "eliminate_concat::concat"; }
    /// A unique name used to identify the allocate operator
    std::string allocate() const { return "allocate"; }
    /// Return the lowered concat operator
    migraph::op::concat get_concat(const migraph::operation& op) const
    {
        return migraph::any_cast<concat>(op).op;
    }
};

struct eliminate_concat_target
{
    std::size_t align = 32;
    std::string name() const { return "eliminate_target"; }
    std::vector<migraph::pass> get_passes(migraph::context&) const
    {
        return {migraph::eliminate_concat{concat_test_optimization{}},
                migraph::dead_code_elimination{}};
    }
    migraph::context get_context() const { return {}; }
};

struct allocate
{
    migraph::shape s{};
    std::string name() const { return "allocate"; }
    migraph::shape compute_shape(const std::vector<migraph::shape>& inputs) const
    {
        migraph::check_shapes{inputs}.has(0);
        return s;
    }
    migraph::argument compute(migraph::context&,
                              const migraph::shape& output_shape,
                              const std::vector<migraph::argument>&) const
    {
        return {output_shape};
    }
};

struct fred_op
{
    std::string name() const { return "fred_op"; }
    migraph::shape compute_shape(const std::vector<migraph::shape>& inputs) const
    {
        migraph::check_shapes{inputs}.has(1);
        return inputs.at(0);
    }
    migraph::argument compute(migraph::context&,
                              const migraph::shape&,
                              const std::vector<migraph::argument>& args) const
    {
        return args.at(0);
    }
};

void basic()
{
    auto create_test_program = []() {
        migraph::program p;
        auto a1 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {1, 2, 8, 8}}});
        auto p1 = p.add_instruction(fred_op{}, a1);
        auto a2 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {1, 3, 8, 8}}});
        auto p2 = p.add_instruction(fred_op{}, a2);
        auto a3 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {1, 5, 8, 8}}});
        auto p3 = p.add_instruction(fred_op{}, a3);
        std::size_t axis = 1;
        auto a4 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {1, 10, 8, 8}}});
        p.add_instruction(concat(axis), p1, p2, p3, a4);
        return p;
    };
    auto create_control_program = []() {
        migraph::program p;
        auto a1 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {1, 10, 8, 8}}});
        auto l1 = p.add_instruction(
            migraph::op::load{migraph::shape{migraph::shape::float_type, {1, 2, 8, 8}}, 0}, {a1});
        auto p1 = p.add_instruction(fred_op{}, l1);
        auto l2 = p.add_instruction(
            migraph::op::load{migraph::shape{migraph::shape::float_type, {1, 3, 8, 8}}, 512}, {a1});
        auto p2 = p.add_instruction(fred_op{}, l2);
        auto l3 = p.add_instruction(
            migraph::op::load{migraph::shape{migraph::shape::float_type, {1, 5, 8, 8}}, 1280}, {a1});
        auto p3 = p.add_instruction(fred_op{}, l3);
        p.add_instruction(migraph::op::identity{}, {a1, p1, p2, p3});
        return p;
    };
    auto p1 = create_test_program();
    auto p2 = create_control_program();
    p1.compile(eliminate_concat_target{});
    EXPECT(p1 == p2);
}

void wont_work()
{
    auto create_test_program = []() {
        migraph::program p;
        auto a1 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {2, 2, 8, 8}}});
        auto p1 = p.add_instruction(fred_op{}, a1);
        auto a2 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {2, 3, 8, 8}}});
        auto p2 = p.add_instruction(fred_op{}, a2);
        auto a3 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {2, 5, 8, 8}}});
        auto p3 = p.add_instruction(fred_op{}, a3);
        std::size_t axis = 1;
        auto a4 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {2, 10, 8, 8}}});
        p.add_instruction(concat(axis), p1, p2, p3, a4);
        return p;
    };
    auto create_control_program = []() {
        migraph::program p;
        auto a1 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {2, 2, 8, 8}}});
        auto p1 = p.add_instruction(fred_op{}, a1);
        auto a2 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {2, 3, 8, 8}}});
        auto p2 = p.add_instruction(fred_op{}, a2);
        auto a3 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {2, 5, 8, 8}}});
        auto p3 = p.add_instruction(fred_op{}, a3);
        std::size_t axis = 1;
        auto a4 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {2, 10, 8, 8}}});
        p.add_instruction(concat(axis), p1, p2, p3, a4);
        return p;
    };
    auto p1 = create_test_program();
    auto p2 = create_control_program();
    p1.compile(eliminate_concat_target{});
    EXPECT(p1 == p2);
}

int main()
{
    basic();
    wont_work();
}
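For reference (not part of the diff), the load offsets in create_control_program are byte offsets into the single {1, 10, 8, 8} float buffer, i.e. the running sum of the concatenated inputs' sizes along axis 1:

    {1, 2, 8, 8}: 2 * 8 * 8 * 4 bytes =  512  -> loaded at offset 0
    {1, 3, 8, 8}: 3 * 8 * 8 * 4 bytes =  768  -> loaded at offset 512
    {1, 5, 8, 8}: 5 * 8 * 8 * 4 bytes = 1280  -> loaded at offset 512 + 768 = 1280

Together they fill the 10 * 8 * 8 * 4 = 2560-byte output allocation, which is why eliminate_concat can drop the per-input allocations entirely.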
test/eval_test.cpp

@@ -104,6 +104,21 @@ void param_test()
     EXPECT(result != migraph::literal{4});
 }

+void param_error_test()
+{
+    migraph::program p;
+    auto x = p.add_parameter("x", {migraph::shape::int64_type});
+    auto y = p.add_parameter("y", {migraph::shape::int64_type});
+    p.add_instruction(sum_op{}, x, y);
+    EXPECT(test::throws<migraph::exception>(
+        [&] { p.eval({{"x", migraph::literal{1}.get_argument()}}); },
+        "Parameter not found: y"));
+}
+
 void replace_test()
 {
     migraph::program p;
@@ -215,6 +230,7 @@ int main()
     literal_test2();
     print_test();
     param_test();
+    param_error_test();
     replace_test();
     replace_ins_test();
     replace_ins_test2();
test/fwd_conv_batchnorm_rewrite_test.cpp

 #include <migraph/fwd_conv_batchnorm_rewrite.hpp>
 #include <migraph/program.hpp>
-#include <migraph/cpu/cpu_target.hpp>
+#include <migraph/cpu/target.hpp>
 #include <migraph/operators.hpp>
 #include <migraph/instruction.hpp>
 #include <test.hpp>
@@ -51,8 +51,8 @@ void fwd_conv_batchnorm_rewrite_test()
     migraph::program p2 = create_program();
     migraph::fwd_conv_batchnorm_rewrite opt;
     opt.apply(p2);
-    p1.compile(migraph::cpu::cpu_target{});
-    p2.compile(migraph::cpu::cpu_target{});
+    p1.compile(migraph::cpu::target{});
+    p2.compile(migraph::cpu::target{});
     auto result1 = p1.eval({});
     auto result2 = p2.eval({});
test/gpu/miopen.cpp

@@ -2,7 +2,7 @@
 #include <migraph/program.hpp>
 #include <migraph/operators.hpp>
 #include <migraph/generate.hpp>
-#include <migraph/cpu/cpu_target.hpp>
+#include <migraph/cpu/target.hpp>
 #include <migraph/gpu/target.hpp>
 #include <migraph/gpu/miopen.hpp>
 #include <migraph/gpu/hip.hpp>
@@ -100,7 +100,7 @@ migraph::argument run_cpu(migraph::program& p)
     V v;
     p = v.create_program();
     auto_print pp{p, 0};
-    compile_check(p, migraph::cpu::cpu_target{});
+    compile_check(p, migraph::cpu::target{});
     migraph::program::parameter_map m;
     for(auto&& x : p.get_parameter_shapes())
     {
@@ -158,7 +158,7 @@ struct test_literals
         auto weights =
             p.add_literal(generate_literal(migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}}));
         auto conv = p.add_instruction(migraph::op::convolution{}, input, weights);
-        p.add_instruction(migraph::op::activation{"relu"}, conv);
+        p.add_instruction(migraph::op::relu{}, conv);
         return p;
     }
 };
@@ -407,7 +407,7 @@ struct test_conv_relu
         auto weights = p.add_parameter("w", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
         auto conv    = p.add_instruction(migraph::op::convolution{}, input, weights);
-        p.add_instruction(migraph::op::activation{"relu"}, conv);
+        p.add_instruction(migraph::op::relu{}, conv);
         return p;
     }
 };
@@ -421,7 +421,7 @@ struct test_conv_relu_half
         auto weights = p.add_parameter("w", migraph::shape{migraph::shape::half_type, {4, 3, 3, 3}});
         auto conv    = p.add_instruction(migraph::op::convolution{}, input, weights);
-        p.add_instruction(migraph::op::activation{"relu"}, conv);
+        p.add_instruction(migraph::op::relu{}, conv);
         return p;
     }
 };
@@ -434,7 +434,7 @@ struct test_add_relu
         auto x   = p.add_parameter("x", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
         auto y   = p.add_parameter("y", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
         auto add = p.add_instruction(migraph::op::add{}, x, y);
-        p.add_instruction(migraph::op::activation{"relu"}, add);
+        p.add_instruction(migraph::op::relu{}, add);
         return p;
     }
 };
@@ -461,7 +461,37 @@ struct test_conv_pooling
             p.add_parameter("w", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
         auto conv    = p.add_instruction(migraph::op::convolution{}, input, weights);
         auto pooling = p.add_instruction(migraph::op::pooling{"max"}, conv);
-        p.add_instruction(migraph::op::activation{"relu"}, pooling);
+        p.add_instruction(migraph::op::relu{}, pooling);
         return p;
     }
 };

+struct test_global_avg_pooling
+{
+    migraph::program create_program() const
+    {
+        migraph::program p;
+        auto input =
+            p.add_parameter("x", migraph::shape{migraph::shape::float_type, {1, 3, 16, 16}});
+        auto op    = migraph::op::pooling{"average"};
+        auto lens  = input->get_shape().lens();
+        op.lengths = {lens[2], lens[3]};
+        p.add_instruction(op, input);
+        return p;
+    }
+};
+
+struct test_global_max_pooling
+{
+    migraph::program create_program() const
+    {
+        migraph::program p;
+        auto input =
+            p.add_parameter("x", migraph::shape{migraph::shape::float_type, {1, 3, 16, 16}});
+        auto op    = migraph::op::pooling{"max"};
+        auto lens  = input->get_shape().lens();
+        op.lengths = {lens[2], lens[3]};
+        p.add_instruction(op, input);
+        return p;
+    }
+};
@@ -642,7 +672,7 @@ struct test_conv_bn_relu_pooling
         auto variance = p.add_literal(migraph::abs(migraph::generate_literal(vars, 4)));
         auto bn   = p.add_instruction(
             migraph::op::batch_norm_inference{}, conv, scale, bias, mean, variance);
-        auto relu = p.add_instruction(migraph::op::activation{"relu"}, bn);
+        auto relu = p.add_instruction(migraph::op::relu{}, bn);
         p.add_instruction(migraph::op::pooling{"average", {1, 1}, {2, 2}, {3, 3}}, relu);
         return p;
     }
@@ -682,6 +712,73 @@ struct test_concat2
     }
 };

+struct test_concat_relu
+{
+    migraph::program create_program() const
+    {
+        migraph::program p;
+        std::size_t axis = 0;
+        migraph::shape s0{migraph::shape::float_type, {2, 2}};
+        migraph::shape s1{migraph::shape::float_type, {3, 2}};
+        migraph::shape s2{migraph::shape::float_type, {1, 2}};
+        auto l0 = p.add_parameter("x", s0);
+        auto l1 = p.add_parameter("y", s1);
+        auto l2 = p.add_parameter("z", s2);
+        auto r0 = p.add_instruction(migraph::op::relu{}, l0);
+        auto r1 = p.add_instruction(migraph::op::relu{}, l1);
+        auto r2 = p.add_instruction(migraph::op::relu{}, l2);
+        auto c0 = p.add_instruction(migraph::op::concat{axis}, r0, r1, r2);
+        p.add_instruction(migraph::op::relu{}, c0);
+        return p;
+    }
+};
+
+void manual_identity()
+{
+    migraph::program p;
+    std::vector<float> data0 = {0, 1, 2, 3};
+    migraph::shape s0{migraph::shape::float_type, {2, 2}};
+    auto l0 = p.add_literal(migraph::literal{s0, data0});
+    p.add_instruction(migraph::op::identity{}, l0);
+    p.compile(migraph::gpu::target{});
+    migraph::program::parameter_map m;
+    for(auto&& x : p.get_parameter_shapes())
+    {
+        m[x.first] = migraph::gpu::to_gpu(migraph::generate_argument(x.second));
+    }
+    auto result = migraph::gpu::from_gpu(p.eval(m));
+    std::cout << result << std::endl;
+}
+
+void manual_test_concat_relu()
+{
+    migraph::program p;
+    std::size_t axis         = 0;
+    std::vector<float> data0 = {0, 1, 2, 3};
+    std::vector<float> data1 = {4, 5, 6, 7, 8, 9};
+    std::vector<float> data2 = {10, 11};
+    migraph::shape s0{migraph::shape::float_type, {2, 2}};
+    migraph::shape s1{migraph::shape::float_type, {3, 2}};
+    migraph::shape s2{migraph::shape::float_type, {1, 2}};
+    auto l0 = p.add_literal(migraph::literal{s0, data0});
+    auto l1 = p.add_literal(migraph::literal{s1, data1});
+    auto l2 = p.add_literal(migraph::literal{s2, data2});
+    auto r0 = p.add_instruction(migraph::op::relu{}, l0);
+    auto r1 = p.add_instruction(migraph::op::relu{}, l1);
+    auto r2 = p.add_instruction(migraph::op::relu{}, l2);
+    auto c0 = p.add_instruction(migraph::op::concat{axis}, r0, r1, r2);
+    p.add_instruction(migraph::op::relu{}, c0);
+    p.compile(migraph::gpu::target{});
+    migraph::program::parameter_map m;
+    for(auto&& x : p.get_parameter_shapes())
+    {
+        m[x.first] = migraph::gpu::to_gpu(migraph::generate_argument(x.second));
+    }
+    auto result = migraph::gpu::from_gpu(p.eval(m));
+    std::cout << result << std::endl;
+}
+
 struct test_conv_bn_relu_pooling2
 {
     static migraph::instruction_ref
@@ -712,7 +809,7 @@ struct test_conv_bn_relu_pooling2
         auto conv2 =
             p.add_instruction(migraph::op::convolution{{0, 0}, {2, 2}, {1, 1}}, x2, w2);
         auto bn2  = add_bn(p, conv2, 2048);
         auto add  = p.add_instruction(migraph::op::add{}, bn1, bn2);
-        auto relu = p.add_instruction(migraph::op::activation{"relu"}, add);
+        auto relu = p.add_instruction(migraph::op::relu{}, add);
         p.add_instruction(migraph::op::pooling{"average", {1, 1}, {2, 2}, {3, 3}}, relu);
         return p;
     }
@@ -722,6 +819,7 @@ int main()
     verify_program<test_concat>();
     verify_program<test_concat2>();
+    verify_program<test_concat_relu>();
     verify_program<test_add>();
     verify_program<test_add_half>();
     verify_program<test_mul>();
@@ -743,6 +841,8 @@ int main()
     verify_program<test_add_relu>();
     verify_program<test_leaky_relu>();
     verify_program<test_conv_pooling>();
+    verify_program<test_global_avg_pooling>();
+    verify_program<test_global_max_pooling>();
     verify_program<test_gemm>();
     // verify_program<test_gemm_ld>();
     verify_program<test_gemm_transposeb>();
test/include/basic_ops.hpp

@@ -79,6 +79,7 @@ struct pass_op
             return {};
         return inputs.front();
     }
+    int output_alias(const std::vector<migraph::shape>&) const { return 0; }
 };

 struct pass_standard_op
@@ -103,6 +104,7 @@ struct pass_standard_op
             return {};
        return inputs.front();
     }
+    int output_alias(const std::vector<migraph::shape>&) const { return 0; }
 };

 struct nop
test/include/test.hpp

@@ -140,7 +140,7 @@ bool throws(F f)
     }
 }

-template <class F, class Exception>
+template <class Exception, class F>
 bool throws(F f, const std::string& msg = "")
 {
     try
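The reordering above matters because callers name the exception type explicitly while the callable type is deduced; with the old <class F, class Exception> order the exception type could not be supplied without also spelling out the lambda's type. A usage sketch matching the new param_error_test in test/eval_test.cpp (assuming, as that test implies, that the optional msg argument is matched against the thrown exception's message):

    // Exception is given explicitly, F is deduced from the lambda:
    EXPECT(test::throws<migraph::exception>(
        [&] { p.eval({{"x", migraph::literal{1}.get_argument()}}); },
        "Parameter not found: y"));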
test/memory_coloring_test.cpp

 #include <migraph/memory_coloring.hpp>
 #include <migraph/operators.hpp>
 #include <migraph/generate.hpp>
 #include <migraph/instruction.hpp>
 #include <basic_ops.hpp>
 #include <test.hpp>
@@ -9,7 +10,7 @@ struct memory_coloring_target
     std::string name() const { return "memory_coloring"; }
     std::vector<migraph::pass> get_passes(migraph::context&) const
     {
-        return {migraph::memory_coloring{"allocate"}};
+        return {migraph::memory_coloring{"allocate", true}};
     }
     migraph::context get_context() const { return {}; }
 };
@@ -31,86 +32,570 @@ struct allocate
     }
 };

-// A custom test operator that takes a single argument and an allocation
-// This operator's output is an operand alias of argument 1
-struct pass_memory
-{
-    std::string name() const { return "memory_coloring::pass_memory"; }
-    migraph::shape compute_shape(const std::vector<migraph::shape>& inputs) const
-    {
-        migraph::check_shapes{inputs, *this}.has(2);
-        return inputs.at(1);
-    }
-    migraph::argument compute(migraph::context&,
-                              const migraph::shape&,
-                              const std::vector<migraph::argument>& args) const
-    {
-        return args[1];
-    }
-};
+migraph::instruction_ref add_alloc(migraph::program& p, const migraph::shape& s)
+{
+    auto a0 = p.add_outline(s);
+    return p.add_instruction(allocate{}, a0);
+}
+
+bool no_allocate(const migraph::program& p)
+{
+    return std::none_of(p.begin(), p.end(), [](auto&& ins) { return ins.name() == "allocate"; });
+}

+// The previous existing test
 void test1()
 {
     migraph::program p;
-    auto a0 = p.add_outline(migraph::shape{migraph::shape::float_type, {8}});
-    auto a1 = p.add_instruction(allocate{}, a0);
+    auto a1 = add_alloc(p, {migraph::shape::float_type, {8}});
     auto p1 = p.add_instruction(pass_op{}, a1);
-    auto a2 = p.add_outline(migraph::shape{migraph::shape::float_type, {40}});
-    auto p2 = p.add_instruction(allocate{}, a2);
-    p.add_instruction(pass_op{}, p2, p1);
+    auto a2 = add_alloc(p, {migraph::shape::float_type, {40}});
+    p.add_instruction(pass_op{}, a2, p1);
     p.compile(memory_coloring_target{});
-    EXPECT(p.get_parameter_shape("scratch").bytes() == 192);
+    CHECK(p.get_parameter_shape("scratch").bytes() == 192);
+    CHECK(no_allocate(p));
 }
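For reference (not part of the diff), the 192-byte scratch expected by test1 is just the two buffers laid out together: the {8}-float allocation needs 8 * 4 = 32 bytes and the {40}-float allocation needs 40 * 4 = 160 bytes; because the second pass_op reads p1 (which aliases the first buffer) while writing into the second, both are live at once, so the coloring needs 32 + 160 = 192 bytes.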
// This test uses the pass_memory operator
void
test2
()
{
migraph
::
program
p
;
auto
input
=
p
.
add_parameter
(
"input"
,
migraph
::
shape
{
migraph
::
shape
::
float_type
,
{
16
}});
auto
a0
=
p
.
add_outline
(
migraph
::
shape
{
migraph
::
shape
::
float_type
,
{
128
}});
auto
a1
=
p
.
add_instruction
(
allocate
{},
a0
);
auto
p1
=
p
.
add_instruction
(
pass_memory
{},
input
,
a1
);
auto
a2
=
p
.
add_outline
(
migraph
::
shape
{
migraph
::
shape
::
float_type
,
{
40
}});
auto
p2
=
p
.
add_instruction
(
allocate
{},
a2
);
p
.
add_instruction
(
pass_memory
{},
p1
,
p2
);
auto
a1
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
128
}});
auto
p1
=
p
.
add_instruction
(
pass_op
{},
a1
,
input
);
auto
p2
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
40
}});
p
.
add_instruction
(
pass_op
{},
p2
,
p1
);
p
.
compile
(
memory_coloring_target
{});
EXPECT
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
672
);
CHECK
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
672
);
CHECK
(
no_allocate
(
p
));
}
// This test uses the pass_memory operator with two memory allocation passed together.
// This is similar to allocations done for workspaces, that is one allocation is aliased and the
// other is just used
void
test3
()
{
migraph
::
program
p
;
auto
a0
=
p
.
add_outline
(
migraph
::
shape
{
migraph
::
shape
::
float_type
,
{
8
}});
auto
a1
=
p
.
add_instruction
(
allocate
{},
a0
);
auto
a2
=
p
.
add_outline
(
migraph
::
shape
{
migraph
::
shape
::
float_type
,
{
128
}});
auto
p2
=
p
.
add_instruction
(
allocate
{},
a2
);
auto
p1
=
p
.
add_instruction
(
pass_memory
{},
a1
,
p2
);
auto
a3
=
p
.
add_outline
(
migraph
::
shape
{
migraph
::
shape
::
float_type
,
{
40
}});
auto
p3
=
p
.
add_instruction
(
allocate
{},
a3
);
p
.
add_instruction
(
pass_memory
{},
p1
,
p3
);
auto
a1
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
auto
p2
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
128
}});
auto
p1
=
p
.
add_instruction
(
pass_op
{},
p2
,
a1
);
auto
p3
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
40
}});
p
.
add_instruction
(
pass_op
{},
p3
,
p1
);
p
.
compile
(
memory_coloring_target
{});
EXPECT
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
704
);
CHECK
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
704
);
// The optimal solution is actually 672
CHECK
(
no_allocate
(
p
));
}
// Like the previous test, but this tests a zero workspace memory allocation
void
test4
()
{
migraph
::
program
p
;
auto
a0
=
p
.
add_outline
(
migraph
::
shape
{
migraph
::
shape
::
float_type
,
{
0
}});
auto
a1
=
p
.
add_instruction
(
allocate
{},
a0
);
auto
a2
=
p
.
add_outline
(
migraph
::
shape
{
migraph
::
shape
::
float_type
,
{
128
}});
auto
p2
=
p
.
add_instruction
(
allocate
{},
a2
);
auto
p1
=
p
.
add_instruction
(
pass_memory
{},
a1
,
p2
);
auto
a3
=
p
.
add_outline
(
migraph
::
shape
{
migraph
::
shape
::
float_type
,
{
40
}});
auto
p3
=
p
.
add_instruction
(
allocate
{},
a3
);
p
.
add_instruction
(
pass_memory
{},
p1
,
p3
);
auto
a1
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
0
}});
auto
p2
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
128
}});
auto
p1
=
p
.
add_instruction
(
pass_op
{},
p2
,
a1
);
auto
p3
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
40
}});
p
.
add_instruction
(
pass_op
{},
p3
,
p1
);
p
.
compile
(
memory_coloring_target
{});
CHECK
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
672
);
CHECK
(
no_allocate
(
p
));
}
void
test5
()
{
migraph
::
program
p
;
auto
a1
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
40
}});
auto
p1
=
p
.
add_instruction
(
pass_op
{},
a1
);
auto
p2
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
p
.
add_instruction
(
pass_op
{},
p2
,
p1
);
p
.
compile
(
memory_coloring_target
{});
CHECK
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
192
);
CHECK
(
no_allocate
(
p
));
}
void
test6
()
{
migraph
::
program
p
;
auto
a1
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
auto
p1
=
p
.
add_instruction
(
pass_op
{},
a1
);
auto
p2
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
40
}});
auto
p3
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
40
}});
p
.
add_instruction
(
pass_op
{},
p3
,
p2
,
p1
);
p
.
compile
(
memory_coloring_target
{});
CHECK
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
352
);
CHECK
(
no_allocate
(
p
));
}
void
test7
()
{
migraph
::
program
p
;
auto
a1
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
auto
p1
=
p
.
add_instruction
(
pass_op
{},
a1
);
auto
p2
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
40
}});
auto
p3
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
p
.
add_instruction
(
pass_op
{},
p3
,
p2
,
p1
);
p
.
compile
(
memory_coloring_target
{});
CHECK
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
224
);
CHECK
(
no_allocate
(
p
));
}
void
test8
()
{
migraph
::
program
p
;
auto
a1
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
auto
p1
=
p
.
add_instruction
(
pass_op
{},
a1
);
auto
p2
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
40
}});
auto
p3
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
192
}});
p
.
add_instruction
(
pass_op
{},
p3
,
p2
,
p1
);
p
.
compile
(
memory_coloring_target
{});
CHECK
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
960
);
CHECK
(
no_allocate
(
p
));
}
void
test9
()
{
migraph
::
program
p
;
auto
a1
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
auto
p1
=
p
.
add_instruction
(
pass_op
{},
a1
);
auto
p2
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
auto
p3
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
p
.
add_instruction
(
pass_op
{},
p3
,
p2
,
p1
);
p
.
compile
(
memory_coloring_target
{});
CHECK
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
96
);
CHECK
(
no_allocate
(
p
));
}
void
test10
()
{
migraph
::
program
p
;
auto
a1
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
p
.
add_instruction
(
pass_op
{},
a1
);
p
.
compile
(
memory_coloring_target
{});
CHECK
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
32
);
CHECK
(
no_allocate
(
p
));
}
void
test11
()
{
migraph
::
program
p
;
auto
a1
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
auto
p1
=
p
.
add_instruction
(
pass_op
{},
a1
);
auto
a2
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
40
}});
auto
a3
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
auto
p2
=
p
.
add_instruction
(
pass_op
{},
a2
,
p1
);
p
.
add_instruction
(
pass_op
{},
a3
,
p2
);
p
.
compile
(
memory_coloring_target
{});
CHECK
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
224
);
CHECK
(
no_allocate
(
p
));
}
void
test12
()
{
migraph
::
program
p
;
auto
a1
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
40
}});
auto
p1
=
p
.
add_instruction
(
pass_op
{},
a1
);
auto
a2
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
auto
a3
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
40
}});
auto
p2
=
p
.
add_instruction
(
pass_op
{},
a2
,
p1
);
p
.
add_instruction
(
pass_op
{},
a3
,
p2
);
p
.
compile
(
memory_coloring_target
{});
CHECK
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
352
);
CHECK
(
no_allocate
(
p
));
}
void
test13
()
{
migraph
::
program
p
;
auto
a1
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
auto
a3
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
auto
p1
=
p
.
add_instruction
(
pass_op
{},
a1
);
auto
a2
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
40
}});
auto
p2
=
p
.
add_instruction
(
pass_op
{},
a2
,
p1
);
p
.
add_instruction
(
pass_op
{},
a3
,
p2
);
p
.
compile
(
memory_coloring_target
{});
CHECK
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
224
);
CHECK
(
no_allocate
(
p
));
}
void
test14
()
{
migraph
::
program
p
;
auto
a3
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
auto
a2
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
40
}});
auto
a1
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
auto
p1
=
p
.
add_instruction
(
pass_op
{},
a1
);
auto
p2
=
p
.
add_instruction
(
pass_op
{},
a2
,
p1
);
p
.
add_instruction
(
pass_op
{},
a3
,
p2
);
p
.
compile
(
memory_coloring_target
{});
CHECK
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
224
);
CHECK
(
no_allocate
(
p
));
}
void
test15
()
{
migraph
::
program
p
;
auto
a1
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
auto
p1
=
p
.
add_instruction
(
pass_op
{},
a1
);
auto
a2
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
40
}});
auto
p2
=
p
.
add_instruction
(
pass_op
{},
a2
);
auto
a3
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
40
}});
p
.
add_instruction
(
pass_op
{},
a3
,
p1
,
p2
);
p
.
compile
(
memory_coloring_target
{});
CHECK
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
352
);
CHECK
(
no_allocate
(
p
));
}
void
test16
()
{
migraph
::
program
p
;
auto
a1
=
p
.
add_literal
(
migraph
::
generate_literal
({
migraph
::
shape
::
float_type
,
{
8
}}));
auto
p1
=
p
.
add_instruction
(
pass_op
{},
a1
);
auto
a2
=
p
.
add_literal
(
migraph
::
generate_literal
({
migraph
::
shape
::
float_type
,
{
40
}}));
auto
p2
=
p
.
add_instruction
(
pass_op
{},
a2
);
auto
a3
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
40
}});
p
.
add_instruction
(
pass_op
{},
a3
,
p1
,
p2
);
p
.
compile
(
memory_coloring_target
{});
CHECK
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
160
);
CHECK
(
no_allocate
(
p
));
}
void
test17
()
{
migraph
::
program
p
;
auto
a3
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
40
}});
auto
a1
=
p
.
add_literal
(
migraph
::
generate_literal
({
migraph
::
shape
::
float_type
,
{
8
}}));
auto
p1
=
p
.
add_instruction
(
pass_op
{},
a1
);
auto
a2
=
p
.
add_literal
(
migraph
::
generate_literal
({
migraph
::
shape
::
float_type
,
{
40
}}));
auto
p2
=
p
.
add_instruction
(
pass_op
{},
a2
);
p
.
add_instruction
(
pass_op
{},
a3
,
p1
,
p2
);
p
.
compile
(
memory_coloring_target
{});
CHECK
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
160
);
CHECK
(
no_allocate
(
p
));
}
void
test18
()
{
migraph
::
program
p
;
auto
a1
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
auto
p1
=
p
.
add_instruction
(
pass_op
{},
a1
);
auto
p2
=
p
.
add_instruction
(
pass_op
{},
a1
,
p1
);
auto
p3
=
p
.
add_instruction
(
pass_op
{},
p2
,
p1
);
auto
a2
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
40
}});
p
.
add_instruction
(
pass_op
{},
a2
,
p1
,
p2
,
p3
);
p
.
compile
(
memory_coloring_target
{});
CHECK
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
192
);
CHECK
(
no_allocate
(
p
));
}
void
test19
()
{
migraph
::
program
p
;
auto
a1
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
auto
p1
=
p
.
add_instruction
(
pass_op
{},
a1
);
auto
a2
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
40
}});
auto
p2
=
p
.
add_instruction
(
pass_op
{},
a2
,
p1
);
auto
a3
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
40
}});
p
.
add_instruction
(
pass_op
{},
a3
,
p2
,
p1
);
p
.
compile
(
memory_coloring_target
{});
CHECK
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
352
);
CHECK
(
no_allocate
(
p
));
}
void
test20
()
{
migraph
::
program
p
;
auto
a1
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
32
}});
auto
a2
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
32
}});
auto
a3
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
32
}});
auto
p1
=
p
.
add_instruction
(
pass_op
{},
a1
,
a2
,
a3
);
auto
a4
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
32
}});
p
.
add_instruction
(
pass_op
{},
a4
,
p1
);
p
.
compile
(
memory_coloring_target
{});
CHECK
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
384
);
CHECK
(
no_allocate
(
p
));
}
void
test21
()
{
migraph
::
program
p
;
auto
a1
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
32
}});
auto
a2
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
auto
a3
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
32
}});
auto
p1
=
p
.
add_instruction
(
pass_op
{},
a1
,
a2
,
a3
);
auto
a4
=
add_alloc
(
p
,
{
migraph
::
shape
::
float_type
,
{
8
}});
p
.
add_instruction
(
pass_op
{},
a4
,
p1
);
p
.
compile
(
memory_coloring_target
{});
CHECK
(
p
.
get_parameter_shape
(
"scratch"
).
bytes
()
==
288
);
CHECK
(
no_allocate
(
p
));
}
void test22()
{
    migraph::program p;
    auto a1 = add_alloc(p, {migraph::shape::float_type, {32}});
    auto a2 = add_alloc(p, {migraph::shape::float_type, {32}});
    auto a3 = add_alloc(p, {migraph::shape::float_type, {8}});
    auto p1 = p.add_instruction(pass_op{}, a1, a2, a3);
    auto a4 = add_alloc(p, {migraph::shape::float_type, {8}});
    p.add_instruction(pass_op{}, a4, p1);
    p.compile(memory_coloring_target{});
    CHECK(p.get_parameter_shape("scratch").bytes() == 288);
    CHECK(no_allocate(p));
}
void test23()
{
    migraph::program p;
    auto a1 = add_alloc(p, {migraph::shape::float_type, {8}});
    auto a2 = add_alloc(p, {migraph::shape::float_type, {32}});
    auto a3 = add_alloc(p, {migraph::shape::float_type, {32}});
    auto p1 = p.add_instruction(pass_op{}, a1, a2, a3);
    auto a4 = add_alloc(p, {migraph::shape::float_type, {8}});
    p.add_instruction(pass_op{}, a4, p1);
    p.compile(memory_coloring_target{});
    CHECK(p.get_parameter_shape("scratch").bytes() == 288);
    CHECK(no_allocate(p));
}
void test24()
{
    migraph::program p;
    auto a1 = add_alloc(p, {migraph::shape::float_type, {32}});
    auto a2 = add_alloc(p, {migraph::shape::float_type, {32}});
    auto a3 = add_alloc(p, {migraph::shape::float_type, {32}});
    auto p1 = p.add_instruction(pass_op{}, a1, a2, a3);
    auto a4 = add_alloc(p, {migraph::shape::float_type, {8}});
    p.add_instruction(pass_op{}, a4, p1);
    p.compile(memory_coloring_target{});
    CHECK(p.get_parameter_shape("scratch").bytes() == 384);
    CHECK(no_allocate(p));
}
void test25()
{
    migraph::program p;
    auto a1 = add_alloc(p, {migraph::shape::float_type, {8}});
    p.add_instruction(nop{});
    auto p1 = p.add_instruction(pass_op{}, a1);
    p.add_instruction(nop{});
    auto a2 = add_alloc(p, {migraph::shape::float_type, {40}});
    p.add_instruction(pass_op{}, a2, p1);
    p.compile(memory_coloring_target{});
    CHECK(p.get_parameter_shape("scratch").bytes() == 192);
    CHECK(no_allocate(p));
}
void test26()
{
    migraph::program p;
    auto a1 = add_alloc(p, {migraph::shape::float_type, {8}});
    p.add_instruction(nop{}, a1);
    auto p1 = p.add_instruction(pass_op{}, a1);
    p.add_instruction(nop{}, a1, p1);
    auto a2 = add_alloc(p, {migraph::shape::float_type, {40}});
    p.add_instruction(pass_op{}, a2, p1);
    p.compile(memory_coloring_target{});
    CHECK(p.get_parameter_shape("scratch").bytes() == 192);
    CHECK(no_allocate(p));
}
void test27()
{
    migraph::program p;
    auto a1 = add_alloc(p, {migraph::shape::float_type, {8}});
    auto p1 = p.add_instruction(pass_op{}, a1);
    auto a2 = add_alloc(p, {migraph::shape::float_type, {40}});
    p.add_instruction(nop{}, a2, p1);
    p.compile(memory_coloring_target{});
    CHECK(p.get_parameter_shape("scratch").bytes() == 192);
    CHECK(no_allocate(p));
}
void test28()
{
    migraph::program p;
    auto output = p.add_parameter("output", {migraph::shape::float_type, {8}});
    auto a1     = add_alloc(p, {migraph::shape::float_type, {8}});
    auto p1     = p.add_instruction(pass_op{}, a1);
    auto a2     = add_alloc(p, {migraph::shape::float_type, {40}});
    auto p2     = p.add_instruction(pass_op{}, a2, p1);
    p.add_instruction(pass_op{}, p2, output);
    p.compile(memory_coloring_target{});
    CHECK(p.get_parameter_shape("scratch").bytes() == 192);
    CHECK(no_allocate(p));
}
void test29()
{
    migraph::program p;
    auto output = p.add_parameter("output", {migraph::shape::float_type, {8}});
    auto a1     = add_alloc(p, {migraph::shape::float_type, {8}});
    auto p1     = p.add_instruction(pass_op{}, a1);
    auto a2     = add_alloc(p, {migraph::shape::float_type, {40}});
    auto p2     = p.add_instruction(pass_op{}, a2, p1);
    p.move_instruction(output, p2);
    p.add_instruction(pass_op{}, p2, output);
    p.compile(memory_coloring_target{});
    CHECK(p.get_parameter_shape("scratch").bytes() == 192);
    CHECK(no_allocate(p));
}
void test30()
{
    migraph::program p;
    auto output = p.add_parameter("x", {migraph::shape::float_type, {8}});
    auto a1     = add_alloc(p, {migraph::shape::float_type, {8}});
    auto p1     = p.add_instruction(pass_op{}, a1);
    auto a2     = add_alloc(p, {migraph::shape::float_type, {40}});
    auto p2     = p.add_instruction(pass_op{}, a2, p1);
    p.move_instruction(output, p2);
    p.add_instruction(pass_op{}, p2, output);
    p.compile(memory_coloring_target{});
    CHECK(p.get_parameter_shape("scratch").bytes() == 192);
    CHECK(no_allocate(p));
}
void test31()
{
    migraph::program p;
    auto output = p.add_parameter("output", {migraph::shape::float_type, {8}});
    auto a1     = add_alloc(p, {migraph::shape::float_type, {8}});
    auto p1     = p.add_instruction(pass_op{}, a1);
    auto a2     = add_alloc(p, {migraph::shape::float_type, {40}});
    p.move_instruction(output, a2);
    p.add_instruction(pass_op{}, a2, p1);
    p.compile(memory_coloring_target{});
    CHECK(p.get_parameter_shape("scratch").bytes() == 192);
    CHECK(no_allocate(p));
}
void test32()
{
    migraph::program p;
    auto a1 = add_alloc(p, {migraph::shape::float_type, {8}});
    auto a2 = add_alloc(p, {migraph::shape::float_type, {40}});
    auto a3 = add_alloc(p, {migraph::shape::float_type, {40}});
    auto p1 = p.add_instruction(pass_op{}, a2, a1, a3);
    auto a5 = add_alloc(p, {migraph::shape::float_type, {40}});
    p.add_instruction(pass_op{}, a5, p1);
    p.compile(memory_coloring_target{});
    CHECK(p.get_parameter_shape("scratch").bytes() == 352);
    CHECK(no_allocate(p));
}
void test33()
{
    migraph::program p;
    auto a1 = add_alloc(p, {migraph::shape::float_type, {8}});
    auto a2 = add_alloc(p, {migraph::shape::float_type, {8}});
    auto a3 = add_alloc(p, {migraph::shape::float_type, {8}});
    auto p1 = p.add_instruction(pass_op{}, a2, a1, a3);
    auto a5 = add_alloc(p, {migraph::shape::float_type, {40}});
    p.add_instruction(pass_op{}, a5, p1);
    p.compile(memory_coloring_target{});
    CHECK(p.get_parameter_shape("scratch").bytes() == 224);
    CHECK(no_allocate(p));
}
void test34()
{
    migraph::program p;
    auto a1 = add_alloc(p, {migraph::shape::float_type, {40}});
    auto a2 = add_alloc(p, {migraph::shape::float_type, {40}});
    auto a3 = add_alloc(p, {migraph::shape::float_type, {40}});
    auto p1 = p.add_instruction(pass_op{}, a2, a1, a3);
    auto a5 = add_alloc(p, {migraph::shape::float_type, {8}});
    p.add_instruction(pass_op{}, a5, p1);
    p.compile(memory_coloring_target{});
    CHECK(p.get_parameter_shape("scratch").bytes() == 480);
    CHECK(no_allocate(p));
}
void test35()
{
    migraph::program p;
    auto a1 = add_alloc(p, {migraph::shape::float_type, {40}});
    auto a2 = add_alloc(p, {migraph::shape::float_type, {8}});
    auto a3 = add_alloc(p, {migraph::shape::float_type, {8}});
    auto p1 = p.add_instruction(pass_op{}, a2, a1, a3);
    auto a5 = add_alloc(p, {migraph::shape::float_type, {8}});
    p.add_instruction(pass_op{}, a5, p1);
    p.compile(memory_coloring_target{});
    CHECK(p.get_parameter_shape("scratch").bytes() == 224);
    CHECK(no_allocate(p));
}
void test36()
{
    migraph::program p;
    auto output = p.add_parameter("output", {migraph::shape::float_type, {20}});
    auto a1     = add_alloc(p, {migraph::shape::float_type, {0}});
    auto a2     = add_alloc(p, {migraph::shape::float_type, {40}});
    auto p1     = p.add_instruction(pass_op{}, a2, a1);
    auto a3     = add_alloc(p, {migraph::shape::float_type, {40}});
    auto p2     = p.add_instruction(pass_op{}, a3, p1);
    auto a4     = add_alloc(p, {migraph::shape::float_type, {40}});
    auto p3     = p.add_instruction(pass_op{}, a4, p2);
    p.add_instruction(pass_op{}, output, p3);
    p.compile(memory_coloring_target{});
    CHECK(p.get_parameter_shape("scratch").bytes() == 320);
    CHECK(no_allocate(p));
}
void test37()
{
    migraph::program p;
    auto output = p.add_parameter("output", {migraph::shape::float_type, {20}});
    auto a1     = add_alloc(p, {migraph::shape::float_type, {4}});
    auto a2     = add_alloc(p, {migraph::shape::float_type, {40}});
    auto p1     = p.add_instruction(pass_op{}, a2, a1);
    auto a3     = add_alloc(p, {migraph::shape::float_type, {40}});
    auto p2     = p.add_instruction(pass_op{}, a3, p1);
    auto a4     = add_alloc(p, {migraph::shape::float_type, {40}});
    auto p3     = p.add_instruction(pass_op{}, a4, p2);
    p.add_instruction(pass_op{}, output, p3);
    p.compile(memory_coloring_target{});
    CHECK(p.get_parameter_shape("scratch").bytes() == 320);
    CHECK(no_allocate(p));
}
void test38()
{
    migraph::program p;
    auto output = p.add_parameter("output", {migraph::shape::float_type, {1, 64, 56, 56}});
    auto p29    = add_alloc(p, {migraph::shape::float_type, {0}});
    auto p30    = add_alloc(p, {migraph::shape::float_type, {1, 64, 112, 112}});
    auto p31    = p.add_instruction(pass_op{}, p30, p29);
    auto p32    = add_alloc(p, {migraph::shape::float_type, {1, 64, 112, 112}});
    auto p37    = p.add_instruction(pass_op{}, p32, p31);
    auto p38    = add_alloc(p, {migraph::shape::float_type, {1, 64, 112, 112}});
    auto p39    = p.add_instruction(pass_op{}, p38, p37);
    auto p40    = add_alloc(p, {migraph::shape::float_type, {1, 64, 56, 56}});
    auto p41    = p.add_instruction(pass_op{}, p40, p39);
    auto p42    = add_alloc(p, {migraph::shape::float_type, {0}});
    auto p43    = add_alloc(p, {migraph::shape::float_type, {1, 64, 56, 56}});
    auto p44    = p.add_instruction(pass_op{}, p43, p41, p42);
    auto p45    = add_alloc(p, {migraph::shape::float_type, {1, 64, 56, 56}});
    auto p50    = p.add_instruction(pass_op{}, p45, p44);
    auto p51    = add_alloc(p, {migraph::shape::float_type, {1, 64, 56, 56}});
    auto p52    = p.add_instruction(pass_op{}, p51, p50);
    auto p53    = add_alloc(p, {migraph::shape::float_type, {0}});
    auto p54    = add_alloc(p, {migraph::shape::float_type, {1, 64, 56, 56}});
    auto p55    = p.add_instruction(pass_op{}, p54, p52, p53);
    auto p56    = add_alloc(p, {migraph::shape::float_type, {1, 64, 56, 56}});
    auto p61    = p.add_instruction(pass_op{}, p56, p55);
    auto p62    = add_alloc(p, {migraph::shape::float_type, {1, 64, 56, 56}});
    auto p63    = p.add_instruction(pass_op{}, p62, p61, p41);
    auto p64    = add_alloc(p, {migraph::shape::float_type, {0}});
    auto p65    = add_alloc(p, {migraph::shape::float_type, {1, 64, 56, 56}});
    auto p66    = p.add_instruction(pass_op{}, p65, p63, p64);
    auto p67    = add_alloc(p, {migraph::shape::float_type, {1, 64, 56, 56}});
    auto p72    = p.add_instruction(pass_op{}, p67, p66);
    auto p73    = add_alloc(p, {migraph::shape::float_type, {1, 64, 56, 56}});
    auto p74    = p.add_instruction(pass_op{}, p73, p72);
    auto p75    = add_alloc(p, {migraph::shape::float_type, {0}});
    auto p76    = add_alloc(p, {migraph::shape::float_type, {1, 64, 56, 56}});
    auto p77    = p.add_instruction(pass_op{}, p76, p74, p75);
    auto p78    = add_alloc(p, {migraph::shape::float_type, {1, 64, 56, 56}});
    auto p83    = p.add_instruction(pass_op{}, p78, p77);
    p.add_instruction(pass_op{}, output, p83, p63);
    p.compile(memory_coloring_target{});
    EXPECT(p.get_parameter_shape("scratch").bytes() == 672);
    CHECK(p.get_parameter_shape("scratch").bytes() == 6422528);
    CHECK(no_allocate(p));
}
void literal_test()
...
...
@@ -120,7 +605,7 @@ void literal_test()
    p.add_literal(lit);
    p.compile(memory_coloring_target{});
    auto result = p.eval({});
-   EXPECT(lit == result);
+   CHECK(lit == result);
}

int main()
...
...
@@ -129,6 +614,40 @@ int main()
    test2();
    test3();
    test4();
    test5();
    test6();
    test7();
    test8();
    test9();
    test10();
    test11();
    test12();
    test13();
    test14();
    test15();
    test16();
    test17();
    test18();
    test19();
    test20();
    test21();
    test22();
    test23();
    test24();
    test25();
    test26();
    test27();
    test28();
    test29();
    test30();
    test31();
    test32();
    test33();
    test34();
    test35();
    test36();
    test37();
    test38();
    literal_test();
}
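A note on the expected "scratch" sizes above (my reading of the tests, not text from the commit): the memory-coloring pass packs every allocation into one "scratch" parameter, so buffers whose lifetimes overlap need disjoint offsets while non-overlapping buffers may share space. Since pass_op forwards (aliases) its first input, in test19 the 8-element buffer a1 stays live through p1 and p2, so a1 (8 x 4 = 32 bytes), a2 (40 x 4 = 160 bytes) and a3 (160 bytes) coexist and the scratch requirement is 32 + 160 + 160 = 352 bytes; in test18 only a1 and a2 coexist, giving 32 + 160 = 192 bytes.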
test/onnx/globalavgpool_test.onnx
0 → 100644
View file @
ee80cee9
globalavgpool-example:i
01"GlobalAveragePooltest-globalavgpoolZ
0
b
1
B
\ No newline at end of file
test/onnx/globalmaxpool_test.onnx
0 → 100644
View file @
ee80cee9
globalmaxpool-example:e
01" GlobalMaxPooltest-globalmaxpoolZ
0
b
1
B
\ No newline at end of file
test/onnx/onnx_test.cpp
View file @
ee80cee9
...
...
@@ -32,7 +32,7 @@ void pytorch_conv_relu_maxpool()
    auto l3 = p.add_instruction(migraph::op::convolution{}, l0, l1);
    auto l4 = p.add_instruction(migraph::op::broadcast{axis, l3->get_shape()}, l2);
    auto l5 = p.add_instruction(migraph::op::add{}, l3, l4);
-   auto l6 = p.add_instruction(migraph::op::activation{"relu"}, l5);
+   auto l6 = p.add_instruction(migraph::op::relu{}, l5);
    p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l6);
    auto prog = migraph::parse_onnx("conv_relu_maxpool.onnx");
...
...
@@ -55,7 +55,7 @@ void pytorch_conv_bn_relu_maxpool()
    auto l4 = p.add_instruction(migraph::op::broadcast{axis, l3->get_shape()}, l2);
    auto l5 = p.add_instruction(migraph::op::add{}, l3, l4);
    auto l6 = p.add_instruction(migraph::op::batch_norm_inference{1.0e-5f}, l5, p3, p4, p5, p6);
-   auto l7 = p.add_instruction(migraph::op::activation{"relu"}, l6);
+   auto l7 = p.add_instruction(migraph::op::relu{}, l6);
    p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l7);
    auto prog = migraph::parse_onnx("conv_bn_relu_maxpool.onnx");
...
...
@@ -72,7 +72,7 @@ void pytorch_conv_relu_maxpool_x2()
    auto l3 = p.add_instruction(migraph::op::convolution{}, l0, l1);
    auto l4 = p.add_instruction(migraph::op::broadcast{axis, l3->get_shape()}, l2);
    auto l5 = p.add_instruction(migraph::op::add{}, l3, l4);
-   auto l6 = p.add_instruction(migraph::op::activation{"relu"}, l5);
+   auto l6 = p.add_instruction(migraph::op::relu{}, l5);
    auto l7 = p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l6);
    auto l8 = p.add_parameter("3", {migraph::shape::float_type, {1, 5, 5, 5}});
...
...
@@ -80,7 +80,7 @@ void pytorch_conv_relu_maxpool_x2()
    auto l10 = p.add_instruction(migraph::op::convolution{}, l7, l8);
    auto l11 = p.add_instruction(migraph::op::broadcast{axis, l10->get_shape()}, l9);
    auto l12 = p.add_instruction(migraph::op::add{}, l10, l11);
-   auto l13 = p.add_instruction(migraph::op::activation{"relu"}, l12);
+   auto l13 = p.add_instruction(migraph::op::relu{}, l12);
    p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l13);
    auto prog = migraph::parse_onnx("conv_relu_maxpoolX2.onnx");
...
...
@@ -118,6 +118,34 @@ void imagescaler_test()
    EXPECT(p == prog);
}

void globalavgpool_test()
{
    migraph::program p;
    auto input = p.add_parameter("0", migraph::shape{migraph::shape::float_type, {1, 3, 16, 16}});
    auto op    = migraph::op::pooling{"average"};
    auto lens  = input->get_shape().lens();
    op.lengths = {lens[2], lens[3]};
    p.add_instruction(op, input);
    auto prog = migraph::parse_onnx("globalavgpool_test.onnx");
    EXPECT(p == prog);
}

void globalmaxpool_test()
{
    migraph::program p;
    auto input = p.add_parameter("0", migraph::shape{migraph::shape::float_type, {1, 3, 16, 16}});
    auto op    = migraph::op::pooling{"max"};
    auto lens  = input->get_shape().lens();
    op.lengths = {lens[2], lens[3]};
    p.add_instruction(op, input);
    auto prog = migraph::parse_onnx("globalmaxpool_test.onnx");
    EXPECT(p == prog);
}

int main()
{
    pytorch_conv_bias_test();
...
...
@@ -126,4 +154,6 @@ int main()
    pytorch_conv_relu_maxpool_x2();
    leaky_relu_test();
    imagescaler_test();
    globalavgpool_test();
    globalmaxpool_test();
}
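A short worked note on the two new tests (my reading, not text from the commit): a "global" pool is modeled as an ordinary pooling whose kernel covers the whole spatial extent. For the 1x3x16x16 input, lens[2] and lens[3] are both 16, so op.lengths = {16, 16}; with zero padding and the default stride the output spatial size is (16 - 16)/1 + 1 = 1, i.e. each feature map collapses to a single value and the result has shape 1x3x1x1.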
test/output_alias.cpp
0 → 100644
View file @
ee80cee9
#include <migraph/program.hpp>
#include <migraph/instruction.hpp>
#include <test.hpp>
#include <basic_ops.hpp>
void simple_alias()
{
    migraph::program p;
    auto l  = p.add_literal(1);
    auto p1 = p.add_instruction(pass_op{}, l);
    EXPECT(bool{migraph::instruction::get_output_alias(l) == l});
    EXPECT(bool{migraph::instruction::get_output_alias(p1) == l});
}

void cascade_alias()
{
    migraph::program p;
    auto l  = p.add_literal(1);
    auto p1 = p.add_instruction(pass_op{}, l);
    auto p2 = p.add_instruction(pass_op{}, p1);
    auto p3 = p.add_instruction(pass_op{}, p2);
    EXPECT(bool{migraph::instruction::get_output_alias(l) == l});
    EXPECT(bool{migraph::instruction::get_output_alias(p1) == l});
    EXPECT(bool{migraph::instruction::get_output_alias(p2) == l});
    EXPECT(bool{migraph::instruction::get_output_alias(p3) == l});
}

void no_alias()
{
    migraph::program p;
    auto x   = p.add_literal(1);
    auto y   = p.add_literal(2);
    auto sum = p.add_instruction(sum_op{}, x, y);
    EXPECT(bool{migraph::instruction::get_output_alias(sum) == sum});
}

int main()
{
    simple_alias();
    cascade_alias();
    no_alias();
}
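The expectations above (every pass_op chains back to the original literal, while sum_op aliases nothing) suggest how get_output_alias can be thought of: follow each instruction's declared alias until an instruction reports none. The following is a minimal sketch of that walk under assumed accessor names (ins->get_operator(), ins->inputs(), a to_shapes() helper); it is illustrative only, not the MIGraphX implementation.

// Sketch only -- accessor names are assumptions, not the real MIGraphX API.
instruction_ref walk_output_alias(instruction_ref ins)
{
    auto op    = ins->get_operator();                        // assumed accessor
    auto index = op.output_alias(to_shapes(ins->inputs()));  // -1 means "no alias"
    if(index < 0)
        return ins;                                          // this instruction owns its output
    return walk_output_alias(ins->inputs().at(index));       // follow the aliased input
}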
tools/include/concat_opt.hpp
0 → 100644
View file @
ee80cee9
#ifndef MIGRAPH_GUARD_CONCAT_OPT_HPP
#define MIGRAPH_GUARD_CONCAT_OPT_HPP
#include <cassert>
#include <string>
#include <functional>
#include <memory>
#include <type_traits>
#include <utility>
#include <migraph/operation.hpp>
#include <migraph/operators.hpp>
namespace migraph {

struct program;

#ifdef DOXYGEN

/// An interface for target-dependent optimization for the concat instruction
struct concat_optimization
{
    /// The name of the target-dependent concat operator
    std::string name() const;
    /// A name of the target-dependent allocate operator
    std::string allocate() const;
    /// Return the target-independent concat operator
    op::concat get_concat(const operation& op) const;
};

#else

<%
 interface('concat_optimization',
           virtual('name', returns='std::string', const=True),
           virtual('allocate', returns='std::string', const=True),
           virtual('get_concat', returns='op::concat', op='const operation&', const=True))
%>

#endif

} // namespace migraph

#endif
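The <% interface(...) %> block above is input to a code generator that emits a type-erased concat_optimization class exposing exactly the three members documented in the DOXYGEN branch. As a rough hand-written illustration of the pattern such a declaration corresponds to (a sketch of classic type erasure, not the generated code):

// Illustrative only: hand-rolled type erasure with the same three members.
struct concat_optimization_sketch
{
    template <class T>
    concat_optimization_sketch(T x) : self(std::make_shared<model<T>>(std::move(x)))
    {
    }
    std::string name() const { return self->name(); }
    std::string allocate() const { return self->allocate(); }
    op::concat get_concat(const operation& op) const { return self->get_concat(op); }

    private:
    struct concept_t
    {
        virtual ~concept_t() = default;
        virtual std::string name() const               = 0;
        virtual std::string allocate() const           = 0;
        virtual op::concat get_concat(const operation& op) const = 0;
    };
    template <class T>
    struct model : concept_t
    {
        T data;
        model(T x) : data(std::move(x)) {}
        std::string name() const override { return data.name(); }
        std::string allocate() const override { return data.allocate(); }
        op::concat get_concat(const operation& op) const override { return data.get_concat(op); }
    };
    std::shared_ptr<const concept_t> self;
};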
tools/include/operation.hpp
View file @
ee80cee9
...
...
@@ -43,6 +43,9 @@ struct operation
     * the same the `output` shape.
     */
    argument compute(context& ctx, const shape& output, const std::vector<argument>& input) const;

    /// An optional method to return which argument the output will alias. If
    /// there is no aliased output then -1 can be returned.
    int output_alias(const std::vector<shape>& input) const;

    /// An optional stream operator to print the operation. When this is not
    /// implemented, it will just print the operation's name.
    friend std::ostream& operator<<(std::ostream& os, const operation& op);
...
...
@@ -108,10 +111,34 @@ compute_op(const T& x, context& ctx, const shape& output_shape, const std::vecto
    return compute_op(rank<1>{}, x, ctx, output_shape, input);
}

template <class T>
int output_alias_op(rank<0>, const T&, const std::vector<shape>&)
{
    return -1;
}

template <class T>
auto output_alias_op(rank<1>, const T& x, const std::vector<shape>& shapes)
    -> decltype(x.output_alias(shapes))
{
    return x.output_alias(shapes);
}

template <class T>
int output_alias_op(const T& x, const std::vector<shape>& shapes)
{
    return output_alias_op(rank<1>{}, x, shapes);
}
<%
 interface('operation',
           virtual('name', returns='std::string', const=True),
           virtual('output_alias',
                   returns='int',
                   input='const std::vector<shape>&',
                   const=True,
                   default='output_alias_op'),
           virtual('compute_shape', returns='shape', input='const std::vector<shape>&', const=True),
           virtual('compute',
                   returns='argument',
...
...
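The output_alias_op overloads added above use the rank<N> tag-dispatch idiom: rank<1> is (typically) derived from rank<0>, so when the wrapped type has a usable output_alias member the trailing decltype makes the rank<1> overload viable and preferred, and otherwise overload resolution falls back to the rank<0> default of -1. A small illustration with hypothetical operator types (assumed for this example only):

// Hypothetical types used only to show how the dispatch resolves.
struct aliasing_op
{
    int output_alias(const std::vector<shape>&) const { return 0; }
};
struct plain_op
{
};

void output_alias_dispatch_example()
{
    std::vector<shape> shapes;
    int a = output_alias_op(aliasing_op{}, shapes); // picks the rank<1> overload, returns 0
    int b = output_alias_op(plain_op{}, shapes);    // decltype fails, falls back to rank<0>, returns -1
    (void)a;
    (void)b;
}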