gaoqiong / MIGraphX · Commits

Commit d71254c5, authored Nov 06, 2018 by Paul
Merge branch 'master' into mem-color-tests
Parents: 0cbb0368, 7d972d2b

Showing 20 of the 47 changed files, with 435 additions and 66 deletions (+435 −66) in the files shown below.
Files shown on this page:

src/targets/gpu/include/migraph/gpu/batchnorm.hpp        +1   −0
src/targets/gpu/include/migraph/gpu/concat.hpp           +1   −0
src/targets/gpu/include/migraph/gpu/concat_gpu_opt.hpp   +23  −0
src/targets/gpu/include/migraph/gpu/contiguous.hpp       +1   −0
src/targets/gpu/include/migraph/gpu/convolution.hpp      +1   −0
src/targets/gpu/include/migraph/gpu/gemm.hpp             +1   −0
src/targets/gpu/include/migraph/gpu/hip.hpp              +2   −0
src/targets/gpu/include/migraph/gpu/leaky_relu.hpp       +1   −0
src/targets/gpu/include/migraph/gpu/mul.hpp              +1   −0
src/targets/gpu/include/migraph/gpu/pooling.hpp          +1   −0
src/targets/gpu/include/migraph/gpu/relu.hpp             +1   −0
src/targets/gpu/include/migraph/gpu/softmax.hpp          +1   −0
src/targets/gpu/lowering.cpp                             +8   −12
src/targets/gpu/target.cpp                               +4   −0
test/cpu_ops_test.cpp                                    +84  −42
test/eliminate_concat_test.cpp                           +174 −0
test/eval_test.cpp                                       +16  −0
test/fwd_conv_batchnorm_rewrite_test.cpp                 +3   −3
test/gpu/miopen.cpp                                      +109 −9
test/include/basic_ops.hpp                               +2   −0
src/targets/gpu/include/migraph/gpu/batchnorm.hpp

@@ -27,6 +27,7 @@ struct miopen_batch_norm_inference
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
+    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
 };
 } // namespace gpu
src/targets/gpu/include/migraph/gpu/concat.hpp

@@ -28,6 +28,7 @@ struct hip_concat
     shape compute_shape(std::vector<shape> inputs) const;
     argument compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
+    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
 };
 } // namespace gpu
src/targets/gpu/include/migraph/gpu/concat_gpu_opt.hpp (new file, mode 100644)

#ifndef MIGRAPH_GUARD_RTGLIB_CONCAT_GPU_OPT_HPP
#define MIGRAPH_GUARD_RTGLIB_CONCAT_GPU_OPT_HPP

#include <migraph/gpu/concat.hpp>

namespace migraph {
namespace gpu {

struct concat_gpu_optimization
{
    std::string name() const { return "gpu::concat"; }
    std::string allocate() const { return "hip::allocate"; }
    migraph::op::concat get_concat(const migraph::operation& op) const
    {
        return migraph::any_cast<migraph::gpu::hip_concat>(op).op;
    }
};

} // namespace gpu
} // namespace migraph
#endif
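For reference, concat_gpu_optimization is the policy object that the generic eliminate_concat pass is parameterized with in this commit (see src/targets/gpu/target.cpp below). A minimal sketch of that wiring, using only names introduced in this commit; the variable name is illustrative:

#include <migraph/eliminate_concat.hpp>
#include <migraph/gpu/concat_gpu_opt.hpp>

// The pass is handed the policy, which tells it which operator name is the
// lowered concat ("gpu::concat"), which operator allocates output buffers
// ("hip::allocate"), and how to recover the original op::concat from a
// lowered instruction (get_concat).
auto remove_gpu_concats = migraph::eliminate_concat{migraph::gpu::concat_gpu_optimization{}};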
src/targets/gpu/include/migraph/gpu/contiguous.hpp

@@ -26,6 +26,7 @@ struct miopen_contiguous
     std::string name() const { return "gpu::contiguous"; }
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument compute(context&, shape output_shape, const std::vector<argument>& args) const;
+    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
 };
 } // namespace gpu
src/targets/gpu/include/migraph/gpu/convolution.hpp

@@ -38,6 +38,7 @@ struct miopen_convolution
     argument compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
     shape compile(context& ctx, const shape& output_shape, std::vector<instruction_ref> inputs);
+    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
 };
 } // namespace gpu
src/targets/gpu/include/migraph/gpu/gemm.hpp

@@ -27,6 +27,7 @@ struct miopen_gemm
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
+    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
 };
 } // namespace gpu
src/targets/gpu/include/migraph/gpu/hip.hpp

@@ -67,6 +67,7 @@ struct hip_write
     {
         return to_gpu(args.front());
     }
+    int output_alias(const std::vector<shape>&) const { return 0; }
 };

 struct hip_copy

@@ -82,6 +83,7 @@ struct hip_copy
         copy_to_gpu(args[0], args[1]);
         return args[1];
     }
+    int output_alias(const std::vector<shape>&) const { return 1; }
 };
 } // namespace gpu
 } // namespace migraph
src/targets/gpu/include/migraph/gpu/leaky_relu.hpp

@@ -27,6 +27,7 @@ struct miopen_leaky_relu
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
+    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
 };
 } // namespace gpu
src/targets/gpu/include/migraph/gpu/mul.hpp

@@ -25,6 +25,7 @@ struct hip_mul
     std::string name() const { return "gpu::mul"; }
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument compute(context&, const shape&, const std::vector<argument>& args) const;
+    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
 };
 } // namespace gpu
src/targets/gpu/include/migraph/gpu/pooling.hpp

@@ -29,6 +29,7 @@ struct miopen_pooling
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
+    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
 };
 } // namespace gpu
src/targets/gpu/include/migraph/gpu/relu.hpp

@@ -27,6 +27,7 @@ struct miopen_relu
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
+    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
 };
 } // namespace gpu
src/targets/gpu/include/migraph/gpu/softmax.hpp

@@ -27,6 +27,7 @@ struct miopen_softmax
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
+    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
 };
 } // namespace gpu
src/targets/gpu/lowering.cpp

@@ -50,9 +50,9 @@ struct miopen_apply
             {
                 check_shape(s, apply_convolution(it));
             }
-            else if(it->name() == "activation")
+            else if(it->name() == "relu")
             {
-                check_shape(s, apply_activation(it));
+                check_shape(s, apply_relu(it));
             }
             else if(it->name() == "leaky_relu")
             {

@@ -131,17 +131,13 @@ struct miopen_apply
             ins, miopen_pooling{op, std::move(pd)}, ins->inputs().at(0), output);
     }

-    instruction_ref apply_activation(instruction_ref ins)
+    instruction_ref apply_relu(instruction_ref ins)
     {
-        auto&& op = any_cast<op::activation>(ins->get_operator());
-        auto ad   = make_relu();
-        if(op.mode == "relu")
-        {
-            auto output = insert_allocation(ins, ins->get_shape());
-            return prog->replace_instruction(
-                ins, miopen_relu{std::move(ad)}, ins->inputs().at(0), output);
-        }
-        return ins;
+        auto ad     = make_relu();
+        auto output = insert_allocation(ins, ins->get_shape());
+        return prog->replace_instruction(
+            ins, miopen_relu{std::move(ad)}, ins->inputs().at(0), output);
     }

     instruction_ref apply_leaky_relu(instruction_ref ins)
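The lowering change above goes hand in hand with the front-end switch from the generic activation operator to a dedicated relu operator, which is what all the test updates later in this commit apply. A minimal sketch of the new usage, assembled only from operator and target names that appear in this commit:

#include <migraph/program.hpp>
#include <migraph/operators.hpp>
#include <migraph/gpu/target.hpp>

migraph::program make_relu_program()
{
    migraph::program p;
    auto x = p.add_parameter("x", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
    // Previously written as: p.add_instruction(migraph::op::activation{"relu"}, x);
    p.add_instruction(migraph::op::relu{}, x);
    p.compile(migraph::gpu::target{}); // lowering now maps "relu" directly to miopen_relu
    return p;
}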
src/targets/gpu/target.cpp

@@ -15,6 +15,8 @@
 #include <migraph/eliminate_contiguous.hpp>
 #include <migraph/common_subexpression_elimination.hpp>
 #include <migraph/fwd_conv_batchnorm_rewrite.hpp>
+#include <migraph/eliminate_concat.hpp>
+#include <migraph/gpu/concat_gpu_opt.hpp>

 namespace migraph {
 namespace gpu {

@@ -38,6 +40,8 @@ std::vector<pass> target::get_passes(migraph::context& gctx) const
         simplify_reshapes{},
         dead_code_elimination{},
         lowering{ctx},
+        eliminate_concat{concat_gpu_optimization{}},
+        dead_code_elimination{},
         eliminate_contiguous{},
         dead_code_elimination{},
         fuse_ops{&ctx},
test/cpu_ops_test.cpp

@@ -3,7 +3,7 @@
 #include <migraph/literal.hpp>
 #include <migraph/operators.hpp>
 #include <migraph/instruction.hpp>
-#include <migraph/cpu/cpu_target.hpp>
+#include <migraph/cpu/target.hpp>
 #include <migraph/verify.hpp>
 #include "test.hpp"

@@ -18,7 +18,7 @@ void slice_test()
     p.add_instruction(migraph::op::slice{{2}, {1}, {3}}, l0);
     migraph::shape s2{migraph::shape::int32_type, {2, 2, 2}, {6, 3, 1}};
     EXPECT(p.get_shape() == s2);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     migraph::shape sresult{migraph::shape::int32_type, {2, 2, 2}, {4, 2, 1}};
     auto result = p.eval({});
     std::vector<int> gold = {1, 2, 4, 5, 7, 8, 10, 11};

@@ -36,7 +36,7 @@ void slice_test()
     p.add_instruction(migraph::op::slice{{0, 1, 2}, {0, 0, 0}, {2, 2, 2}}, l0);
     migraph::shape s2{migraph::shape::int32_type, {2, 2, 2}, {6, 3, 1}};
     EXPECT(p.get_shape() == s2);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     migraph::shape sresult{migraph::shape::int32_type, {2, 2, 2}, {4, 2, 1}};
     auto result = p.eval({});
     std::vector<int> gold = {0, 1, 3, 4, 6, 7, 9, 10};

@@ -62,7 +62,7 @@ void concat_test()
     auto l1 = p.add_literal(migraph::literal{s1, data1});
     auto l2 = p.add_literal(migraph::literal{s2, data2});
     p.add_instruction(migraph::op::concat{axis}, l0, l1, l2);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<int> gold = {0, 1, 2, 3, 4, 10, 5, 6, 7, 8, 9, 20};
     std::vector<int> results_vector(2 * 6);

@@ -85,7 +85,7 @@ void concat_test()
     auto l1 = p.add_literal(migraph::literal{s1, data1});
     auto l2 = p.add_literal(migraph::literal{s2, data2});
     p.add_instruction(migraph::op::concat{axis}, l0, l1, l2);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<int> gold = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
     std::vector<int> results_vector(6 * 2);

@@ -106,7 +106,7 @@ void squeeze_test()
     migraph::shape s2{migraph::shape::float_type, {4, 3, 1, 3}};
     auto l0 = p.add_literal(migraph::literal{s1, data});
     p.add_instruction(migraph::op::squeeze{{1}}, l0);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     EXPECT(result.get_shape() == s2);
 }

@@ -117,7 +117,7 @@ void squeeze_test()
     migraph::shape s2{migraph::shape::float_type, {4, 1, 3, 3}};
     auto l0 = p.add_literal(migraph::literal{s1, data});
     p.add_instruction(migraph::op::squeeze{{3}}, l0);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     EXPECT(result.get_shape() == s2);
 }

@@ -128,7 +128,7 @@ void squeeze_test()
     migraph::shape s2{migraph::shape::float_type, {4, 3, 3}};
     auto l0 = p.add_literal(migraph::literal{s1, data});
     p.add_instruction(migraph::op::squeeze{}, l0);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     EXPECT(result.get_shape() == s2);
 }

@@ -143,7 +143,7 @@ void unsqueeze_test()
     migraph::shape s2{migraph::shape::float_type, {4, 1, 3, 3}};
     auto l0 = p.add_literal(migraph::literal{s1, data});
     p.add_instruction(migraph::op::unsqueeze{{1}}, l0);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     EXPECT(result.get_shape() == s2);
 }

@@ -154,12 +154,52 @@ void unsqueeze_test()
     migraph::shape s2{migraph::shape::float_type, {4, 3, 1, 3}};
     auto l0 = p.add_literal(migraph::literal{s1, data});
     p.add_instruction(migraph::op::unsqueeze{{2}}, l0);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     EXPECT(result.get_shape() == s2);
     }
 }

+void globalavgpool_test()
+{
+    migraph::program p;
+    auto s    = migraph::shape{migraph::shape::float_type, {1, 3, 2, 2}};
+    auto op   = migraph::op::pooling{"average"};
+    auto lens = s.lens();
+    op.lengths = {lens[2], lens[3]};
+
+    std::vector<float> data{0.3, 0.2, 0.4, 0.1, 0.8, 0.5, 0.9, 0.1, 0.1, 0.7, 0.1, 0.6};
+    auto l0 = p.add_literal(migraph::literal{s, data});
+    p.add_instruction(op, l0);
+    p.compile(migraph::cpu::target{});
+    auto result = p.eval({});
+
+    std::vector<float> results_vector(3);
+    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold{0.25, 0.575, 0.375};
+    EXPECT(migraph::verify_range(results_vector, gold));
+}
+
+void globalmaxpool_test()
+{
+    migraph::program p;
+    auto s    = migraph::shape{migraph::shape::float_type, {1, 3, 2, 2}};
+    auto op   = migraph::op::pooling{"max"};
+    auto lens = s.lens();
+    op.lengths = {lens[2], lens[3]};
+
+    std::vector<float> data{0.3, 0.2, 0.4, 0.1, 0.8, 0.5, 0.9, 0.1, 0.1, 0.7, 0.1, 0.6};
+    auto l0 = p.add_literal(migraph::literal{s, data});
+    p.add_instruction(op, l0);
+    p.compile(migraph::cpu::target{});
+    auto result = p.eval({});
+
+    std::vector<float> results_vector(3);
+    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold{0.4, 0.9, 0.7};
+    EXPECT(migraph::verify_range(results_vector, gold));
+}
+
 void im2col_3x3_no_pad_identity_test()
 {
     std::size_t f[2] = {3, 3};

@@ -179,7 +219,7 @@ void im2col_3x3_no_pad_identity_test()
     auto l_image   = p.add_literal(migraph::literal{s_image, input});
     auto l_weights = p.add_literal(migraph::literal{s_weights, weights});
     p.add_instruction(migraph::op::im2col{padding, stride, dilation}, l_image, l_weights);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::size_t col_height = (size[0] - f[0] + 2 * padding[0]) / stride[0] + 1;

@@ -208,7 +248,7 @@ void im2col_3x3_no_pad_test()
     auto l_image   = p.add_literal(migraph::literal{s_image, input});
     auto l_weights = p.add_literal(migraph::literal{s_weights, weights});
     p.add_instruction(migraph::op::im2col{padding, stride, dilation}, l_image, l_weights);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<int> correct = {0, 1, 2, 4, 5, 6, 8, 9, 10, 1, 2, 3, 5, 6, 7, 9, 10, 11,

@@ -240,7 +280,7 @@ void im2col_3x3_stride_2_no_pad_test()
     auto l_image   = p.add_literal(migraph::literal{s_image, input});
     auto l_weights = p.add_literal(migraph::literal{s_weights, weights});
     p.add_instruction(migraph::op::im2col{padding, stride, dilation}, l_image, l_weights);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<int> correct = {0, 1, 2, 6, 7, 8, 12, 13, 14, 2, 3, 4,

@@ -273,7 +313,7 @@ void im2col_3x3_with_padding_test()
     auto l_image   = p.add_literal(migraph::literal{s_image, input});
     auto l_weights = p.add_literal(migraph::literal{s_weights, weights});
     p.add_instruction(migraph::op::im2col{padding, stride, dilation}, l_image, l_weights);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<int> correct = {0, 0, 0, 0, 0, 1, 0, 2, 3, 0, 0, 0, 0, 1, 0, 2, 3, 0,

@@ -315,7 +355,7 @@ void batch_norm_inference_test()
     auto variance = p.add_literal(migraph::literal{vars, variance_data});
     p.add_instruction(migraph::op::batch_norm_inference{}, x, scale, bias, mean, variance);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> result_vector(width * height * channels * batches);

@@ -345,7 +385,7 @@ void im2col_3x3_with_channels_identity_test()
     auto l_image   = p.add_literal(migraph::literal{s_image, input});
     auto l_weights = p.add_literal(migraph::literal{s_weights, weights});
     p.add_instruction(migraph::op::im2col{padding, stride, dilation}, l_image, l_weights);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::size_t col_height = (size[0] - f[0] + 2 * padding[0]) / stride[0] + 1;

@@ -361,7 +401,7 @@ void exp_test()
     migraph::shape s{migraph::shape::float_type, {3}};
     auto l = p.add_literal(migraph::literal{s, {-1, 0, 1}});
     p.add_instruction(migraph::op::exp{}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(3);
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });

@@ -375,7 +415,7 @@ void sin_test()
     migraph::shape s{migraph::shape::float_type, {3}};
     auto l = p.add_literal(migraph::literal{s, {-1, 0, 1}});
     p.add_instruction(migraph::op::sin{}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(3);
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });

@@ -389,7 +429,7 @@ void cos_test()
     migraph::shape s{migraph::shape::float_type, {3}};
     auto l = p.add_literal(migraph::literal{s, {-1, 0, 1}});
     p.add_instruction(migraph::op::cos{}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(3);
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });

@@ -403,7 +443,7 @@ void tan_test()
     migraph::shape s{migraph::shape::float_type, {3}};
     auto l = p.add_literal(migraph::literal{s, {-1, 0, 1}});
     p.add_instruction(migraph::op::tan{}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(3);
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });

@@ -418,7 +458,7 @@ void add_test()
     auto l1 = p.add_literal(migraph::literal{s, {-1, 0, 1}});
     auto l2 = p.add_literal(migraph::literal{s, {1, 2, 3}});
     p.add_instruction(migraph::op::add{}, l1, l2);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(3);
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });

@@ -437,7 +477,7 @@ void broadcast_test()
     auto l1 = p.add_literal(migraph::literal{a_shape, a_data});
     auto l2 = p.add_literal(migraph::literal{b_shape, b_data});
     p.add_instruction(migraph::op::broadcast{axis, l1->get_shape()}, l2);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     auto output = result.get<int32_t>();
     EXPECT(output(0, 0) == -2);

@@ -457,7 +497,7 @@ void add_broadcast_test()
     auto l2 = p.add_literal(migraph::literal{b_shape, b_data});
     auto l3 = p.add_instruction(migraph::op::broadcast{axis, l1->get_shape()}, l2);
     p.add_instruction(migraph::op::add{}, l1, l3);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     EXPECT(result.get_shape().packed());
     std::vector<float> results_vector(12);

@@ -473,7 +513,7 @@ void sub_test()
     auto l1 = p.add_literal(migraph::literal{s, {-1, 0, 1}});
     auto l2 = p.add_literal(migraph::literal{s, {1, 2, 3}});
     p.add_instruction(migraph::op::sub{}, l1, l2);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(3);
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });

@@ -488,7 +528,7 @@ void mul_test()
     auto l1 = p.add_literal(migraph::literal{s, {-1, 0, 1}});
     auto l2 = p.add_literal(migraph::literal{s, {1, 2, 3}});
     p.add_instruction(migraph::op::mul{}, l1, l2);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(3);
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });

@@ -503,7 +543,7 @@ void div_test()
     auto l1 = p.add_literal(migraph::literal{s, {-1.0f, 0.5f, 1.0f}});
     auto l2 = p.add_literal(migraph::literal{s, {1.0f, 2.0f, 4.0f}});
     p.add_instruction(migraph::op::div{}, l1, l2);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(3);
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });

@@ -516,8 +556,8 @@ void relu_test()
     migraph::program p;
     migraph::shape s{migraph::shape::float_type, {3}};
     auto l = p.add_literal(migraph::literal{s, {-1.f, 0.f, 1.f}});
-    p.add_instruction(migraph::op::activation{"relu"}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.add_instruction(migraph::op::relu{}, l);
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(3);
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });

@@ -531,7 +571,7 @@ void leaky_relu_test()
     migraph::shape s{migraph::shape::float_type, {3}};
     auto l = p.add_literal(migraph::literal{s, {-1.f, 0.f, 1.f}});
     p.add_instruction(migraph::op::leaky_relu{0.01}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(3);
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });

@@ -565,7 +605,7 @@ void imagescaler_test()
         migraph::literal{migraph::shape{migraph::shape::float_type, {3}}, {0.01, 0.02, 0.03}});
     auto bias_bcast = p.add_instruction(migraph::op::broadcast{1, s}, bias_vals);
     p.add_instruction(migraph::op::add{}, img_scaled, bias_bcast);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(12);
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });

@@ -596,7 +636,7 @@ void reshape_test()
     auto l = p.add_literal(migraph::literal{a_shape, data});
     std::vector<int64_t> new_shape = {8, 3, 1, 1};
     p.add_instruction(migraph::op::reshape{new_shape}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(3);
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });

@@ -607,7 +647,7 @@ void reshape_test()
     auto l = p.add_literal(migraph::literal{a_shape, data});
     std::vector<int64_t> new_shape = {1, 3, 4, 2};
     p.add_instruction(migraph::op::reshape{new_shape}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(3);
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });

@@ -618,7 +658,7 @@ void reshape_test()
     auto l = p.add_literal(migraph::literal{a_shape, data});
     std::vector<int64_t> new_shape = {1, 3, 4, 2};
     p.add_instruction(migraph::op::reshape{new_shape}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(3);
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });

@@ -666,7 +706,7 @@ void gemm_test()
     migraph::shape b_shape{migraph::shape::get_type<T>{}, {5, 3}};
     auto bl = p.add_literal(migraph::literal{b_shape, b});
     p.add_instruction(migraph::op::dot{}, al, bl);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<T> results_vector(12);
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });

@@ -721,7 +761,7 @@ void maxpool_test()
     migraph::shape a_shape{migraph::shape::float_type, {2, 3, 6, 6}};
     auto al = p.add_literal(migraph::literal{a_shape, a});
     p.add_instruction(migraph::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{3, 2}}}, al);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::cout << result.get_shape() << std::endl;
     std::vector<float> results_vector(36);

@@ -786,7 +826,7 @@ void softmax_test()
     migraph::shape a_shape{migraph::shape::float_type, {5, 3, 4, 2}};
     auto al = p.add_literal(migraph::literal{a_shape, a});
     p.add_instruction(migraph::op::softmax{}, al);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(120);
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });

@@ -848,7 +888,7 @@ void conv2d_test()
     auto cl = p.add_literal(migraph::literal{c_shape, c});
     p.add_instruction(migraph::op::convolution{}, al, cl);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(16);

@@ -904,7 +944,7 @@ void conv2d_padding_test()
     auto cl = p.add_literal(migraph::literal{c_shape, c});
     p.add_instruction(migraph::op::convolution{{{1, 1}}, {{1, 1}}}, al, cl);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(64);

@@ -965,7 +1005,7 @@ void conv2d_padding_stride_test()
     auto cl = p.add_literal(migraph::literal{c_shape, c});
     p.add_instruction(migraph::op::convolution{{{1, 1}}, {{2, 2}}}, al, cl);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(16);

@@ -984,7 +1024,7 @@ void transpose_test()
     auto l = p.add_literal(migraph::literal{a_shape, data});
     std::vector<int64_t> perm = {0, 3, 1, 2};
     p.add_instruction(migraph::op::transpose{perm}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     result.visit([&](auto output) {

@@ -998,7 +1038,7 @@ void transpose_test()
     std::vector<int64_t> perm = {0, 3, 1, 2};
     auto result = p.add_instruction(migraph::op::transpose{perm}, l);
     p.add_instruction(migraph::op::contiguous{}, result);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result2 = p.eval({});
     std::vector<float> results_vector(12);

@@ -1017,7 +1057,7 @@ void contiguous_test()
     migraph::program p;
     auto l = p.add_literal(migraph::literal{a_shape, data});
     p.add_instruction(migraph::op::contiguous{}, l);
-    p.compile(migraph::cpu::cpu_target{});
+    p.compile(migraph::cpu::target{});
     auto result = p.eval({});
     std::vector<float> results_vector(12);

@@ -1058,6 +1098,8 @@ int main()
     conv2d_padding_test();
     conv2d_padding_stride_test();
     batch_norm_inference_test();
+    globalavgpool_test();
+    globalmaxpool_test();
     im2col_3x3_no_pad_identity_test();
     im2col_3x3_no_pad_test();
     im2col_3x3_stride_2_no_pad_test();
test/eliminate_concat_test.cpp (new file, mode 100644)

#include <migraph/eliminate_concat.hpp>
#include <migraph/dead_code_elimination.hpp>
#include <migraph/operators.hpp>
#include <basic_ops.hpp>
#include <test.hpp>

struct concat
{
    concat(std::size_t axis) { op.axis = axis; }

    migraph::op::concat op;

    std::string name() const { return "eliminate_concat::concat"; }
    migraph::shape compute_shape(std::vector<migraph::shape> inputs) const
    {
        return op.compute_shape(std::move(inputs));
    }
    migraph::argument compute(migraph::context&,
                              const migraph::shape& output_shape,
                              const std::vector<migraph::argument>&) const
    {
        return {output_shape};
    }
};

struct concat_test_optimization
{
    /// A unique name used to identify the concat optimization
    std::string name() const { return "eliminate_concat::concat"; }
    /// A unique name used to identify the allocate operator
    std::string allocate() const { return "allocate"; }
    /// Return the lowered concat operator
    migraph::op::concat get_concat(const migraph::operation& op) const
    {
        return migraph::any_cast<concat>(op).op;
    }
};

struct eliminate_concat_target
{
    std::size_t align = 32;
    std::string name() const { return "eliminate_target"; }
    std::vector<migraph::pass> get_passes(migraph::context&) const
    {
        return {migraph::eliminate_concat{concat_test_optimization{}},
                migraph::dead_code_elimination{}};
    }
    migraph::context get_context() const { return {}; }
};

struct allocate
{
    migraph::shape s{};
    std::string name() const { return "allocate"; }
    migraph::shape compute_shape(const std::vector<migraph::shape>& inputs) const
    {
        migraph::check_shapes{inputs}.has(0);
        return s;
    }
    migraph::argument compute(migraph::context&,
                              const migraph::shape& output_shape,
                              const std::vector<migraph::argument>&) const
    {
        return {output_shape};
    }
};

struct fred_op
{
    std::string name() const { return "fred_op"; }
    migraph::shape compute_shape(const std::vector<migraph::shape>& inputs) const
    {
        migraph::check_shapes{inputs}.has(1);
        return inputs.at(0);
    }
    migraph::argument compute(migraph::context&,
                              const migraph::shape&,
                              const std::vector<migraph::argument>& args) const
    {
        return args.at(0);
    }
};

void basic()
{
    auto create_test_program = []() {
        migraph::program p;
        auto a1 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {1, 2, 8, 8}}});
        auto p1 = p.add_instruction(fred_op{}, a1);
        auto a2 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {1, 3, 8, 8}}});
        auto p2 = p.add_instruction(fred_op{}, a2);
        auto a3 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {1, 5, 8, 8}}});
        auto p3 = p.add_instruction(fred_op{}, a3);
        std::size_t axis = 1;
        auto a4 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {1, 10, 8, 8}}});
        p.add_instruction(concat(axis), p1, p2, p3, a4);
        return p;
    };
    auto create_control_program = []() {
        migraph::program p;
        auto a1 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {1, 10, 8, 8}}});
        auto l1 = p.add_instruction(
            migraph::op::load{migraph::shape{migraph::shape::float_type, {1, 2, 8, 8}}, 0}, {a1});
        auto p1 = p.add_instruction(fred_op{}, l1);
        auto l2 = p.add_instruction(
            migraph::op::load{migraph::shape{migraph::shape::float_type, {1, 3, 8, 8}}, 512}, {a1});
        auto p2 = p.add_instruction(fred_op{}, l2);
        auto l3 = p.add_instruction(
            migraph::op::load{migraph::shape{migraph::shape::float_type, {1, 5, 8, 8}}, 1280}, {a1});
        auto p3 = p.add_instruction(fred_op{}, l3);
        p.add_instruction(migraph::op::identity{}, {a1, p1, p2, p3});
        return p;
    };

    auto p1 = create_test_program();
    auto p2 = create_control_program();
    p1.compile(eliminate_concat_target{});
    EXPECT(p1 == p2);
}

void wont_work()
{
    auto create_test_program = []() {
        migraph::program p;
        auto a1 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {2, 2, 8, 8}}});
        auto p1 = p.add_instruction(fred_op{}, a1);
        auto a2 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {2, 3, 8, 8}}});
        auto p2 = p.add_instruction(fred_op{}, a2);
        auto a3 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {2, 5, 8, 8}}});
        auto p3 = p.add_instruction(fred_op{}, a3);
        std::size_t axis = 1;
        auto a4 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {2, 10, 8, 8}}});
        p.add_instruction(concat(axis), p1, p2, p3, a4);
        return p;
    };
    auto create_control_program = []() {
        migraph::program p;
        auto a1 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {2, 2, 8, 8}}});
        auto p1 = p.add_instruction(fred_op{}, a1);
        auto a2 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {2, 3, 8, 8}}});
        auto p2 = p.add_instruction(fred_op{}, a2);
        auto a3 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {2, 5, 8, 8}}});
        auto p3 = p.add_instruction(fred_op{}, a3);
        std::size_t axis = 1;
        auto a4 = p.add_instruction(allocate{migraph::shape{migraph::shape::float_type, {2, 10, 8, 8}}});
        p.add_instruction(concat(axis), p1, p2, p3, a4);
        return p;
    };

    auto p1 = create_test_program();
    auto p2 = create_control_program();
    p1.compile(eliminate_concat_target{});
    EXPECT(p1 == p2);
}

int main()
{
    basic();
    wont_work();
}
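A quick sanity check of the load offsets in the control program above: each input is a float (4-byte) tensor with an 8×8 spatial extent, so along axis 1 the second input starts at 2·8·8·4 = 512 bytes and the third at (2+3)·8·8·4 = 1280 bytes into the shared allocation. A small hypothetical helper (not part of the commit) that reproduces these numbers:

#include <cstddef>
#include <vector>

// Hypothetical helper: byte offset of the n-th concat input along axis 1,
// assuming 4-byte float elements and the N=1, H=W=8 shapes used in basic().
std::size_t concat_byte_offset(const std::vector<std::size_t>& channels, std::size_t n)
{
    std::size_t preceding = 0;
    for(std::size_t i = 0; i < n; i++)
        preceding += channels[i];
    return preceding * 8 * 8 * sizeof(float);
}
// concat_byte_offset({2, 3, 5}, 1) == 512
// concat_byte_offset({2, 3, 5}, 2) == 1280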
test/eval_test.cpp

@@ -104,6 +104,21 @@ void param_test()
     EXPECT(result != migraph::literal{4});
 }

+void param_error_test()
+{
+    migraph::program p;
+    auto x = p.add_parameter("x", {migraph::shape::int64_type});
+    auto y = p.add_parameter("y", {migraph::shape::int64_type});
+    p.add_instruction(sum_op{}, x, y);
+    EXPECT(test::throws<migraph::exception>(
+        [&] { p.eval({{"x", migraph::literal{1}.get_argument()}}); },
+        "Parameter not found: y"));
+}
+
 void replace_test()
 {
     migraph::program p;

@@ -215,6 +230,7 @@ int main()
     literal_test2();
     print_test();
     param_test();
+    param_error_test();
     replace_test();
     replace_ins_test();
     replace_ins_test2();
test/fwd_conv_batchnorm_rewrite_test.cpp

 #include <migraph/fwd_conv_batchnorm_rewrite.hpp>
 #include <migraph/program.hpp>
-#include <migraph/cpu/cpu_target.hpp>
+#include <migraph/cpu/target.hpp>
 #include <migraph/operators.hpp>
 #include <migraph/instruction.hpp>
 #include <test.hpp>

@@ -51,8 +51,8 @@ void fwd_conv_batchnorm_rewrite_test()
     migraph::program p2 = create_program();
     migraph::fwd_conv_batchnorm_rewrite opt;
     opt.apply(p2);
-    p1.compile(migraph::cpu::cpu_target{});
-    p2.compile(migraph::cpu::cpu_target{});
+    p1.compile(migraph::cpu::target{});
+    p2.compile(migraph::cpu::target{});

     auto result1 = p1.eval({});
     auto result2 = p2.eval({});
test/gpu/miopen.cpp

@@ -2,7 +2,7 @@
 #include <migraph/program.hpp>
 #include <migraph/operators.hpp>
 #include <migraph/generate.hpp>
-#include <migraph/cpu/cpu_target.hpp>
+#include <migraph/cpu/target.hpp>
 #include <migraph/gpu/target.hpp>
 #include <migraph/gpu/miopen.hpp>
 #include <migraph/gpu/hip.hpp>

@@ -100,7 +100,7 @@ migraph::argument run_cpu(migraph::program& p)
     V v;
     p = v.create_program();
     auto_print pp{p, 0};
-    compile_check(p, migraph::cpu::cpu_target{});
+    compile_check(p, migraph::cpu::target{});
     migraph::program::parameter_map m;
     for(auto&& x : p.get_parameter_shapes())
     {

@@ -158,7 +158,7 @@ struct test_literals
     auto weights = p.add_literal(
         generate_literal(migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}}));
     auto conv = p.add_instruction(migraph::op::convolution{}, input, weights);
-    p.add_instruction(migraph::op::activation{"relu"}, conv);
+    p.add_instruction(migraph::op::relu{}, conv);
     return p;
 }
 };

@@ -392,7 +392,7 @@ struct test_conv_relu
     auto weights = p.add_parameter("w", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
     auto conv    = p.add_instruction(migraph::op::convolution{}, input, weights);
-    p.add_instruction(migraph::op::activation{"relu"}, conv);
+    p.add_instruction(migraph::op::relu{}, conv);
     return p;
 }
 };

@@ -406,7 +406,7 @@ struct test_conv_relu_half
     auto weights = p.add_parameter("w", migraph::shape{migraph::shape::half_type, {4, 3, 3, 3}});
     auto conv    = p.add_instruction(migraph::op::convolution{}, input, weights);
-    p.add_instruction(migraph::op::activation{"relu"}, conv);
+    p.add_instruction(migraph::op::relu{}, conv);
     return p;
 }
 };

@@ -419,7 +419,7 @@ struct test_add_relu
     auto x   = p.add_parameter("x", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
     auto y   = p.add_parameter("y", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
     auto add = p.add_instruction(migraph::op::add{}, x, y);
-    p.add_instruction(migraph::op::activation{"relu"}, add);
+    p.add_instruction(migraph::op::relu{}, add);
     return p;
 }
 };

@@ -446,7 +446,37 @@ struct test_conv_pooling
         p.add_parameter("w", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
     auto conv    = p.add_instruction(migraph::op::convolution{}, input, weights);
     auto pooling = p.add_instruction(migraph::op::pooling{"max"}, conv);
-    p.add_instruction(migraph::op::activation{"relu"}, pooling);
+    p.add_instruction(migraph::op::relu{}, pooling);
     return p;
 }
 };
+
+struct test_global_avg_pooling
+{
+    migraph::program create_program() const
+    {
+        migraph::program p;
+        auto input = p.add_parameter("x", migraph::shape{migraph::shape::float_type, {1, 3, 16, 16}});
+        auto op    = migraph::op::pooling{"average"};
+        auto lens  = input->get_shape().lens();
+        op.lengths = {lens[2], lens[3]};
+        p.add_instruction(op, input);
+        return p;
+    }
+};
+
+struct test_global_max_pooling
+{
+    migraph::program create_program() const
+    {
+        migraph::program p;
+        auto input = p.add_parameter("x", migraph::shape{migraph::shape::float_type, {1, 3, 16, 16}});
+        auto op    = migraph::op::pooling{"max"};
+        auto lens  = input->get_shape().lens();
+        op.lengths = {lens[2], lens[3]};
+        p.add_instruction(op, input);
+        return p;
+    }
+};

@@ -627,7 +657,7 @@ struct test_conv_bn_relu_pooling
     auto variance = p.add_literal(migraph::abs(migraph::generate_literal(vars, 4)));
     auto bn   = p.add_instruction(migraph::op::batch_norm_inference{}, conv, scale, bias, mean, variance);
-    auto relu = p.add_instruction(migraph::op::activation{"relu"}, bn);
+    auto relu = p.add_instruction(migraph::op::relu{}, bn);
     p.add_instruction(migraph::op::pooling{"average", {1, 1}, {2, 2}, {3, 3}}, relu);
     return p;
 }

@@ -667,6 +697,73 @@ struct test_concat2
 }
 };

+struct test_concat_relu
+{
+    migraph::program create_program() const
+    {
+        migraph::program p;
+        std::size_t axis = 0;
+        migraph::shape s0{migraph::shape::float_type, {2, 2}};
+        migraph::shape s1{migraph::shape::float_type, {3, 2}};
+        migraph::shape s2{migraph::shape::float_type, {1, 2}};
+        auto l0 = p.add_parameter("x", s0);
+        auto l1 = p.add_parameter("y", s1);
+        auto l2 = p.add_parameter("z", s2);
+        auto r0 = p.add_instruction(migraph::op::relu{}, l0);
+        auto r1 = p.add_instruction(migraph::op::relu{}, l1);
+        auto r2 = p.add_instruction(migraph::op::relu{}, l2);
+        auto c0 = p.add_instruction(migraph::op::concat{axis}, r0, r1, r2);
+        p.add_instruction(migraph::op::relu{}, c0);
+        return p;
+    }
+};
+
+void manual_identity()
+{
+    migraph::program p;
+    std::vector<float> data0 = {0, 1, 2, 3};
+    migraph::shape s0{migraph::shape::float_type, {2, 2}};
+    auto l0 = p.add_literal(migraph::literal{s0, data0});
+    p.add_instruction(migraph::op::identity{}, l0);
+    p.compile(migraph::gpu::target{});
+    migraph::program::parameter_map m;
+    for(auto&& x : p.get_parameter_shapes())
+    {
+        m[x.first] = migraph::gpu::to_gpu(migraph::generate_argument(x.second));
+    }
+    auto result = migraph::gpu::from_gpu(p.eval(m));
+    std::cout << result << std::endl;
+}
+
+void manual_test_concat_relu()
+{
+    migraph::program p;
+    std::size_t axis = 0;
+    std::vector<float> data0 = {0, 1, 2, 3};
+    std::vector<float> data1 = {4, 5, 6, 7, 8, 9};
+    std::vector<float> data2 = {10, 11};
+    migraph::shape s0{migraph::shape::float_type, {2, 2}};
+    migraph::shape s1{migraph::shape::float_type, {3, 2}};
+    migraph::shape s2{migraph::shape::float_type, {1, 2}};
+    auto l0 = p.add_literal(migraph::literal{s0, data0});
+    auto l1 = p.add_literal(migraph::literal{s1, data1});
+    auto l2 = p.add_literal(migraph::literal{s2, data2});
+    auto r0 = p.add_instruction(migraph::op::relu{}, l0);
+    auto r1 = p.add_instruction(migraph::op::relu{}, l1);
+    auto r2 = p.add_instruction(migraph::op::relu{}, l2);
+    auto c0 = p.add_instruction(migraph::op::concat{axis}, r0, r1, r2);
+    p.add_instruction(migraph::op::relu{}, c0);
+    p.compile(migraph::gpu::target{});
+    migraph::program::parameter_map m;
+    for(auto&& x : p.get_parameter_shapes())
+    {
+        m[x.first] = migraph::gpu::to_gpu(migraph::generate_argument(x.second));
+    }
+    auto result = migraph::gpu::from_gpu(p.eval(m));
+    std::cout << result << std::endl;
+}
+
 struct test_conv_bn_relu_pooling2
 {
     static migraph::instruction_ref

@@ -697,7 +794,7 @@ struct test_conv_bn_relu_pooling2
     auto conv2 = p.add_instruction(migraph::op::convolution{{0, 0}, {2, 2}, {1, 1}}, x2, w2);
     auto bn2   = add_bn(p, conv2, 2048);
     auto add   = p.add_instruction(migraph::op::add{}, bn1, bn2);
-    auto relu  = p.add_instruction(migraph::op::activation{"relu"}, add);
+    auto relu  = p.add_instruction(migraph::op::relu{}, add);
     p.add_instruction(migraph::op::pooling{"average", {1, 1}, {2, 2}, {3, 3}}, relu);
     return p;
 }

@@ -707,6 +804,7 @@ int main()
 {
     verify_program<test_concat>();
     verify_program<test_concat2>();
+    verify_program<test_concat_relu>();
     verify_program<test_add>();
     verify_program<test_add_half>();
     verify_program<test_mul>();

@@ -728,6 +826,8 @@ int main()
     verify_program<test_add_relu>();
     verify_program<test_leaky_relu>();
     verify_program<test_conv_pooling>();
+    verify_program<test_global_avg_pooling>();
+    verify_program<test_global_max_pooling>();
     verify_program<test_gemm>();
     // verify_program<test_gemm_ld>();
     verify_program<test_gemm_transposeb>();
test/include/basic_ops.hpp

@@ -79,6 +79,7 @@ struct pass_op
             return {};
         return inputs.front();
     }
+    int output_alias(const std::vector<migraph::shape>&) const { return 0; }
 };

 struct pass_standard_op

@@ -103,6 +104,7 @@ struct pass_standard_op
             return {};
         return inputs.front();
     }
+    int output_alias(const std::vector<migraph::shape>&) const { return 0; }
 };

 struct nop