gaoqiong / MIGraphX · Commit 5c0a7807

Moved operators to op namespace

Authored Sep 26, 2018 by wsttiger
Parent: c8017873

Showing 3 changed files with 66 additions and 66 deletions (+66, -66):

    src/targets/gpu/include/migraph/gpu/miopen.hpp    +2   -2
    src/targets/gpu/lowering.cpp                      +14  -14
    test/gpu/miopen.cpp                               +50  -50
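The rename is mechanical: operator structs that previously lived directly in the migraph namespace (convolution, pooling, add, gemm, and so on) are now spelled migraph::op::convolution, migraph::op::pooling, and so on. A minimal sketch of what that looks like from the caller's side, using placeholder operator definitions rather than the real MIGraphX headers:

    // Illustrative only: stand-in definitions, not MIGraphX's actual headers.
    #include <string>

    namespace migraph {
    namespace op {
    struct add {};                            // was migraph::add before this commit
    struct activation { std::string mode; };  // was migraph::activation
    } // namespace op
    } // namespace migraph

    int main()
    {
        migraph::op::add sum{};               // new spelling: migraph::op::<operator>
        migraph::op::activation relu{"relu"}; // construction is unchanged, only the namespace moves
        (void)sum;
        (void)relu;
        return 0;
    }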
src/targets/gpu/include/migraph/gpu/miopen.hpp  (+2, -2)

@@ -44,7 +44,7 @@ inline tensor_descriptor make_tensor(const migraph::shape& s)
     return t;
 }
 
-inline convolution_descriptor make_conv(const migraph::convolution& op)
+inline convolution_descriptor make_conv(const migraph::op::convolution& op)
 {
     auto c = make_obj<convolution_descriptor>(&miopenCreateConvolutionDescriptor);
     miopenInitConvolutionDescriptor(c.get(),
@@ -58,7 +58,7 @@ inline convolution_descriptor make_conv(const migraph::convolution& op)
     return c;
 }
 
-inline pooling_descriptor make_pooling(const migraph::pooling& op)
+inline pooling_descriptor make_pooling(const migraph::op::pooling& op)
 {
     miopenPoolingMode_t mode;
     if(op.mode == "max")
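The two signatures above are the only changes in this header: the descriptor factories now take the operator type from the op namespace. A rough sketch of that pattern with stand-in types (the real convolution_descriptor, make_obj, and MIOpen calls are omitted; the placeholder fields only mirror how the tests construct op::convolution):

    // Sketch only: stand-in types to show the updated factory signature,
    // not the real MIOpen wrapper from this header.
    #include <vector>

    namespace migraph {
    namespace op {
    struct convolution  // placeholder operator with three brace-initialisable lists
    {
        std::vector<int> padding{0, 0};
        std::vector<int> stride{1, 1};
        std::vector<int> dilation{1, 1};
    };
    } // namespace op
    } // namespace migraph

    struct convolution_descriptor  // stand-in for the MIOpen descriptor handle
    {
        std::vector<int> padding, stride, dilation;
    };

    // Mirrors the new signature: const migraph::op::convolution& instead of const migraph::convolution&.
    inline convolution_descriptor make_conv(const migraph::op::convolution& op)
    {
        return {op.padding, op.stride, op.dilation};
    }

    int main()
    {
        migraph::op::convolution conv_op{};
        auto cd = make_conv(conv_op);
        return cd.stride.size() == 2 ? 0 : 1;
    }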
src/targets/gpu/lowering.cpp  (+14, -14)

@@ -20,7 +20,7 @@ namespace gpu {
 
 struct miopen_batch_norm_inference
 {
-    batch_norm_inference op;
+    op::batch_norm_inference op;
     std::string name() const { return "gpu::batch_norm_inference"; }
@@ -61,7 +61,7 @@ struct miopen_batch_norm_inference
 
 struct miopen_convolution
 {
-    convolution op;
+    op::convolution op;
     shared<convolution_descriptor> cd;
     miopenConvFwdAlgorithm_t algo{};
@@ -144,7 +144,7 @@ struct miopen_convolution
 
 struct miopen_pooling
 {
-    pooling op;
+    op::pooling op;
     shared<pooling_descriptor> pd;
     std::string name() const { return "gpu::pooling"; }
@@ -227,7 +227,7 @@ struct miopen_add
 
 struct miopen_gemm
 {
-    gemm op;
+    op::gemm op;
     std::string name() const { return "gpu::gemm"; }
     shape compute_shape(const std::vector<shape>& inputs) const
     {
@@ -267,7 +267,7 @@ struct miopen_gemm
 
 struct miopen_contiguous
 {
-    contiguous op;
+    op::contiguous op;
     std::string name() const { return "gpu::contiguous"; }
     shape compute_shape(const std::vector<shape>& inputs) const
     {
@@ -315,7 +315,7 @@ struct miopen_relu
 
 struct miopen_softmax
 {
-    softmax op;
+    op::softmax op;
     std::string name() const { return "gpu::softmax"; }
     shape compute_shape(const std::vector<shape>& inputs) const
     {
@@ -409,7 +409,7 @@ struct miopen_apply
     instruction_ref apply_convolution(instruction_ref ins)
     {
-        auto&& op = any_cast<convolution>(ins->get_operator());
+        auto&& op = any_cast<op::convolution>(ins->get_operator());
         auto conv = miopen_convolution{op, make_conv(op)};
         auto ws   = conv.compile(ctx, ins->get_shape(), ins->inputs());
@@ -423,7 +423,7 @@ struct miopen_apply
     instruction_ref apply_pooling(instruction_ref ins)
     {
-        auto&& op   = any_cast<pooling>(ins->get_operator());
+        auto&& op   = any_cast<op::pooling>(ins->get_operator());
         auto pd     = make_pooling(op);
         auto output = insert_allocation(ins, ins->get_shape());
@@ -433,7 +433,7 @@ struct miopen_apply
     instruction_ref apply_activation(instruction_ref ins)
     {
-        auto&& op = any_cast<activation>(ins->get_operator());
+        auto&& op = any_cast<op::activation>(ins->get_operator());
         auto ad   = make_relu();
         if(op.mode == "relu")
         {
@@ -446,7 +446,7 @@ struct miopen_apply
     instruction_ref apply_softmax(instruction_ref ins)
    {
-        auto&& op   = any_cast<softmax>(ins->get_operator());
+        auto&& op   = any_cast<op::softmax>(ins->get_operator());
         auto output = insert_allocation(ins, ins->get_shape());
         return prog->replace_instruction(ins, miopen_softmax{op}, ins->inputs().at(0), output);
     }
@@ -460,7 +460,7 @@ struct miopen_apply
     instruction_ref apply_gemm(instruction_ref ins)
     {
-        auto&& op   = any_cast<gemm>(ins->get_operator());
+        auto&& op   = any_cast<op::gemm>(ins->get_operator());
         auto output = insert_allocation(ins, ins->get_shape());
         return prog->replace_instruction(
             ins, miopen_gemm{op}, ins->inputs().at(0), ins->inputs().at(1), output);
@@ -468,18 +468,18 @@ struct miopen_apply
     instruction_ref apply_contiguous(instruction_ref ins)
     {
-        auto&& op   = any_cast<contiguous>(ins->get_operator());
+        auto&& op   = any_cast<op::contiguous>(ins->get_operator());
         auto output = insert_allocation(ins, ins->get_shape());
         return prog->replace_instruction(ins, miopen_contiguous{op}, ins->inputs().at(0), output);
     }
 
     instruction_ref apply_batch_norm_inference(instruction_ref ins)
     {
-        auto&& op       = any_cast<batch_norm_inference>(ins->get_operator());
+        auto&& op       = any_cast<op::batch_norm_inference>(ins->get_operator());
         auto output     = insert_allocation(ins, ins->get_shape());
         shape old_shape = ins->inputs().at(1)->get_shape();
         std::vector<int64_t> new_shape{1, static_cast<int64_t>(old_shape.elements()), 1, 1};
-        auto reshape_op = reshape{new_shape};
+        auto reshape_op = op::reshape{new_shape};
         std::vector<instruction_ref> reshapes;
         std::transform(ins->inputs().begin() + 1,
                        ins->inputs().end(),
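The apply_* changes all follow one pattern: the lowering pass pulls the typed operator out of the instruction with any_cast, now naming the op-namespace type, and wraps it in the corresponding gpu:: struct. A simplified, self-contained sketch of that extract-and-wrap idea, using std::any as a stand-in for MIGraphX's type-erased operator (not the library's actual any_cast or instruction types):

    // Illustrative stand-in: std::any models the type-erased operator returned by
    // get_operator(); the real code uses MIGraphX's own any_cast and instruction_ref.
    #include <any>
    #include <iostream>
    #include <string>

    namespace migraph {
    namespace op {
    struct softmax {};  // placeholder for the framework-level operator
    } // namespace op
    } // namespace migraph

    struct miopen_softmax  // GPU wrapper, analogous to the struct in lowering.cpp
    {
        migraph::op::softmax op;
        std::string name() const { return "gpu::softmax"; }
    };

    int main()
    {
        std::any erased = migraph::op::softmax{};                       // what an instruction carries
        auto op         = std::any_cast<migraph::op::softmax>(erased);  // typed view of the operator
        miopen_softmax lowered{op};                                     // wrap it for the GPU target
        std::cout << lowered.name() << "\n";                            // prints gpu::softmax
        return 0;
    }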
test/gpu/miopen.cpp  (+50, -50)

@@ -157,8 +157,8 @@ struct test_literals
             generate_literal(migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}}));
         auto weights = p.add_literal(
             generate_literal(migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}}));
-        auto conv = p.add_instruction(migraph::convolution{}, input, weights);
-        p.add_instruction(migraph::activation{"relu"}, conv);
+        auto conv = p.add_instruction(migraph::op::convolution{}, input, weights);
+        p.add_instruction(migraph::op::activation{"relu"}, conv);
         return p;
     }
 };
@@ -171,7 +171,7 @@ struct test_add
         migraph::shape s{migraph::shape::float_type, {3}};
         auto x = p.add_parameter("x", s);
         auto y = p.add_parameter("y", s);
-        p.add_instruction(migraph::add{}, x, y);
+        p.add_instruction(migraph::op::add{}, x, y);
         return p;
     }
 };
@@ -184,8 +184,8 @@ struct test_add_broadcast
         migraph::shape s{migraph::shape::float_type, {3}};
         auto x  = p.add_parameter("x", {migraph::shape::float_type, {2, 2, 3}});
         auto y  = p.add_parameter("y", {migraph::shape::float_type, {2, 2}});
-        auto by = p.add_instruction(migraph::broadcast{0}, x, y);
-        p.add_instruction(migraph::add{}, x, by);
+        auto by = p.add_instruction(migraph::op::broadcast{0}, x, y);
+        p.add_instruction(migraph::op::add{}, x, by);
         return p;
     }
 };
@@ -198,8 +198,8 @@ struct test_add_broadcast2
         migraph::shape s{migraph::shape::float_type, {3}};
         auto x  = p.add_parameter("x", {migraph::shape::float_type, {2, 3, 4}});
         auto y  = p.add_parameter("y", {migraph::shape::float_type, {3}});
-        auto by = p.add_instruction(migraph::broadcast{1}, x, y);
-        p.add_instruction(migraph::add{}, x, by);
+        auto by = p.add_instruction(migraph::op::broadcast{1}, x, y);
+        p.add_instruction(migraph::op::add{}, x, by);
         return p;
     }
 };
@@ -212,8 +212,8 @@ struct test_add_broadcast3
         migraph::shape s{migraph::shape::float_type, {3}};
         auto x  = p.add_parameter("x", {migraph::shape::float_type, {2, 4, 5}});
         auto y  = p.add_parameter("y", {migraph::shape::float_type, {4}});
-        auto by = p.add_instruction(migraph::broadcast{1}, x, y);
-        p.add_instruction(migraph::add{}, x, by);
+        auto by = p.add_instruction(migraph::op::broadcast{1}, x, y);
+        p.add_instruction(migraph::op::add{}, x, by);
         return p;
     }
 };
@@ -226,8 +226,8 @@ struct test_add_broadcast4
         migraph::shape s{migraph::shape::float_type, {3}};
         auto x  = p.add_parameter("x", {migraph::shape::float_type, {2, 3, 5}});
         auto y  = p.add_parameter("y", {migraph::shape::float_type, {3}});
-        auto by = p.add_instruction(migraph::broadcast{1}, x, y);
-        p.add_instruction(migraph::add{}, x, by);
+        auto by = p.add_instruction(migraph::op::broadcast{1}, x, y);
+        p.add_instruction(migraph::op::add{}, x, by);
         return p;
     }
 };
@@ -240,8 +240,8 @@ struct test_add_broadcast5
         migraph::shape s{migraph::shape::float_type, {3}};
         auto x  = p.add_parameter("x", {migraph::shape::float_type, {2, 4, 8}});
         auto y  = p.add_parameter("y", {migraph::shape::float_type, {4}});
-        auto by = p.add_instruction(migraph::broadcast{1}, x, y);
-        p.add_instruction(migraph::add{}, x, by);
+        auto by = p.add_instruction(migraph::op::broadcast{1}, x, y);
+        p.add_instruction(migraph::op::add{}, x, by);
         return p;
     }
 };
@@ -252,7 +252,7 @@ struct test_softmax
     {
         migraph::program p;
         auto x = p.add_parameter("x", migraph::shape{migraph::shape::float_type, {5, 3, 4, 2}});
-        p.add_instruction(migraph::softmax{}, x);
+        p.add_instruction(migraph::op::softmax{}, x);
         return p;
     }
 };
@@ -263,7 +263,7 @@ struct test_softmax2
     {
         migraph::program p;
         auto x = p.add_parameter("x", migraph::shape{migraph::shape::float_type, {1, 1000, 1, 1}});
-        p.add_instruction(migraph::softmax{}, x);
+        p.add_instruction(migraph::op::softmax{}, x);
         return p;
     }
 };
@@ -276,7 +276,7 @@ struct test_conv
         auto input   = p.add_parameter("x", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
         auto weights = p.add_parameter("w", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
-        p.add_instruction(migraph::convolution{}, input, weights);
+        p.add_instruction(migraph::op::convolution{}, input, weights);
         return p;
     }
 };
@@ -290,7 +290,7 @@ struct test_conv2
             p.add_parameter("x", migraph::shape{migraph::shape::float_type, {1, 512, 28, 28}});
         auto weights =
             p.add_parameter("w", migraph::shape{migraph::shape::float_type, {256, 512, 1, 1}});
-        p.add_instruction(migraph::convolution{{0, 0}, {1, 1}, {1, 1}}, input, weights);
+        p.add_instruction(migraph::op::convolution{{0, 0}, {1, 1}, {1, 1}}, input, weights);
         return p;
     }
 };
@@ -303,8 +303,8 @@ struct test_conv_relu
         auto input   = p.add_parameter("x", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
         auto weights = p.add_parameter("w", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
-        auto conv = p.add_instruction(migraph::convolution{}, input, weights);
-        p.add_instruction(migraph::activation{"relu"}, conv);
+        auto conv = p.add_instruction(migraph::op::convolution{}, input, weights);
+        p.add_instruction(migraph::op::activation{"relu"}, conv);
         return p;
     }
 };
@@ -316,8 +316,8 @@ struct test_add_relu
         migraph::program p;
         auto x = p.add_parameter("x", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
         auto y = p.add_parameter("y", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
-        auto add = p.add_instruction(migraph::add{}, x, y);
-        p.add_instruction(migraph::activation{"relu"}, add);
+        auto add = p.add_instruction(migraph::op::add{}, x, y);
+        p.add_instruction(migraph::op::activation{"relu"}, add);
         return p;
     }
 };
@@ -331,9 +331,9 @@ struct test_conv_pooling
             p.add_parameter("x", migraph::shape{migraph::shape::float_type, {4, 3, 32, 32}});
         auto weights =
             p.add_parameter("w", migraph::shape{migraph::shape::float_type, {4, 3, 3, 3}});
-        auto conv    = p.add_instruction(migraph::convolution{}, input, weights);
-        auto pooling = p.add_instruction(migraph::pooling{"max"}, conv);
-        p.add_instruction(migraph::activation{"relu"}, pooling);
+        auto conv    = p.add_instruction(migraph::op::convolution{}, input, weights);
+        auto pooling = p.add_instruction(migraph::op::pooling{"max"}, conv);
+        p.add_instruction(migraph::op::activation{"relu"}, pooling);
         return p;
     }
 };
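An aside on the convolution shapes in these tests: assuming the three brace lists passed to op::convolution are padding, stride, and dilation, the spatial output size follows the usual rule out = (in + 2*pad - kernel) / stride + 1, so the 1x1 kernel in test_conv2 keeps the 28x28 input size. A tiny check:

    // Quick check of the output-size rule noted above (not part of the commit).
    #include <iostream>

    int conv_out(int in, int pad, int kernel, int stride)
    {
        return (in + 2 * pad - kernel) / stride + 1;
    }

    int main()
    {
        std::cout << conv_out(28, 0, 1, 1) << "\n"; // test_conv2: 1x1 kernel, no padding -> 28
        std::cout << conv_out(3, 0, 3, 1) << "\n";  // test_conv: 3x3 kernel on a 3x3 input -> 1
        return 0;
    }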
@@ -345,7 +345,7 @@ struct test_gemm
         migraph::program p;
         auto a = p.add_parameter("a", migraph::shape{migraph::shape::float_type, {4, 5}});
         auto b = p.add_parameter("b", migraph::shape{migraph::shape::float_type, {5, 3}});
-        p.add_instruction(migraph::gemm{}, a, b);
+        p.add_instruction(migraph::op::gemm{}, a, b);
         return p;
     }
 };
@@ -357,7 +357,7 @@ struct test_gemm_ld
         migraph::program p;
         auto a = p.add_parameter("a", migraph::shape{migraph::shape::float_type, {4, 5}, {10, 1}});
         auto b = p.add_parameter("b", migraph::shape{migraph::shape::float_type, {5, 3}, {20, 1}});
-        p.add_instruction(migraph::gemm{}, a, b);
+        p.add_instruction(migraph::op::gemm{}, a, b);
         return p;
     }
 };
@@ -369,8 +369,8 @@ struct test_gemm_transposeb
         migraph::program p;
         auto a  = p.add_parameter("a", migraph::shape{migraph::shape::float_type, {4, 5}});
         auto b  = p.add_parameter("b", migraph::shape{migraph::shape::float_type, {3, 5}});
-        auto bt = p.add_instruction(migraph::transpose{{1, 0}}, b);
-        p.add_instruction(migraph::gemm{}, a, bt);
+        auto bt = p.add_instruction(migraph::op::transpose{{1, 0}}, b);
+        p.add_instruction(migraph::op::gemm{}, a, bt);
         return p;
     }
 };
@@ -382,8 +382,8 @@ struct test_gemm_transposea
         migraph::program p;
         auto a  = p.add_parameter("a", migraph::shape{migraph::shape::float_type, {5, 4}});
         auto b  = p.add_parameter("b", migraph::shape{migraph::shape::float_type, {5, 3}});
-        auto at = p.add_instruction(migraph::transpose{{1, 0}}, a);
-        p.add_instruction(migraph::gemm{}, at, b);
+        auto at = p.add_instruction(migraph::op::transpose{{1, 0}}, a);
+        p.add_instruction(migraph::op::gemm{}, at, b);
         return p;
     }
 };
@@ -395,9 +395,9 @@ struct test_gemm_transposeab
         migraph::program p;
         auto a  = p.add_parameter("a", migraph::shape{migraph::shape::float_type, {5, 4}});
         auto b  = p.add_parameter("b", migraph::shape{migraph::shape::float_type, {3, 5}});
-        auto at = p.add_instruction(migraph::transpose{{1, 0}}, a);
-        auto bt = p.add_instruction(migraph::transpose{{1, 0}}, b);
-        p.add_instruction(migraph::gemm{}, at, bt);
+        auto at = p.add_instruction(migraph::op::transpose{{1, 0}}, a);
+        auto bt = p.add_instruction(migraph::op::transpose{{1, 0}}, b);
+        p.add_instruction(migraph::op::gemm{}, at, bt);
         return p;
     }
 };
@@ -409,7 +409,7 @@ struct test_contiguous
         migraph::program p;
         migraph::shape s{migraph::shape::float_type, {4, 4, 4, 3}, {48, 4, 1, 16}};
         auto x = p.add_parameter("x", s);
-        p.add_instruction(migraph::contiguous{}, x);
+        p.add_instruction(migraph::op::contiguous{}, x);
         EXPECT(p.get_shape().standard());
         return p;
     }
@@ -423,8 +423,8 @@ struct test_transpose
         migraph::shape s{migraph::shape::float_type, {4, 3, 4, 4}};
         auto x = p.add_parameter("x", s);
         std::vector<int64_t> perm = {0, 2, 3, 1};
-        auto l = p.add_instruction(migraph::transpose{perm}, x);
-        p.add_instruction(migraph::contiguous{}, l);
+        auto l = p.add_instruction(migraph::op::transpose{perm}, x);
+        p.add_instruction(migraph::op::contiguous{}, l);
         return p;
     }
 };
@@ -447,7 +447,7 @@ struct test_batchnorm_inference_2
         auto bias     = p.add_literal(migraph::abs(migraph::generate_literal(vars, 2)));
         auto mean     = p.add_literal(migraph::abs(migraph::generate_literal(vars, 3)));
         auto variance = p.add_literal(migraph::abs(migraph::generate_literal(vars, 4)));
-        p.add_instruction(migraph::batch_norm_inference{}, x, scale, bias, mean, variance);
+        p.add_instruction(migraph::op::batch_norm_inference{}, x, scale, bias, mean, variance);
         return p;
     }
 };
@@ -470,7 +470,7 @@ struct test_batchnorm_inference
         auto bias     = p.add_literal(migraph::abs(migraph::generate_literal(vars, 2)));
         auto mean     = p.add_literal(migraph::abs(migraph::generate_literal(vars, 3)));
         auto variance = p.add_literal(migraph::abs(migraph::generate_literal(vars, 4)));
-        p.add_instruction(migraph::batch_norm_inference{}, x, scale, bias, mean, variance);
+        p.add_instruction(migraph::op::batch_norm_inference{}, x, scale, bias, mean, variance);
         return p;
     }
 };
@@ -486,12 +486,12 @@ struct test_conv_bn
         migraph::shape vars{migraph::shape::float_type, {64}};
         auto x    = p.add_parameter("x", xs);
         auto w    = p.add_parameter("w", ws);
-        auto conv = p.add_instruction(migraph::convolution{{3, 3}, {2, 2}, {1, 1}}, x, w);
+        auto conv = p.add_instruction(migraph::op::convolution{{3, 3}, {2, 2}, {1, 1}}, x, w);
         auto scale    = p.add_literal(migraph::abs(migraph::generate_literal(vars, 1)));
         auto bias     = p.add_literal(migraph::abs(migraph::generate_literal(vars, 2)));
         auto mean     = p.add_literal(migraph::abs(migraph::generate_literal(vars, 3)));
         auto variance = p.add_literal(migraph::abs(migraph::generate_literal(vars, 4)));
-        p.add_instruction(migraph::batch_norm_inference{}, conv, scale, bias, mean, variance);
+        p.add_instruction(migraph::op::batch_norm_inference{}, conv, scale, bias, mean, variance);
         return p;
     }
 };
@@ -507,15 +507,15 @@ struct test_conv_bn_relu_pooling
         migraph::shape vars{migraph::shape::float_type, {64}};
         auto x    = p.add_parameter("x", xs);
         auto w    = p.add_parameter("w", ws);
-        auto conv = p.add_instruction(migraph::convolution{{3, 3}, {2, 2}, {1, 1}}, x, w);
+        auto conv = p.add_instruction(migraph::op::convolution{{3, 3}, {2, 2}, {1, 1}}, x, w);
         auto scale    = p.add_literal(migraph::abs(migraph::generate_literal(vars, 1)));
         auto bias     = p.add_literal(migraph::abs(migraph::generate_literal(vars, 2)));
         auto mean     = p.add_literal(migraph::abs(migraph::generate_literal(vars, 3)));
         auto variance = p.add_literal(migraph::abs(migraph::generate_literal(vars, 4)));
         auto bn =
-            p.add_instruction(migraph::batch_norm_inference{}, conv, scale, bias, mean, variance);
-        auto relu = p.add_instruction(migraph::activation{"relu"}, bn);
-        p.add_instruction(migraph::pooling{"average", {1, 1}, {2, 2}, {3, 3}}, relu);
+            p.add_instruction(migraph::op::batch_norm_inference{}, conv, scale, bias, mean, variance);
+        auto relu = p.add_instruction(migraph::op::activation{"relu"}, bn);
+        p.add_instruction(migraph::op::pooling{"average", {1, 1}, {2, 2}, {3, 3}}, relu);
         return p;
     }
 };
@@ -530,7 +530,7 @@ struct test_conv_bn_relu_pooling2
         auto bias     = p.add_literal(migraph::abs(migraph::generate_literal(vars, 2 + channels)));
         auto mean     = p.add_literal(migraph::abs(migraph::generate_literal(vars, 3 + channels)));
         auto variance = p.add_literal(migraph::abs(migraph::generate_literal(vars, 4 + channels)));
-        return p.add_instruction(migraph::batch_norm_inference{}, x, scale, bias, mean, variance);
+        return p.add_instruction(migraph::op::batch_norm_inference{}, x, scale, bias, mean, variance);
     }
 
     migraph::program create_program() const
     {
@@ -542,15 +542,15 @@ struct test_conv_bn_relu_pooling2
         migraph::shape ws2{migraph::shape::float_type, {2048, 1024, 1, 1}};
         auto x1    = p.add_parameter("x1", xs1);
         auto w1    = p.add_parameter("w1", ws1);
-        auto conv1 = p.add_instruction(migraph::convolution{{0, 0}, {1, 1}, {1, 1}}, x1, w1);
+        auto conv1 = p.add_instruction(migraph::op::convolution{{0, 0}, {1, 1}, {1, 1}}, x1, w1);
         auto bn1   = add_bn(p, conv1, 2048);
         auto x2    = p.add_parameter("x2", xs2);
         auto w2    = p.add_parameter("w2", ws2);
-        auto conv2 = p.add_instruction(migraph::convolution{{0, 0}, {2, 2}, {1, 1}}, x2, w2);
+        auto conv2 = p.add_instruction(migraph::op::convolution{{0, 0}, {2, 2}, {1, 1}}, x2, w2);
         auto bn2   = add_bn(p, conv2, 2048);
-        auto add  = p.add_instruction(migraph::add{}, bn1, bn2);
-        auto relu = p.add_instruction(migraph::activation{"relu"}, add);
-        p.add_instruction(migraph::pooling{"average", {1, 1}, {2, 2}, {3, 3}}, relu);
+        auto add  = p.add_instruction(migraph::op::add{}, bn1, bn2);
+        auto relu = p.add_instruction(migraph::op::activation{"relu"}, add);
+        p.add_instruction(migraph::op::pooling{"average", {1, 1}, {2, 2}, {3, 3}}, relu);
         return p;
     }
 };
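One more aside on what the broadcast tests above exercise: in test_add_broadcast2, a length-3 tensor y is broadcast along axis 1 of a {2, 3, 4} tensor x before the element-wise add. In plain C++ (not MIGraphX code) the effect is:

    // Plain-C++ illustration of broadcast-on-axis-1 followed by element-wise add.
    #include <array>
    #include <iostream>

    int main()
    {
        std::array<std::array<std::array<float, 4>, 3>, 2> x{}; // shape {2, 3, 4}, zero-initialised
        std::array<float, 3> y{1.0f, 2.0f, 3.0f};               // shape {3}, broadcast on axis 1

        for(std::size_t i = 0; i < 2; ++i)
            for(std::size_t j = 0; j < 3; ++j)
                for(std::size_t k = 0; k < 4; ++k)
                    x[i][j][k] += y[j];                         // y[j] is reused across axes 0 and 2

        std::cout << x[1][2][0] << "\n";                        // prints 3
        return 0;
    }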