gaoqiong / MIGraphX · Commits

Commit a4e698cc, authored Jun 19, 2018 by Paul
Parent: a350dcc2

Use map for transforming instructions
Showing 2 changed files with 49 additions and 125 deletions:

src/targets/cpu/cpu_target.cpp   +49 -123
test/cpu_ops_test.cpp            +0 -2
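The commit replaces the per-op if/else chain in cpu_apply with a name-to-handler lookup table. Below is a minimal standalone sketch of that dispatch-map pattern, included for illustration only: Instruction, Applier, and the handler bodies are simplified stand-ins, not the MIGraphX types.

// Sketch: dispatch instruction rewrites through a map keyed by op name
// instead of a long if/else chain. Types here are illustrative stand-ins.
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

struct Instruction
{
    std::string name; // e.g. "tanh", "exp"
};

struct Applier
{
    std::vector<Instruction>* prog;
    std::unordered_map<std::string, std::function<void(Instruction&)>> apply_map{};

    void init()
    {
        // Each entry replaces one hand-written apply_* branch from the old code.
        apply_map["tanh"] = [](Instruction& ins) { ins.name = "cpu_tanh"; };
        apply_map["exp"]  = [](Instruction& ins) { ins.name = "cpu_exp"; };
    }

    void apply()
    {
        init();
        for(auto& ins : *prog)
        {
            // Ops without an entry are left untouched, mirroring the
            // fall-through behaviour of the original if/else chain.
            if(apply_map.count(ins.name) > 0)
                apply_map.at(ins.name)(ins);
        }
    }
};

int main()
{
    std::vector<Instruction> prog = {{"tanh"}, {"identity"}, {"exp"}};
    Applier a;
    a.prog = &prog;
    a.apply();
    for(const auto& ins : prog)
        std::cout << ins.name << "\n"; // prints cpu_tanh, identity, cpu_exp
}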
src/targets/cpu/cpu_target.cpp @ a4e698cc
@@ -423,154 +423,80 @@ struct cpu_binary
 struct cpu_apply
 {
     program* prog;
+    std::unordered_map<std::string, std::function<void(instruction_ref)>> apply_map{};
 
-    void apply()
-    {
-        for(auto it = prog->begin(); it != prog->end(); it++)
-        {
-            if(it->op.name() == "convolution")
-            {
-                apply_convolution(it);
-            }
-            else if(it->op.name() == "gemm")
-            {
-                apply_gemm(it);
-            }
-            else if(it->op.name() == "reshape")
-            {
-                apply_reshape(it);
-            }
-            else if(it->op.name() == "contiguous")
-            {
-                apply_contiguous(it);
-            }
-            else if(it->op.name() == "transpose")
-            {
-                apply_transpose(it);
-            }
-            else if(it->op.name() == "activation")
-            {
-                apply_activation(it);
-            }
-            else if(it->op.name() == "identity")
-            {
-                apply_identity(it);
-            }
-            else if(it->op.name() == "softmax")
-            {
-                apply_softmax(it);
-            }
-            else if(it->op.name() == "tanh")
-            {
-                apply_tanh(it);
-            }
-            else if(it->op.name() == "sigmoid")
-            {
-                apply_sigmoid(it);
-            }
-            else if(it->op.name() == "exp")
-            {
-                apply_exp(it);
-            }
-            else if(it->op.name() == "neg")
-            {
-                apply_neg(it);
-            }
-            else if(it->op.name() == "sin")
-            {
-                apply_sin(it);
-            }
-            else if(it->op.name() == "cos")
-            {
-                apply_cos(it);
-            }
-            else if(it->op.name() == "tan")
-            {
-                apply_tan(it);
-            }
-        }
-    }
-    void apply_convolution(instruction_ref ins)
-    {
-        auto&& op = any_cast<convolution>(ins->op);
-        prog->replace_instruction(ins, cpu_convolution{op}, ins->arguments);
-    }
-    void apply_gemm(instruction_ref ins)
-    {
-        auto&& op = any_cast<gemm>(ins->op);
-        prog->replace_instruction(ins, cpu_gemm{op}, ins->arguments);
-    }
-    void apply_reshape(instruction_ref ins)
-    {
-        auto&& op = any_cast<reshape>(ins->op);
-        prog->replace_instruction(ins, cpu_reshape{op}, ins->arguments);
-    }
-    void apply_contiguous(instruction_ref ins)
-    {
-        auto&& op = any_cast<contiguous>(ins->op);
-        prog->replace_instruction(ins, cpu_contiguous{op}, ins->arguments);
-    }
-    void apply_transpose(instruction_ref ins)
-    {
-        auto&& op = any_cast<transpose>(ins->op);
-        prog->replace_instruction(ins, cpu_transpose{op}, ins->arguments);
-    }
-    void apply_activation(instruction_ref ins)
-    {
-        auto&& op = any_cast<activation>(ins->op);
-        if(op.mode == "relu")
-            prog->replace_instruction(ins, cpu_unary<relu_op>{}, ins->arguments);
-    }
-    void apply_identity(instruction_ref ins)
-    {
-        prog->replace_instruction(ins, cpu_unary<identity_op>{}, ins->arguments);
-    }
-    void apply_softmax(instruction_ref ins)
-    {
-        prog->replace_instruction(ins, softmax2d{}, ins->arguments);
-    }
-    void apply_tanh(instruction_ref ins)
-    {
-        prog->replace_instruction(ins, cpu_unary<tanh_op>{}, ins->arguments);
-    }
-    void apply_sigmoid(instruction_ref ins)
-    {
-        prog->replace_instruction(ins, cpu_unary<sigmoid_op>{}, ins->arguments);
-    }
-    void apply_exp(instruction_ref ins)
-    {
-        prog->replace_instruction(ins, cpu_unary<exp_op>{}, ins->arguments);
-    }
-    void apply_neg(instruction_ref ins)
-    {
-        prog->replace_instruction(ins, cpu_unary<neg_op>{}, ins->arguments);
-    }
-    void apply_sin(instruction_ref ins)
-    {
-        prog->replace_instruction(ins, cpu_unary<sin_op>{}, ins->arguments);
-    }
-    void apply_cos(instruction_ref ins)
-    {
-        prog->replace_instruction(ins, cpu_unary<cos_op>{}, ins->arguments);
-    }
-    void apply_tan(instruction_ref ins)
-    {
-        prog->replace_instruction(ins, cpu_unary<tan_op>{}, ins->arguments);
-    }
+
+    template <class T>
+    auto simple_op()
+    {
+        return [this](instruction_ref ins) { apply_simple_op<T>(ins); };
+    }
+
+    template <class T, class Op>
+    auto extend_op()
+    {
+        return [this](instruction_ref ins) { apply_extend_op<T, Op>(ins); };
+    }
+
+    void init()
+    {
+        apply_map["convolution"] = extend_op<cpu_convolution, convolution>();
+        apply_map["gemm"]        = extend_op<cpu_gemm, gemm>();
+        apply_map["reshape"]     = extend_op<cpu_reshape, reshape>();
+        apply_map["contiguous"]  = extend_op<cpu_contiguous, contiguous>();
+        apply_map["transpose"]   = extend_op<cpu_transpose, transpose>();
+
+        apply_map["identity"] = simple_op<cpu_unary<identity_op>>();
+        apply_map["tanh"]     = simple_op<cpu_unary<tanh_op>>();
+        apply_map["sigmoid"]  = simple_op<cpu_unary<sigmoid_op>>();
+        apply_map["exp"]      = simple_op<cpu_unary<exp_op>>();
+        apply_map["neg"]      = simple_op<cpu_unary<neg_op>>();
+        apply_map["sin"]      = simple_op<cpu_unary<sin_op>>();
+        apply_map["cos"]      = simple_op<cpu_unary<cos_op>>();
+        apply_map["tan"]      = simple_op<cpu_unary<tan_op>>();
+        apply_map["softmax"]  = simple_op<softmax2d>();
+    }
+
+    void apply()
+    {
+        init();
+        for(auto it = prog->begin(); it != prog->end(); it++)
+        {
+            if(it->op.name() == "activation")
+            {
+                apply_activation(it);
+            }
+            else if(apply_map.count(it->op.name()) > 0)
+            {
+                apply_map.at(it->op.name())(it);
+            }
+        }
+    }
+
+    template <class T>
+    void apply_simple_op(instruction_ref ins)
+    {
+        prog->replace_instruction(ins, T{}, ins->arguments);
+    }
+
+    template <class T, class Op>
+    void apply_extend_op(instruction_ref ins)
+    {
+        auto&& op = any_cast<Op>(ins->op);
+        prog->replace_instruction(ins, T{op}, ins->arguments);
+    }
+
+    void apply_activation(instruction_ref ins)
+    {
+        auto&& op = any_cast<activation>(ins->op);
+        if(op.mode == "relu")
+            prog->replace_instruction(ins, cpu_unary<relu_op>{}, ins->arguments);
+    }
 };
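The simple_op and extend_op helpers in the new code are factories: each is a member-function template that returns a lambda capturing this, so one generic handler can be registered per replacement type instead of writing a dedicated apply_* member function. A rough reduction of that idea, using illustrative stand-in names rather than the MIGraphX types, might look like:

// Sketch: a templated member function builds and returns the handler to register.
// Registry, the *_stub types, and tag() are hypothetical, for illustration only.
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

struct Registry
{
    std::string note = "replace with ";
    std::unordered_map<std::string, std::function<void()>> handlers{};

    template <class T>
    std::function<void()> simple_op()
    {
        // Capture this so the handler can use Registry state, much as the real
        // handlers call back into cpu_apply to rewrite the program.
        return [this]() { std::cout << note << T::tag() << "\n"; };
    }
};

struct cpu_tanh_stub { static const char* tag() { return "cpu_tanh"; } };
struct cpu_exp_stub  { static const char* tag() { return "cpu_exp"; } };

int main()
{
    Registry r;
    r.handlers["tanh"] = r.simple_op<cpu_tanh_stub>();
    r.handlers["exp"]  = r.simple_op<cpu_exp_stub>();
    r.handlers.at("tanh")(); // replace with cpu_tanh
    r.handlers.at("exp")();  // replace with cpu_exp
}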
test/cpu_ops_test.cpp @ a4e698cc
@@ -424,8 +424,6 @@ void transpose_test()
     std::vector<float> results_vector(12);
     result2.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
-    std::vector<size_t> new_lens    = {1, 3, 2, 2};
-    std::vector<size_t> new_strides = {12, 1, 6, 3};
     std::vector<float> gold = {0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11};
     EXPECT(test::verify_range(results_vector, gold));
 }