gaoqiong / MIGraphX

Commit bf6f82d8 · authored May 16, 2019 by Paul

Merge from develop

Parents: 6a0797e2, b93f5320

Changes: 92 files in this merge; this page shows 20 changed files with 316 additions and 36 deletions (+316 -36).
Changed files on this page:

src/targets/gpu/include/migraphx/gpu/gemm.hpp        +11  -1
src/targets/gpu/include/migraphx/gpu/hip.hpp         +16  -2
src/targets/gpu/include/migraphx/gpu/leaky_relu.hpp  +11  -1
src/targets/gpu/include/migraphx/gpu/logsoftmax.hpp  +11  -1
src/targets/gpu/include/migraphx/gpu/lrn.hpp         +11  -1
src/targets/gpu/include/migraphx/gpu/miopen.hpp      +32  -0
src/targets/gpu/include/migraphx/gpu/oper.hpp        +27  -4
src/targets/gpu/include/migraphx/gpu/pad.hpp         +10  -1
src/targets/gpu/include/migraphx/gpu/pooling.hpp     +10  -1
src/targets/gpu/include/migraphx/gpu/relu.hpp        +11  -1
src/targets/gpu/include/migraphx/gpu/sigmoid.hpp     +11  -1
src/targets/gpu/include/migraphx/gpu/softmax.hpp     +11  -1
src/targets/gpu/include/migraphx/gpu/tanh.hpp        +11  -1
src/targets/gpu/lowering.cpp                         +2   -0
src/targets/gpu/tanh.cpp                             +1   -1
src/targets/gpu/write_literals.cpp                   +7   -0
src/tf/tf.cpp                                        +85  -19
test/cpu_ops_test.cpp                                +17  -0
test/eliminate_allocation_test.cpp                   +7   -0
test/eliminate_concat_test.cpp                       +14  -0
src/targets/gpu/include/migraphx/gpu/gemm.hpp

@@ -13,11 +13,21 @@ struct context;
 struct miopen_gemm
 {
     op::dot op;
+
+    template <class Self, class F>
+    static auto reflect(Self& self, F f)
+    {
+        return migraphx::reflect(self.op, f);
+    }
+
     std::string name() const { return "gpu::gemm"; }
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument
     compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
-    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
+    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
+    {
+        return shapes.size() - 1;
+    }
 };
 } // namespace gpu
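Across these headers the merge adds the same two things to each GPU operator: a `reflect` member and a signed return type for `output_alias`. `reflect` is MIGraphX's self-reflection hook: the operator hands each of its fields, together with a name, to a visitor `f`, and generic code uses that to print, compare, or serialize ops. A minimal standalone sketch of the idiom, with a simplified `pack` standing in for the migraphx helper (all names here are illustrative, not from the commit):

#include <iostream>
#include <string>
#include <tuple>

// stand-in for migraphx's pack(): collect the visitor's results
template <class... Ts>
auto pack(Ts... xs)
{
    return std::make_tuple(xs...);
}

struct example_op
{
    int axis       = 1;
    std::string id = "example";

    // same shape as the reflect members added in this commit
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.axis, "axis"), f(self.id, "id"));
    }
};

int main()
{
    example_op op;
    // a printing visitor; a framework can build comparison or
    // serialization visitors the same way
    example_op::reflect(op, [](const auto& field, const std::string& name) {
        std::cout << name << " = " << field << '\n';
        return 0; // pack() here just needs some value
    });
}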
src/targets/gpu/include/migraphx/gpu/hip.hpp

@@ -28,6 +28,13 @@ struct hip_allocate
 {
     shape s;
     std::string tag{};
+
+    template <class Self, class F>
+    static auto reflect(Self& self, F f)
+    {
+        return pack(f(self.s, "shape"), f(self.tag, "tag"));
+    }
+
     std::string name() const { return "hip::allocate"; }
     shape compute_shape(const std::vector<shape>& inputs) const
     {

@@ -43,6 +50,13 @@ struct hip_allocate
 struct hip_sync
 {
     std::string tag{};
+
+    template <class Self, class F>
+    static auto reflect(Self& self, F f)
+    {
+        return pack(f(self.tag, "tag"));
+    }
+
     std::string name() const { return "hip::sync"; }
     shape compute_shape(const std::vector<shape>& inputs) const
     {

@@ -73,7 +87,7 @@ struct hip_write
     {
         return to_gpu(args.front());
     }
-    int output_alias(const std::vector<shape>&) const { return 0; }
+    std::ptrdiff_t output_alias(const std::vector<shape>&) const { return 0; }
 };

 struct hip_copy

@@ -89,7 +103,7 @@ struct hip_copy
         copy_to_gpu(args[0], args[1]);
         return args[1];
     }
-    int output_alias(const std::vector<shape>&) const { return 1; }
+    std::ptrdiff_t output_alias(const std::vector<shape>&) const { return 1; }
 };
 } // namespace gpu
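The recurring `int` → `std::ptrdiff_t` change is the signature of `output_alias`, which reports which input argument the operator's output aliases; MIGraphX passes preallocated output buffers as trailing arguments, so most ops answer `shapes.size() - 1` (and `hip_copy` answers 1, its destination). Since `size()` is unsigned, returning it through a plain `int` relied on a narrowing conversion; `std::ptrdiff_t` is the matching signed type and leaves room for a negative "no alias" answer. A hedged illustration (the `-1` sentinel is illustrative, not taken from this commit):

#include <cstddef>
#include <iostream>
#include <vector>

std::ptrdiff_t output_alias(const std::vector<int>& shapes)
{
    if(shapes.empty())
        return -1; // hypothetical "output aliases no input"
    // last argument is the preallocated output buffer
    return static_cast<std::ptrdiff_t>(shapes.size()) - 1;
}

int main()
{
    std::cout << output_alias({0, 0, 0}) << '\n'; // 2
    std::cout << output_alias({}) << '\n';        // -1
}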
src/targets/gpu/include/migraphx/gpu/leaky_relu.hpp

@@ -13,11 +13,21 @@ struct context;
 struct miopen_leaky_relu
 {
     shared<activation_descriptor> ad;
+
+    template <class Self, class F>
+    static auto reflect(Self& self, F f)
+    {
+        return gpu::reflect(self.ad.get(), f);
+    }
+
     std::string name() const { return "gpu::leaky_relu"; }
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument
     compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
-    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
+    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
+    {
+        return shapes.size() - 1;
+    }
 };
 } // namespace gpu
src/targets/gpu/include/migraphx/gpu/logsoftmax.hpp

@@ -25,11 +25,21 @@ namespace gpu {
 struct hip_logsoftmax
 {
     op::logsoftmax op;
+
+    template <class Self, class F>
+    static auto reflect(Self& self, F f)
+    {
+        return migraphx::reflect(self.op, f);
+    }
+
     std::string name() const { return "gpu::logsoftmax"; }
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument
     compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
-    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
+    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
+    {
+        return shapes.size() - 1;
+    }
 };
 } // namespace gpu
src/targets/gpu/include/migraphx/gpu/lrn.hpp

@@ -13,11 +13,21 @@ struct context;
 struct miopen_lrn
 {
     shared<lrn_descriptor> ldesc;
+
+    template <class Self, class F>
+    static auto reflect(Self& self, F f)
+    {
+        return gpu::reflect(self.ldesc.get(), f);
+    }
+
     std::string name() const { return "gpu::lrn"; }
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument
     compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
-    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
+    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
+    {
+        return shapes.size() - 1;
+    }
 };
 } // namespace gpu
src/targets/gpu/include/migraphx/gpu/miopen.hpp

@@ -162,6 +162,38 @@ inline fused_operator_args make_fused_args()
     return make_obj<fused_operator_args>(&miopenCreateOperatorArgs);
 }

+template <class F>
+auto reflect(miopenActivationDescriptor_t ad, F f)
+{
+    assert(ad != nullptr);
+    miopenActivationMode_t mode = miopenActivationPASTHRU;
+    double alpha                = 0.0;
+    double beta                 = 0.0;
+    double gamma                = 0.0;
+    miopenGetActivationDescriptor(ad, &mode, &alpha, &beta, &gamma);
+    return pack(f(std::move(mode), "mode"),    // NOLINT
+                f(std::move(alpha), "alpha"),  // NOLINT
+                f(std::move(beta), "beta"),    // NOLINT
+                f(std::move(gamma), "gamma")); // NOLINT
+}
+
+template <class F>
+auto reflect(miopenLRNDescriptor_t lrnd, F f)
+{
+    assert(lrnd != nullptr);
+    miopenLRNMode_t mode = miopenLRNWithinChannel;
+    unsigned int n       = 0;
+    double alpha         = 0.0;
+    double beta          = 0.0;
+    double k             = 0.0;
+    miopenGetLRNDescriptor(lrnd, &mode, &n, &alpha, &beta, &k);
+    return pack(f(std::move(mode), "mode"),   // NOLINT
+                f(std::move(n), "n"),         // NOLINT
+                f(std::move(alpha), "alpha"), // NOLINT
+                f(std::move(beta), "beta"),   // NOLINT
+                f(std::move(k), "k"));        // NOLINT
+}
+
 } // namespace gpu
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
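These two overloads are what make the descriptor-holding ops elsewhere in this commit (miopen_relu, miopen_lrn, and friends) reflectable: their `reflect` members call `gpu::reflect(self.ad.get(), f)`, and the overload reads the parameters back out of the opaque MIOpen descriptor so the visitor can see them by name. A hedged sketch of that read-back path, assuming a MIOpen build and omitting status-code checks:

#include <miopen/miopen.h>
#include <iostream>

int main()
{
    miopenActivationDescriptor_t ad = nullptr;
    miopenCreateActivationDescriptor(&ad);
    miopenSetActivationDescriptor(ad, miopenActivationRELU, 0.0, 0.0, 0.0);

    // what gpu::reflect(ad, f) does internally before packing the fields
    miopenActivationMode_t mode = miopenActivationPASTHRU;
    double alpha = 0.0, beta = 0.0, gamma = 0.0;
    miopenGetActivationDescriptor(ad, &mode, &alpha, &beta, &gamma);
    std::cout << "mode=" << mode << " alpha=" << alpha << '\n';

    miopenDestroyActivationDescriptor(ad);
}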
src/targets/gpu/include/migraphx/gpu/oper.hpp

@@ -45,7 +45,15 @@ struct unary_device : oper<Derived>
     shape compute_shape(const std::vector<shape>& inputs) const
     {
         check_shapes{inputs, *this}.has(2);
-        return inputs.at(1);
+        auto s = inputs.at(0);
+        if(s.packed())
+        {
+            return s;
+        }
+        else
+        {
+            return {s.type(), s.lens()};
+        }
     }

     argument compute(context& ctx, const shape&, const std::vector<argument>& args) const

@@ -54,7 +62,10 @@ struct unary_device : oper<Derived>
         return args[1];
     }
-    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
+    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
+    {
+        return shapes.size() - 1;
+    }
 };

 template <class Derived,
           void (*F)(hipStream_t, const argument&, const argument&, const argument&)>

@@ -63,7 +74,16 @@ struct binary_device : oper<Derived>
     shape compute_shape(const std::vector<shape>& inputs) const
     {
         check_shapes{inputs, *this}.has(3);
-        return inputs.at(2);
+        auto s0 = inputs.at(0);
+        auto s1 = inputs.at(1);
+        if(s0 == s1 and s0.packed())
+        {
+            return s0;
+        }
+        else
+        {
+            return {s0.type(), s0.lens()};
+        }
     }

     argument compute(context& ctx, const shape&, const std::vector<argument>& args) const

@@ -72,7 +92,10 @@ struct binary_device : oper<Derived>
         return args[2];
     }
-    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
+    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
+    {
+        return shapes.size() - 1;
+    }
 };
 } // namespace gpu
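The compute_shape rewrite is the substantive change in this header: rather than returning the shape of the preallocated output argument unexamined, the device ops now derive the output from the input, keeping a packed input shape as-is and otherwise rebuilding a dense shape from the same lengths (`{s.type(), s.lens()}` invokes the migraphx::shape constructor that computes row-major strides). A hedged toy model of that repacking rule, with a plain struct standing in for migraphx::shape:

#include <cstddef>
#include <vector>

struct toy_shape
{
    std::vector<std::size_t> lens;
    std::vector<std::size_t> strides;

    static std::vector<std::size_t> row_major(const std::vector<std::size_t>& l)
    {
        std::vector<std::size_t> s(l.size(), 1);
        for(std::size_t i = l.size(); i > 1; i--)
            s[i - 2] = s[i - 1] * l[i - 1];
        return s;
    }

    // packed: strides are exactly the dense row-major strides of lens
    bool packed() const { return strides == row_major(lens); }
};

// mirrors the new unary_device::compute_shape rule
toy_shape output_shape(const toy_shape& s)
{
    if(s.packed())
        return s;
    return {s.lens, toy_shape::row_major(s.lens)}; // repack, keep lengths
}

int main()
{
    toy_shape bcast{{2, 3}, {0, 1}}; // zero stride: broadcast, not packed
    toy_shape out = output_shape(bcast);
    return out.strides[0] == 3 ? 0 : 1; // repacked to strides {3, 1}
}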
src/targets/gpu/include/migraphx/gpu/pad.hpp

@@ -14,11 +14,20 @@ struct hip_pad
 {
     op::pad op;
+
+    template <class Self, class F>
+    static auto reflect(Self& self, F f)
+    {
+        return migraphx::reflect(self.op, f);
+    }
+
     std::string name() const { return "gpu::pad"; }
     shape compute_shape(std::vector<shape> inputs) const;
     argument
     compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
-    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
+    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
+    {
+        return shapes.size() - 1;
+    }
 };
 } // namespace gpu
src/targets/gpu/include/migraphx/gpu/pooling.hpp

@@ -16,11 +16,20 @@ struct miopen_pooling
     op::pooling op;
     shared<pooling_descriptor> pd;
+
+    template <class Self, class F>
+    static auto reflect(Self& self, F f)
+    {
+        return migraphx::reflect(self.op, f);
+    }
+
     std::string name() const { return "gpu::pooling"; }
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument
     compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
-    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
+    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
+    {
+        return shapes.size() - 1;
+    }
 };
 } // namespace gpu
src/targets/gpu/include/migraphx/gpu/relu.hpp

@@ -13,11 +13,21 @@ struct context;
 struct miopen_relu
 {
     shared<activation_descriptor> ad;
+
+    template <class Self, class F>
+    static auto reflect(Self& self, F f)
+    {
+        return gpu::reflect(self.ad.get(), f);
+    }
+
     std::string name() const { return "gpu::relu"; }
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument
     compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
-    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
+    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
+    {
+        return shapes.size() - 1;
+    }
 };
 } // namespace gpu
src/targets/gpu/include/migraphx/gpu/sigmoid.hpp

@@ -13,11 +13,21 @@ struct context;
 struct miopen_sigmoid
 {
     shared<activation_descriptor> ad;
+
+    template <class Self, class F>
+    static auto reflect(Self& self, F f)
+    {
+        return gpu::reflect(self.ad.get(), f);
+    }
+
     std::string name() const { return "gpu::sigmoid"; }
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument
     compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
-    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
+    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
+    {
+        return shapes.size() - 1;
+    }
 };
 } // namespace gpu
src/targets/gpu/include/migraphx/gpu/softmax.hpp

@@ -13,11 +13,21 @@ struct context;
 struct miopen_softmax
 {
     op::softmax op;
+
+    template <class Self, class F>
+    static auto reflect(Self& self, F f)
+    {
+        return migraphx::reflect(self.op, f);
+    }
+
     std::string name() const { return "gpu::softmax"; }
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument
     compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
-    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
+    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
+    {
+        return shapes.size() - 1;
+    }
 };
 } // namespace gpu
src/targets/gpu/include/migraphx/gpu/tanh.hpp

@@ -13,11 +13,21 @@ struct context;
 struct miopen_tanh
 {
     shared<activation_descriptor> ad;
+
+    template <class Self, class F>
+    static auto reflect(Self& self, F f)
+    {
+        return gpu::reflect(self.ad.get(), f);
+    }
+
     std::string name() const { return "gpu::tanh"; }
     shape compute_shape(const std::vector<shape>& inputs) const;
     argument
     compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
-    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
+    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
+    {
+        return shapes.size() - 1;
+    }
 };
 } // namespace gpu
src/targets/gpu/lowering.cpp

@@ -45,6 +45,7 @@
 #include <migraphx/gpu/pad.hpp>
 #include <migraphx/gpu/gather.hpp>
 #include <migraphx/gpu/lrn.hpp>
+#include <migraphx/gpu/clip.hpp>
 #include <utility>
 #include <functional>
 #include <algorithm>

@@ -101,6 +102,7 @@ struct miopen_apply
         add_extend_op<hip_logsoftmax, op::logsoftmax>("logsoftmax");
         add_extend_op<hip_gather, op::gather>("gather");
         add_extend_op<hip_pad, op::pad>("pad");
+        add_extend_op<hip_clip, op::clip>("clip");
         add_lrn_op();
         add_convolution_op();
src/targets/gpu/tanh.cpp

@@ -7,7 +7,7 @@ namespace gpu {
 shape miopen_tanh::compute_shape(const std::vector<shape>& inputs) const
 {
-    check_shapes{inputs, *this}.has(2).not_broadcasted();
+    check_shapes{inputs, *this}.has(2).packed();
     return inputs.at(0);
 }
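This one-line change lines up with the oper.hpp rewrite above: `not_broadcasted()` only rejects shapes that reuse data through zero strides, while `packed()` is the stricter requirement that the buffer is dense. A hedged check of the distinction using migraphx::shape directly (assuming the lens-plus-strides constructor and the `broadcasted()` query):

#include <migraphx/shape.hpp>
#include <iostream>

int main()
{
    migraphx::shape dense{migraphx::shape::float_type, {2, 3}};         // strides {3, 1}
    migraphx::shape bcast{migraphx::shape::float_type, {2, 3}, {0, 1}}; // zero stride
    std::cout << dense.packed() << ' ' << bcast.packed() << '\n';       // 1 0
    std::cout << bcast.broadcasted() << '\n';                           // 1
}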
src/targets/gpu/write_literals.cpp

@@ -14,6 +14,13 @@ struct hip_load_literal
 {
     shape s;
     std::size_t n = 0;
+
+    template <class Self, class F>
+    static auto reflect(Self& self, F f)
+    {
+        return pack(f(self.s, "shape"), f(self.n, "id"));
+    }
+
     std::string name() const { return "hip::load_literal"; }
     shape compute_shape(const std::vector<shape>& inputs) const
     {
src/tf/tf.cpp
@@ -108,6 +108,7 @@ struct tf_parser
     {
         add_generic_op("Identity", op::identity{});
         add_generic_op("Relu", op::relu{});
+        add_generic_op("Relu6", op::clip{6.0, 0.0});

         add_binary_op("Add", op::add{});
         add_binary_op("Mul", op::mul{});
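`op::clip{6.0, 0.0}` lists `max_val` before `min_val` (the same order the clip_test below sets them), so TensorFlow's Relu6 needs no operator of its own; it is exactly a clamp to [0, 6]. The identity being relied on, in plain C++:

#include <algorithm>
#include <iostream>

// Relu6(x) = min(max(x, 0), 6)
float relu6(float x) { return std::min(std::max(x, 0.0f), 6.0f); }

int main()
{
    std::cout << relu6(-1.0f) << ' ' << relu6(3.0f) << ' ' << relu6(10.0f) << '\n'; // 0 3 6
}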
@@ -117,6 +118,7 @@ struct tf_parser
         add_mem_op("ConcatV2", &tf_parser::parse_concat);
         add_mem_op("Const", &tf_parser::parse_constant);
         add_mem_op("Conv2D", &tf_parser::parse_conv);
+        add_mem_op("DepthwiseConv2dNative", &tf_parser::parse_depthwiseconv);
         add_mem_op("FusedBatchNorm", &tf_parser::parse_batchnorm);
         add_mem_op("MatMul", &tf_parser::parse_matmul);
         add_mem_op("MaxPool", &tf_parser::parse_pooling);
@@ -153,7 +155,7 @@ struct tf_parser
     template <class T>
     void add_binary_op(std::string name, T x)
     {
-        add_op(name, [this, x](attribute_map attributes, std::vector<instruction_ref> args) {
+        add_op(name, [this, x](const attribute_map& attributes, std::vector<instruction_ref> args) {
             if(args.size() != 2)
                 MIGRAPHX_THROW("binary operators should have 2 operands");
             auto l0 = args[1];
@@ -215,7 +217,7 @@ struct tf_parser
     template <class T>
     void add_generic_op(std::string name, T x)
     {
-        add_op(name, [this, x](attribute_map, std::vector<instruction_ref> args) {
+        add_op(name, [this, x](const attribute_map&, std::vector<instruction_ref> args) {
             return prog.add_instruction(x, args);
         });
     }
@@ -339,6 +341,62 @@ struct tf_parser
         return prog.add_instruction(op, {args[0], weights});
     }

+    instruction_ref parse_depthwiseconv(const std::string&,
+                                        attribute_map attributes,
+                                        std::vector<instruction_ref> args)
+    {
+        op::convolution op;
+        size_t num_channels = args[0]->get_shape().lens()[1];
+        op.group            = num_channels;
+        if(contains(attributes, "padding"))
+        {
+            const std::string& pad_mode = attributes.at("padding").s();
+            if(pad_mode.find("SAME") != std::string::npos)
+            {
+                op.padding_mode = op::padding_mode_t::same;
+            }
+        }
+        if(contains(attributes, "strides"))
+        {
+            std::vector<size_t> stride;
+            copy(attributes.at("strides").list().i(), std::back_inserter(stride));
+            reorder_data(stride);
+            if(stride.size() != 4)
+            {
+                MIGRAPHX_THROW("strides should have 4 values");
+            }
+            op.stride[0] = stride[2];
+            op.stride[1] = stride[3];
+        }
+        auto weights = args[1];
+        // check if weights are from a constant
+        if(weights->name() != "@param")
+        {
+            if(is_nhwc)
+            {
+                weights = prog.add_instruction(op::transpose{{1, 3, 0, 2}}, args[1]);
+            }
+            else
+            {
+                weights = prog.add_instruction(op::transpose{{3, 2, 0, 1}}, args[1]);
+            }
+        }
+        std::vector<int64_t> new_weights_shape;
+        copy(weights->get_shape().lens(), std::back_inserter(new_weights_shape));
+        // weight format is (out_channels, in_channels, h, w), but in depthwise_conv,
+        // out_channels is equal to the multiplier. Adjust by inserting a reshape and
+        // setting in_channels to 1
+        int64_t multiplier   = new_weights_shape[0];
+        int64_t out_channels = num_channels * multiplier;
+        new_weights_shape[0] = out_channels;
+        new_weights_shape[1] = 1;
+        auto new_weights     = prog.add_instruction(op::reshape{new_weights_shape}, weights);
+        return prog.add_instruction(op, {args[0], new_weights});
+    }
+
     instruction_ref
     parse_matmul(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
     {
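The heart of parse_depthwiseconv is re-expressing TensorFlow's depthwise convolution as a grouped convolution with `group = in_channels`: after the transpose the weights are `(multiplier, in_channels, h, w)`, and the reshape rewrites them as `(in_channels * multiplier, 1, h, w)` so each group convolves exactly one input channel. The shape arithmetic in isolation, with illustrative numbers:

#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    // transposed weights: (multiplier, in_channels, h, w)
    std::vector<int64_t> w{3, 8, 5, 5};
    int64_t num_channels = w[1];
    int64_t multiplier   = w[0];
    w[0] = num_channels * multiplier; // out_channels = 8 * 3 = 24
    w[1] = 1;                         // one input channel per group
    for(auto d : w)
        std::cout << d << ' ';        // 24 1 5 5
    std::cout << '\n';
}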
@@ -741,10 +799,6 @@ struct tf_parser
     static literal parse_tensor(const tensorflow::TensorProto& t)
     {
         std::vector<size_t> dims = parse_dims(t.tensor_shape());
-        if(dims.empty())
-        {
-            dims = {1};
-        }
         size_t shape_size =
             std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<size_t>());
         if(!t.tensor_content().empty()) // has raw data
         {
@@ -755,17 +809,17 @@ struct tf_parser
         case tensorflow::DataType::DT_FLOAT:
             return literal{{shape::float_type, dims}, s.data()};
         case tensorflow::DataType::DT_UINT8: throw std::runtime_error("");
-        case tensorflow::DataType::DT_INT8: return literal{{shape::int32_type, dims}, s.data()};
+        case tensorflow::DataType::DT_INT8: return literal{{shape::int8_type, dims}, s.data()};
-        case tensorflow::DataType::DT_UINT16:
-            return literal{{shape::int32_type, dims}, s.data()};
+        case tensorflow::DataType::DT_UINT16:
+            return literal{{shape::uint16_type, dims}, s.data()};
-        case tensorflow::DataType::DT_INT16: return literal{{shape::int32_type, dims}, s.data()};
+        case tensorflow::DataType::DT_INT16: return literal{{shape::int16_type, dims}, s.data()};
         case tensorflow::DataType::DT_INT32:
             return literal{{shape::int32_type, dims}, s.data()};
         case tensorflow::DataType::DT_INT64:
             return literal{{shape::int64_type, dims}, s.data()};
         case tensorflow::DataType::DT_STRING: throw std::runtime_error("");
-        case tensorflow::DataType::DT_BOOL: return literal{{shape::int32_type, dims}, s.data()};
+        case tensorflow::DataType::DT_BOOL: return literal{{shape::int8_type, dims}, s.data()};
         case tensorflow::DataType::DT_HALF:
             return literal{{shape::half_type, dims}, s.data()};
         case tensorflow::DataType::DT_DOUBLE:
             return literal{{shape::double_type, dims}, s.data()};
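The rewritten cases in this switch cover tensors whose payload arrives as raw bytes in tensor_content; there the literal's element type has to match the stored element width, and the old mapping of the small integer types onto `int32_type` would read four bytes per element from a buffer that holds one (or two). A hedged demonstration of why the width matters:

#include <cstdint>
#include <cstring>
#include <iostream>
#include <string>

int main()
{
    std::string raw = {'\x01', '\xff', '\x02'}; // three int8 payload bytes: 1, -1, 2
    int8_t v[3];
    std::memcpy(v, raw.data(), sizeof(v)); // int8_type: one byte per element, correct
    // reading the same buffer as int32 would need 12 bytes and misparse it
    std::cout << int(v[0]) << ' ' << int(v[1]) << ' ' << int(v[2]) << '\n'; // 1 -1 2
}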
@@ -815,21 +869,23 @@ struct tf_parser
         {
         case tensorflow::DataType::DT_INVALID: throw std::runtime_error("");
         case tensorflow::DataType::DT_FLOAT:
-            return literal{{shape::float_type, dims}, get_data_vals(t.float_val(), shape_size)};
+            return create_literal(shape::float_type, dims, get_data_vals(t.float_val(), shape_size));
         case tensorflow::DataType::DT_UINT8: throw std::runtime_error("");
         case tensorflow::DataType::DT_INT8:
-            return literal{{shape::int32_type, dims}, get_data_vals(t.int_val(), shape_size)};
+            return create_literal(shape::int8_type, dims, get_data_vals(t.int_val(), shape_size));
         case tensorflow::DataType::DT_UINT16:
-            return literal{{shape::int32_type, dims}, get_data_vals(t.int_val(), shape_size)};
+            return create_literal(shape::uint16_type, dims, get_data_vals(t.int_val(), shape_size));
         case tensorflow::DataType::DT_INT16:
-            return literal{{shape::int32_type, dims}, get_data_vals(t.int_val(), shape_size)};
+            return create_literal(shape::int16_type, dims, get_data_vals(t.int_val(), shape_size));
         case tensorflow::DataType::DT_INT32:
-            return literal{{shape::int32_type, dims}, get_data_vals(t.int_val(), shape_size)};
+            return create_literal(shape::int32_type, dims, get_data_vals(t.int_val(), shape_size));
         case tensorflow::DataType::DT_INT64:
-            return literal{{shape::int64_type, dims}, get_data_vals(t.int64_val(), shape_size)};
+            return create_literal(shape::int64_type, dims, get_data_vals(t.int64_val(), shape_size));
         case tensorflow::DataType::DT_STRING: throw std::runtime_error("");
         case tensorflow::DataType::DT_BOOL:
-            return literal{{shape::int32_type, dims}, get_data_vals(t.bool_val(), shape_size)};
+            return create_literal(shape::int32_type, dims, get_data_vals(t.bool_val(), shape_size));
         case tensorflow::DataType::DT_HALF:
         {
             std::vector<int> data_int32 = get_data_vals(t.half_val(), shape_size);
@@ -839,7 +895,7 @@ struct tf_parser
                            data_uint16.end(),
                            std::back_inserter(data_half),
                            [](uint16_t raw_val) { return *reinterpret_cast<half*>(&raw_val); });
-            return literal{{shape::half_type, dims}, data_half};
+            return create_literal(shape::half_type, dims, data_half);
         }
         case tensorflow::DataType::DT_DOUBLE:
             return literal{{shape::double_type, dims}, get_data_vals(t.double_val(), shape_size)};
@@ -908,9 +964,19 @@ struct tf_parser
         std::transform(input_dims.begin(),
                        input_dims.end(),
                        std::back_inserter(dims),
-                       [](tensorflow::TensorShapeProto_Dim dim) { return dim.size(); });
+                       [](const tensorflow::TensorShapeProto_Dim& dim) { return dim.size(); });
         return dims;
     }

+    template <class T>
+    static literal
+    create_literal(shape::type_t shape_type, const std::vector<size_t>& dims, std::vector<T> data)
+    {
+        // assume if explicit value is mentioned in protobuf and dim size <= 1, treat as scalar
+        if(dims.empty() or (dims.size() == 1 and dims.front() == 1))
+            return literal{{shape_type}, data};
+        return literal{{shape_type, dims}, data};
+    }
 };

 program parse_tf(const std::string& name, bool is_nhwc)
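The new create_literal helper changes how explicitly-listed values come out of the parser: dims of `{}` or `{1}` now yield a scalar literal (`{shape_type}` with no lengths builds a rank-0 shape) instead of a one-element 1-D tensor, which is also why the old `if(dims.empty()) { dims = {1}; }` fallback earlier in parse_tensor could go. The dimension rule in isolation:

#include <cstddef>
#include <iostream>
#include <vector>

// mirrors the scalar condition in create_literal
bool treat_as_scalar(const std::vector<std::size_t>& dims)
{
    return dims.empty() or (dims.size() == 1 and dims.front() == 1);
}

int main()
{
    std::cout << treat_as_scalar({}) << treat_as_scalar({1}) << treat_as_scalar({3}) << '\n'; // 110
}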
test/cpu_ops_test.cpp

@@ -1557,4 +1557,21 @@ TEST_CASE(fp16_test)
     EXPECT(migraphx::verify_range(results_vector, gold));
 }

+TEST_CASE(clip_test)
+{
+    migraphx::program p;
+    migraphx::shape s{migraphx::shape::float_type, {3}};
+    auto l = p.add_literal(migraphx::literal{s, {-1.0, 0.0, 10.0}});
+    migraphx::op::clip op;
+    op.max_val = 6.0;
+    op.min_val = 0.0;
+    p.add_instruction(op, l);
+    p.compile(migraphx::cpu::target{});
+    auto result = p.eval({});
+    std::vector<float> results_vector(3);
+    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold = {0.0, 0.0, 6.0};
+    EXPECT(migraphx::verify_range(results_vector, gold));
+}
+
 int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/eliminate_allocation_test.cpp

@@ -20,6 +20,13 @@ struct eliminate_allocation_target
 struct allocate
 {
     migraphx::shape s{};
+
+    template <class Self, class F>
+    static auto reflect(Self& self, F f)
+    {
+        return migraphx::pack(f(self.s, "shape"));
+    }
+
     std::string name() const { return "allocate"; }
     migraphx::shape compute_shape(const std::vector<migraphx::shape>& inputs) const
     {
test/eliminate_concat_test.cpp

@@ -10,6 +10,13 @@ struct concat
 {
     concat(std::size_t axis) { op.axis = axis; }

     migraphx::op::concat op;
+
+    template <class Self, class F>
+    static auto reflect(Self& self, F f)
+    {
+        return migraphx::reflect(self.op, f);
+    }
+
     std::string name() const { return "eliminate_concat::concat"; }
     migraphx::shape compute_shape(std::vector<migraphx::shape> inputs) const
     {

@@ -51,6 +58,13 @@ struct eliminate_concat_target
 struct allocate
 {
     migraphx::shape s{};
+
+    template <class Self, class F>
+    static auto reflect(Self& self, F f)
+    {
+        return migraphx::pack(f(self.s, "shape"));
+    }
+
     std::string name() const { return "allocate"; }
     migraphx::shape compute_shape(const std::vector<migraphx::shape>& inputs) const
     {