MIGraphX commit 90a37082 (unverified)
Authored Jan 29, 2019 by Paul Fultz II; committed by GitHub on Jan 29, 2019

Merge pull request #160 from ROCmSoftwarePlatform/pad_op

Pad op

Parents: b5090737, ae3f1925

Showing 14 changed files, with 284 additions and 0 deletions (+284 -0).

src/include/migraphx/operators.hpp                             +36 -0
src/onnx/onnx.cpp                                              +23 -0
src/targets/cpu/lowering.cpp                                   +27 -0
src/targets/gpu/CMakeLists.txt                                 +2 -0
src/targets/gpu/device/include/migraphx/gpu/device/nary.hpp    +6 -0
src/targets/gpu/device/pad.cpp                                 +43 -0
src/targets/gpu/include/migraphx/gpu/device/pad.hpp            +25 -0
src/targets/gpu/include/migraphx/gpu/pad.hpp                   +40 -0
src/targets/gpu/lowering.cpp                                   +3 -0
src/targets/gpu/pad.cpp                                        +25 -0
test/cpu_ops_test.cpp                                          +14 -0
test/gpu/miopen.cpp                                            +20 -0
test/onnx/onnx_test.cpp                                        +8 -0
test/onnx/pad_test.onnx                                        +12 -0

src/include/migraphx/operators.hpp

@@ -610,6 +610,42 @@ struct reshape
    int output_alias(const std::vector<shape>&) const { return 0; }
};

struct pad
{
    std::vector<int64_t> pads;
    float value = 0.0f;
    enum padding_mode_t
    {
        constant_pad,
        reflect_pad,
        edge_pad
    };
    padding_mode_t mode = constant_pad;

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.mode, "mode"), f(self.pads, "pads"), f(self.value, "value"));
    }

    std::string name() const { return "pad"; }

    shape compute_shape(std::vector<shape> inputs) const
    {
        check_shapes{inputs, *this}.has(1);
        auto&& idims = inputs.front().lens();
        std::vector<std::size_t> rdims(idims.begin(), idims.end());
        std::size_t num_dims = rdims.size();
        for(std::size_t i = 0; i < num_dims; i++)
        {
            rdims[i] += pads[i] + pads[i + num_dims];
        }
        shape s{inputs.front().type(), rdims};
        return s;
    }
};

struct as_shape
{
    shape s;
...

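For clarity on the layout compute_shape assumes (the same ONNX-style ordering that parse_pad below passes through unchanged): pads stores the leading pad for every dimension first, then the trailing pad for every dimension, so each output length is input_lens[i] + pads[i] + pads[i + num_dims]. For example, a {2, 2} input with pads = {1, 1, 1, 1} grows to {4, 4}, and a {3, 5} input with pads = {1, 0, 2, 4} grows to {3 + 1 + 2, 5 + 0 + 4} = {6, 9}.
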
src/onnx/onnx.cpp

@@ -85,6 +85,7 @@ struct onnx_parser
        add_mem_op("Shape", &onnx_parser::parse_shape);
        add_mem_op("ConstantFill", &onnx_parser::parse_constant_fill);
        add_mem_op("Transpose", &onnx_parser::parse_transpose);
        add_mem_op("Pad", &onnx_parser::parse_pad);
    }

    template <class F>
...

@@ -548,6 +549,28 @@ struct onnx_parser
        return prog.add_instruction(migraphx::op::transpose{perm}, args.front());
    }

    instruction_ref
    parse_pad(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {
        std::vector<int64_t> pads{};
        float value = 0.0f;
        if(contains(attributes, "pads"))
        {
            auto&& pad_vals = attributes["pads"].ints();
            pads = std::vector<int64_t>(pad_vals.begin(), pad_vals.end());
        }
        if(contains(attributes, "value"))
        {
            value = parse_value(attributes.at("value")).at<float>();
        }
        if(contains(attributes, "mode"))
        {
            auto mode = attributes.at("mode").s();
            if(mode != "constant")
                MIGRAPHX_THROW("migraphx currently only supports constant padding");
        }
        return prog.add_instruction(migraphx::op::pad{pads, value}, args.front());
    }

    // Use a literal instruction to replace the shape since, output of
    // shape operator are literals in migraphx
    instruction_ref
...

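As a usage sketch of what parse_pad produces (mirroring the program-building calls in the pad_test cases further down; the include paths and the shape and pad values here are assumptions for illustration, not part of this change):

#include <migraphx/operators.hpp> // op::pad (added in this change)
#include <migraphx/program.hpp>   // assumed header for migraphx::program

int main()
{
    migraphx::program p;
    auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 3}});
    // ONNX-order pads: leading pads for both dims, then trailing pads.
    // {0, 0, 1, 1} pads one trailing element per dimension, so {3, 3} becomes {4, 4}.
    p.add_instruction(migraphx::op::pad{{0, 0, 1, 1}, 0.0f}, x);
}
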
src/targets/cpu/lowering.cpp

@@ -298,6 +298,32 @@ struct cpu_contiguous
    }
};

struct cpu_pad
{
    op::pad op;
    std::string name() const { return "cpu::pad"; }
    shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }

    argument compute(context&, const shape& output_shape, std::vector<argument> args) const
    {
        assert(output_shape.standard());
        argument result{output_shape};
        result.visit([&](auto output) { std::fill(output.begin(), output.end(), op.value); });

        visit_all(result, args[0])([&](auto output, auto input) {
            shape_for_each(input.get_shape(), [&](const auto& idx) {
                std::vector<std::size_t> new_idx(idx.size());
                std::transform(idx.begin(), idx.end(), op.pads.begin(), new_idx.begin(),
                               [](auto i, auto j) { return i + j; });
                output(new_idx.begin(), new_idx.end()) = input(idx.begin(), idx.end());
            });
        });

        return result;
    }
};

struct cpu_concat
{
    op::concat op;
...

@@ -663,6 +689,7 @@ struct cpu_apply
        apply_map["batch_norm_inference"] =
            extend_op<cpu_batch_norm_inference, op::batch_norm_inference>();
        apply_map["contiguous"] = extend_op<cpu_contiguous, op::contiguous>();
        apply_map["pad"]        = extend_op<cpu_pad, op::pad>();
        apply_map["concat"]     = extend_op<cpu_concat, op::concat>();
        apply_map["gather"]     = extend_op<cpu_gather, op::gather>();
        apply_map["leaky_relu"] = extend_op<cpu_unary<leaky_relu_op>, op::leaky_relu>();
...

src/targets/gpu/CMakeLists.txt

@@ -28,6 +28,7 @@ add_library(migraphx_device
    device/contiguous.cpp
    device/mul.cpp
    device/concat.cpp
    device/pad.cpp
    device/gather.cpp
)
set_target_properties(migraphx_device PROPERTIES EXPORT_NAME device)
...

@@ -57,6 +58,7 @@ add_library(migraphx_gpu
    sigmoid.cpp
    abs.cpp
    elu.cpp
    pad.cpp
    gather.cpp
)
set_target_properties(migraphx_gpu PROPERTIES EXPORT_NAME gpu)
...

src/targets/gpu/device/include/migraphx/gpu/device/nary.hpp

@@ -313,6 +313,12 @@ void nary_impl(hipStream_t stream, F f, argument result, Arguments... args)
    nary_nonstandard_impl(stream, f, result, args...);
}

template <class F>
void nary_impl(hipStream_t stream, F f, argument result)
{
    nary_standard_impl(stream, f, result);
}

template <class... Arguments>
auto nary_nonstandard(hipStream_t stream, argument result, Arguments... args)
{
...

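The zero-argument nary_impl overload added here lets nary be used as a fill: with no input tensors, the functor's return value is written to every element of result, which is how device/pad.cpp below initializes the output with the pad value via nary(stream, result)([=] { return value; }). A minimal host-side analogue of that behavior, with hypothetical names (not the MIGraphX API):

#include <algorithm>
#include <vector>

// Hypothetical host-side analogue of nary(stream, result)([=] { return value; }):
// applying a zero-input functor elementwise reduces to filling the output buffer.
template <class F>
void fill_like_nary(std::vector<float>& result, F f)
{
    std::generate(result.begin(), result.end(), f);
}

// Usage: fill_like_nary(buffer, [value] { return value; }); // every element becomes `value`
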
src/targets/gpu/device/pad.cpp (new file, mode 100644)

#include <migraphx/shape.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/gpu/device/nary.hpp>
#include <migraphx/gpu/device/pad.hpp>
#include <migraphx/gpu/device/tensor.hpp>
#include <migraphx/gpu/device/launch.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

argument pad(hipStream_t stream,
             argument result,
             argument arg1,
             float value,
             std::vector<std::int64_t> pads)
{
    std::size_t nelements = arg1.get_shape().elements();
    nary(stream, result)([=] { return value; });
    visit_all(result, arg1)([&](auto output, auto input) {
        visit_tensor_size(result.get_shape().lens().size(), [&](auto ndim) {
            std::size_t offsets[ndim];
            std::copy(pads.begin(), pads.begin() + ndim, offsets);
            auto* outptr      = output.data();
            const auto* inptr = input.data();
            hip_tensor_descriptor<ndim> desc_input(input.get_shape());
            hip_tensor_descriptor<ndim> desc_output(output.get_shape());
            gs_launch(stream, nelements)([=](auto i) {
                auto idx = desc_input.multi(i);
                for(std::size_t j = 0; j < ndim; j++)
                {
                    idx[j] += offsets[j];
                }
                outptr[desc_output.linear(idx)] = inptr[i];
            });
        });
    });
    return result;
}

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

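For readers unfamiliar with hip_tensor_descriptor, the kernel above does the following per input element: recover the element's multi-dimensional index from its linear id, add each dimension's leading pad, and convert back to a linear offset in the larger output. A self-contained host-side sketch of that index arithmetic, assuming standard row-major strides (hypothetical helper names, not the real descriptor type):

#include <cstddef>
#include <cstdint>
#include <vector>

// Row-major strides for the given dimension lengths.
std::vector<std::size_t> row_major_strides(const std::vector<std::size_t>& lens)
{
    std::vector<std::size_t> strides(lens.size(), 1);
    for(std::size_t i = lens.size(); i > 1; i--)
        strides[i - 2] = strides[i - 1] * lens[i - 1];
    return strides;
}

// Host-side equivalent of the two GPU passes: fill the output with `value`,
// then scatter each input element to its pad-shifted position.
std::vector<float> pad_reference(const std::vector<float>& in,
                                 const std::vector<std::size_t>& in_lens,
                                 const std::vector<std::int64_t>& pads,
                                 float value)
{
    std::size_t ndim = in_lens.size();
    std::vector<std::size_t> out_lens(in_lens);
    for(std::size_t j = 0; j < ndim; j++)
        out_lens[j] += pads[j] + pads[j + ndim]; // same arithmetic as op::pad::compute_shape

    auto in_strides  = row_major_strides(in_lens);
    auto out_strides = row_major_strides(out_lens);

    std::size_t out_elements = 1;
    for(auto d : out_lens)
        out_elements *= d;
    std::vector<float> out(out_elements, value); // first pass: fill with the pad value

    for(std::size_t i = 0; i < in.size(); i++)   // second pass: one "thread" per input element
    {
        std::size_t linear = 0;
        for(std::size_t j = 0; j < ndim; j++)
        {
            std::size_t idx = (i / in_strides[j]) % in_lens[j]; // like desc_input.multi(i)[j]
            linear += (idx + pads[j]) * out_strides[j];         // shift by the leading pad
        }
        out[linear] = in[i];                                    // like desc_output.linear(idx)
    }
    return out;
}

// pad_reference({1, 2, 3, 4}, {2, 2}, {1, 1, 1, 1}, 0.0f) reproduces the gold vector
// used in test/cpu_ops_test.cpp below.
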
src/targets/gpu/include/migraphx/gpu/device/pad.hpp (new file, mode 100644)

#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_PAD_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_PAD_HPP

#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

argument pad(hipStream_t stream,
             argument result,
             argument arg1,
             float value,
             std::vector<std::int64_t> pads);

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif

src/targets/gpu/include/migraphx/gpu/pad.hpp (new file, mode 100644)

#ifndef MIGRAPHX_GUARD_RTGLIB_PAD_HPP
#define MIGRAPHX_GUARD_RTGLIB_PAD_HPP

#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/pad.hpp>
#include <migraphx/gpu/device/add.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <utility>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

struct hip_pad
{
    op::pad op;
    std::string name() const { return "gpu::pad"; }
    shape compute_shape(std::vector<shape> inputs) const;
    argument
    compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif

src/targets/gpu/lowering.cpp

@@ -40,6 +40,7 @@
#include <migraphx/gpu/pooling.hpp>
#include <migraphx/gpu/gemm.hpp>
#include <migraphx/gpu/concat.hpp>
#include <migraphx/gpu/pad.hpp>
#include <migraphx/gpu/gather.hpp>
#include <utility>
#include <functional>
...

@@ -92,6 +93,8 @@ struct miopen_apply
        add_extend_op<hip_concat, op::concat>("concat");
        add_extend_op<miopen_softmax, op::softmax>("softmax");
        add_extend_op<hip_gather, op::gather>("gather");
        add_extend_op<hip_pad, op::pad>("pad");

        add_convolution_op();
        add_pooling_op();
        add_batch_norm_inference_op();
...

src/targets/gpu/pad.cpp (new file, mode 100644)

#include <migraphx/gpu/pad.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/device/pad.hpp>
#include <utility>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

shape hip_pad::compute_shape(std::vector<shape> inputs) const
{
    inputs.pop_back();
    return op.compute_shape(inputs);
}

argument hip_pad::compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
    return device::pad(ctx.get_stream().get(), args.back(), args.front(), op.value, op.pads);
}

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

test/cpu_ops_test.cpp

@@ -1346,4 +1346,18 @@ TEST_CASE(min_test)
    EXPECT(migraphx::verify_range(results_vector, gold));
}

TEST_CASE(pad_test)
{
    migraphx::program p;
    migraphx::shape s{migraphx::shape::float_type, {2, 2}};
    auto l0 = p.add_literal(migraphx::literal{s, {1, 2, 3, 4}});
    p.add_instruction(migraphx::op::pad{{1, 1, 1, 1}}, l0);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> results_vector(16);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold{0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4, 0, 0, 0, 0, 0};
    EXPECT(migraphx::verify_range(results_vector, gold));
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }

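Read as a matrix, the gold vector above is the expected 4x4 output: the 2x2 literal {1, 2, 3, 4} framed by one row and one column of zeros on every side:

0 0 0 0
0 1 2 0
0 3 4 0
0 0 0 0
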
test/gpu/miopen.cpp

@@ -934,6 +934,25 @@ struct test_concat_relu
    }
};

struct test_pad
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape s0{migraphx::shape::int32_type, {1, 96, 165, 165}};
        std::vector<int64_t> pads0 = {0, 0, 0, 0, 0, 0, 1, 1};
        std::vector<int64_t> pads1 = {0, 0, 0, 0, 1, 1, 1, 1};
        std::vector<int64_t> pads2 = {1, 1, 1, 1, 0, 0, 0, 0};
        std::vector<int64_t> pads3 = {1, 0, 1, 0, 1, 0, 2, 0};
        auto l0 = p.add_parameter("x", s0);
        p.add_instruction(migraphx::op::pad{pads0}, l0);
        p.add_instruction(migraphx::op::pad{pads1}, l0);
        p.add_instruction(migraphx::op::pad{pads2}, l0);
        p.add_instruction(migraphx::op::pad{pads3}, l0);
        return p;
    }
};

struct test_gather
{
    migraphx::program create_program() const
...

@@ -1055,6 +1074,7 @@ int main()
    verify_program<test_concat>();
    verify_program<test_concat2>();
    verify_program<test_concat_relu>();
    verify_program<test_pad>();
    verify_program<test_add>();
    verify_program<test_add_half>();
    verify_program<test_mul>();
...

test/onnx/onnx_test.cpp

@@ -561,4 +561,12 @@ TEST_CASE(group_conv_test)
    migraphx::parse_onnx("group_conv_test.onnx");
}

TEST_CASE(pad_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 2}});
    p.add_instruction(migraphx::op::pad{{1, 1, 1, 1}}, l0);
    migraphx::parse_onnx("pad_test.onnx");
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }

test/onnx/pad_test.onnx (new file, mode 100644)

Binary ONNX protobuf used by the pad_test parser test above: a "test-pad" graph containing a single Pad node (input "0", output "1") with a pads attribute. The raw bytes are not reproduced here. (No newline at end of file.)