gaoqiong / MIGraphX · Commits

Commit 75d5c660, authored Jun 12, 2019 by Khalique

    progress on fixing padding

Parent: 4fd8c544

Showing 5 changed files with 119 additions and 16 deletions.
src/include/migraphx/pad_calc.hpp   +8  -0
src/onnx/onnx.cpp                   +1  -1
src/targets/gpu/device/pad.cpp      +19 -0
src/tf/tf.cpp                       +71 -15
test/gpu/miopen.cpp                 +20 -0
src/include/migraphx/pad_calc.hpp

@@ -11,6 +11,14 @@ inline std::size_t calculate_padding(std::size_t weight_dim, std::size_t dilation
     return (dilation * (weight_dim - 1)) / 2;
 }
 
+inline void calculate_padding(
+    int64_t idx, std::vector<int64_t>& pads, int64_t input_dim, int64_t stride, int64_t dilation, int64_t weight_dim)
+{
+    int64_t output_dim = input_dim / stride;
+    int64_t pad = std::max(static_cast<int64_t>(0), (output_dim - 1) * stride + dilation * weight_dim - input_dim);
+    pads[idx]     = pad / 2;
+    pads[idx + 2] = pad - pad / 2;
+}
+
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
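A quick worked example of what the new overload computes. This is a standalone sketch, not part of the commit: the helper below is a local copy of the function added above (renamed calculate_padding_same to make clear it is illustrative), and the concrete sizes are made up. For a 224-wide input with stride 2, dilation 1 and a 7-wide kernel the total pad comes out to 5, which is split 2 before / 3 after, so SAME padding can be asymmetric; with stride 1 and a 3-wide kernel it splits evenly.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Local copy of the new overload from pad_calc.hpp, kept here so the example is self-contained.
void calculate_padding_same(int64_t idx,
                            std::vector<int64_t>& pads,
                            int64_t input_dim,
                            int64_t stride,
                            int64_t dilation,
                            int64_t weight_dim)
{
    int64_t output_dim = input_dim / stride;
    int64_t pad        = std::max(static_cast<int64_t>(0),
                                  (output_dim - 1) * stride + dilation * weight_dim - input_dim);
    pads[idx]     = pad / 2;       // leading edge (top or left) gets the smaller half
    pads[idx + 2] = pad - pad / 2; // trailing edge (bottom or right) gets the remainder
}

int main()
{
    std::vector<int64_t> pads(4, 0);               // {h_begin, w_begin, h_end, w_end}
    calculate_padding_same(0, pads, 224, 2, 1, 7); // height: total pad 5 -> 2 / 3
    calculate_padding_same(1, pads, 224, 1, 1, 3); // width:  total pad 2 -> 1 / 1
    std::cout << pads[0] << " " << pads[1] << " " << pads[2] << " " << pads[3] << std::endl; // 2 1 3 1
}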
src/onnx/onnx.cpp

@@ -352,7 +352,7 @@ struct onnx_parser
         {
             // insert zeros for pad op (args[0] has 4 dims)
             padding = {0, 0, padding[0], padding[1], 0, 0, padding[2], padding[3]};
-            l0      = prog.add_instruction(op::pad{padding}, l0);
+            l0      = prog.add_instruction(op::pad{padding, std::numeric_limits<float>::lowest()}, l0);
         }
         else
         {
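The only functional change in this hunk is the pad value: the explicit pad now uses std::numeric_limits<float>::lowest() rather than the op's default fill, presumably because this parser path feeds a max pooling operator (the surrounding context is outside the hunk), where a zero fill is not neutral. A minimal sketch of the difference, in plain C++ with no MIGraphX dependency:

#include <algorithm>
#include <iostream>
#include <limits>

int main()
{
    // A pooling window whose real values are all negative, plus two "padding" cells.
    float zero_padded[4]   = {-3.0f, -5.0f, 0.0f, 0.0f};
    float lowest_padded[4] = {-3.0f,
                              -5.0f,
                              std::numeric_limits<float>::lowest(),
                              std::numeric_limits<float>::lowest()};
    std::cout << *std::max_element(zero_padded, zero_padded + 4) << std::endl;     // 0: padding wins the max
    std::cout << *std::max_element(lowest_padded, lowest_padded + 4) << std::endl; // -3: padding is ignored
}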
src/targets/gpu/device/pad.cpp

@@ -14,6 +14,25 @@ argument
 pad(hipStream_t stream, argument result, argument arg1, float value, std::vector<std::int64_t> pads)
 {
     std::size_t nelements = arg1.get_shape().elements();
+    // if(value == std::numeric_limits<float>::lowest())
+    // {
+    //     visit_all(result)([&](auto output) {
+    //         auto* outptr = output.data();
+    //         gs_launch(stream, nelements)([=](auto i) {
+    //             outptr[i] = std::numeric_limits<typename decltype(output)::value_type>::lowest();
+    //         });
+    //     });
+    // }
+    // else
+    // {
+    //     visit_all(result)([&](auto output) {
+    //         auto* outptr = output.data();
+    //         gs_launch(stream, nelements)([=](auto i) {
+    //             outptr[i] = static_cast<typename decltype(output)::value_type>(value);
+    //         });
+    //     });
+    // }
     nary(stream, result)([=] { return value; });
     visit_all(result, arg1)([&](auto output, auto input) {
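Both the commented-out block and the nary(stream, result) call that follows it take the same first step: write the fill value into every element of the output, then let the rest of the kernel copy the input into the interior region defined by the leading pads. A CPU-side sketch of that fill-then-copy strategy for one 2x2 plane padded by one element on every side (illustrative only; no MIGraphX or HIP APIs, and the fill value -1 is arbitrary):

#include <iostream>
#include <vector>

int main()
{
    const int h = 2, w = 2;
    const int pad_top = 1, pad_left = 1, pad_bottom = 1, pad_right = 1;
    const int oh = h + pad_top + pad_bottom, ow = w + pad_left + pad_right;

    std::vector<float> in = {1, 2, 3, 4};
    std::vector<float> out(oh * ow, -1.0f); // step 1: fill the whole output with the pad value

    for(int i = 0; i < h; i++)              // step 2: copy the input into the shifted interior
        for(int j = 0; j < w; j++)
            out[(i + pad_top) * ow + (j + pad_left)] = in[i * w + j];

    for(int i = 0; i < oh; i++)
    {
        for(int j = 0; j < ow; j++)
            std::cout << out[i * ow + j] << " ";
        std::cout << std::endl;
    }
}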
src/tf/tf.cpp

@@ -317,17 +317,34 @@ struct tf_parser
             }
         }
+        auto l0 = args[0];
         if(contains(attributes, "padding"))
         {
             const std::string& pad_mode = attributes.at("padding").s();
             if(pad_mode.find("SAME") != std::string::npos)
             {
-                op.padding_mode = op::padding_mode_t::same;
+                // op.padding_mode = op::padding_mode_t::same;
                 std::vector<size_t> weight_dims = weights->get_shape().lens();
                 size_t weight_h                 = weight_dims[2];
                 size_t weight_w                 = weight_dims[3];
-                op.padding[0] = calculate_padding(weight_h, op.dilation[0]);
-                op.padding[1] = calculate_padding(weight_w, op.dilation[1]);
+                auto input_dims = l0->get_shape().lens();
+                size_t input_h  = input_dims[2];
+                size_t input_w  = input_dims[3];
+                std::vector<int64_t> pads(input_dims.size());
+                calculate_padding(0, pads, input_h, op.stride[0], op.dilation[0], weight_h);
+                calculate_padding(1, pads, input_w, op.stride[1], op.dilation[1], weight_w);
+                if(pads[0] != pads[2] || pads[1] != pads[3])
+                {
+                    std::vector<int64_t> padding = {0, 0, pads[0], pads[1], 0, 0, pads[2], pads[3]};
+                    l0 = prog.add_instruction(migraphx::op::pad{padding}, l0);
+                }
+                else
+                {
+                    op.padding[0] = pads[0];
+                    op.padding[1] = pads[1];
+                }
             }
             else if(pad_mode.find("VALID") != std::string::npos)
             {
@@ -350,7 +367,7 @@ struct tf_parser
             }
         }
-        return prog.add_instruction(op, {args[0], weights});
+        return prog.add_instruction(op, {l0, weights});
     }

     instruction_ref parse_depthwiseconv(const std::string&,
@@ -400,17 +417,35 @@ struct tf_parser
             }
         }
+        auto l0 = args[0];
         if(contains(attributes, "padding"))
         {
             const std::string& pad_mode = attributes.at("padding").s();
-            std::vector<size_t> weight_dims = weights->get_shape().lens();
-            size_t weight_h                 = weight_dims[2];
-            size_t weight_w                 = weight_dims[3];
             if(pad_mode.find("SAME") != std::string::npos)
             {
-                op.padding_mode = op::padding_mode_t::same;
-                op.padding[0]   = calculate_padding(weight_h, op.dilation[0]);
-                op.padding[1]   = calculate_padding(weight_w, op.dilation[1]);
+                // op.padding_mode = op::padding_mode_t::same;
+                std::vector<size_t> weight_dims = weights->get_shape().lens();
+                size_t weight_h                 = weight_dims[2];
+                size_t weight_w                 = weight_dims[3];
+                auto input_dims = l0->get_shape().lens();
+                size_t input_h  = input_dims[2];
+                size_t input_w  = input_dims[3];
+                std::vector<int64_t> pads(input_dims.size());
+                calculate_padding(0, pads, input_h, op.stride[0], op.dilation[0], weight_h);
+                calculate_padding(1, pads, input_w, op.stride[1], op.dilation[1], weight_w);
+                if(pads[0] != pads[2] || pads[1] != pads[3])
+                {
+                    std::vector<int64_t> padding = {0, 0, pads[0], pads[1], 0, 0, pads[2], pads[3]};
+                    l0 = prog.add_instruction(migraphx::op::pad{padding}, l0);
+                }
+                else
+                {
+                    op.padding[0] = pads[0];
+                    op.padding[1] = pads[1];
+                }
             }
             else if(pad_mode.find("VALID") != std::string::npos)
             {
@@ -432,7 +467,7 @@ struct tf_parser
         auto cweights    = prog.add_instruction(op::contiguous{}, weights);
         auto new_weights = prog.add_instruction(op::reshape{new_weights_shape}, cweights);
-        return prog.add_instruction(op, {args[0], new_weights});
+        return prog.add_instruction(op, {l0, new_weights});
     }

     instruction_ref
@@ -567,21 +602,42 @@ struct tf_parser
             op.lengths[0] = ksize[2];
             op.lengths[1] = ksize[3];
         }
+        auto l0 = args[0];
         if(contains(attributes, "padding"))
         {
             const std::string& pad_mode = attributes.at("padding").s();
             if(pad_mode.find("SAME") != std::string::npos)
             {
-                op.padding_mode = op::padding_mode_t::same;
-                op.padding[0]   = calculate_padding(op.lengths[0], 1);
-                op.padding[1]   = calculate_padding(op.lengths[1], 1);
+                //op.padding_mode = op::padding_mode_t::same;
+                auto input_dims = l0->get_shape().lens();
+                size_t input_h  = input_dims[2];
+                size_t input_w  = input_dims[3];
+                std::vector<int64_t> pads(input_dims.size());
+                calculate_padding(0, pads, input_h, op.stride[0], 1, op.lengths[0]);
+                calculate_padding(1, pads, input_w, op.stride[1], 1, op.lengths[1]);
+                // for(auto pad : pads)
+                // {
+                //     std::cout << pad << std::endl;
+                // }
+                if(pads[0] != pads[2] || pads[1] != pads[3])
+                {
+                    std::vector<int64_t> padding = {0, 0, pads[0], pads[1], 0, 0, pads[2], pads[3]};
+                    l0 = prog.add_instruction(
+                        migraphx::op::pad{padding, std::numeric_limits<float>::lowest()}, l0);
+                }
+                else
+                {
+                    op.padding[0] = pads[0];
+                    op.padding[1] = pads[1];
+                }
             }
             else if(pad_mode.find("VALID") != std::string::npos)
             {
                 op.padding_mode = op::padding_mode_t::valid;
             }
         }
-        return prog.add_instruction(op, args[0]);
+        return prog.add_instruction(op, l0);
     }

     instruction_ref
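All three parser changes in this file follow the same pattern: compute per-edge SAME pads with the new calculate_padding overload, and only when the leading and trailing pads differ insert an explicit migraphx::op::pad in front of the operator, since the operator's own padding attribute holds a single value per spatial dimension. The sketch below is illustrative only (the helper names are hypothetical, not MIGraphX API); the {leading..., trailing...} layout simply mirrors the 8-entry padding vectors built in the hunks above for an NCHW input.

#include <cstdint>
#include <iostream>
#include <vector>

// p holds spatial pads as {top, left, bottom, right}.
bool needs_explicit_pad(const std::vector<int64_t>& p) { return p[0] != p[2] || p[1] != p[3]; }

// Leading pads for N, C, H, W first, then trailing pads, matching
// {0, 0, pads[0], pads[1], 0, 0, pads[2], pads[3]} above.
std::vector<int64_t> to_nchw_pads(const std::vector<int64_t>& p)
{
    return {0, 0, p[0], p[1], 0, 0, p[2], p[3]};
}

int main()
{
    std::vector<int64_t> pads = {2, 1, 3, 1}; // e.g. 224x224 input, stride 2, 7-wide kernel in H
    std::cout << std::boolalpha << needs_explicit_pad(pads) << std::endl; // true -> wrap the input in op::pad
    for(auto v : to_nchw_pads(pads))
        std::cout << v << " ";                                            // 0 0 2 1 0 0 3 1
    std::cout << std::endl;
}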
test/gpu/miopen.cpp

@@ -1573,6 +1573,26 @@ void manual_identity()
     std::cout << result << std::endl;
 }
 
+void pad_test()
+{
+    migraphx::program p;
+    std::vector<int8_t> data0 = {0, 1, 2, 3};
+    migraphx::shape s0{migraphx::shape::float_type, {2, 2}};
+    auto l0 = p.add_literal(migraphx::literal{s0, data0});
+    migraphx::op::pad op{};
+    op.value = std::numeric_limits<int8_t>::lowest();
+    op.pads  = {0, 0, 1, 1};
+    p.add_instruction(op, l0);
+    p.compile(migraphx::gpu::target{});
+    migraphx::program::parameter_map m;
+    for(auto&& x : p.get_parameter_shapes())
+    {
+        m[x.first] = migraphx::gpu::to_gpu(migraphx::generate_argument(x.second));
+    }
+    auto result = migraphx::gpu::from_gpu(p.eval(m));
+    std::cout << result << std::endl;
+}
+
 void manual_test_concat_relu()
 {
     migraphx::program p;
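For orientation, the shape the new pad_test should produce: assuming op.pads lists all leading amounts and then all trailing amounts (the same ordering the tf.cpp hunks build above), {0, 0, 1, 1} on the 2x2 literal adds one row and one column at the trailing edge of each dimension, giving a 3x3 result whose new cells hold op.value. A tiny shape-arithmetic sketch of that assumption:

#include <iostream>
#include <vector>

int main()
{
    std::vector<int> dims = {2, 2};
    std::vector<int> pads = {0, 0, 1, 1}; // assumed ordering: {d0_begin, d1_begin, d0_end, d1_end}
    const int rank = 2;
    for(int i = 0; i < rank; i++)
        std::cout << dims[i] + pads[i] + pads[i + rank] << " "; // prints: 3 3
    std::cout << std::endl;
}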