gaoqiong / MIGraphX · Commits

Unverified commit 1504d5d5, authored Dec 05, 2022 by Umang Yadav, committed by GitHub Dec 05, 2022

Merge branch 'develop' into dynamic_reduce

parents 0f8ea2e4 fdc3f00a

Showing 18 changed files with 976 additions and 396 deletions (+976 -396)
src/include/migraphx/int_divide.hpp                       +0   -48
src/include/migraphx/literal.hpp                          +6   -15
src/include/migraphx/op/pooling.hpp                       +115 -36
src/include/migraphx/shape_for_each.hpp                   +3   -1
src/insert_pad.cpp                                        +2   -2
src/onnx/parse_pooling.cpp                                +82  -38
test/literal_test.cpp                                     +19  -0
test/onnx/averagepool_dyn_asym_padding_error_test.onnx    +0   -0
test/onnx/averagepool_dyn_autopad_error_test.onnx         +0   -0
test/onnx/averagepool_dyn_cip_error_test.onnx             +0   -0
test/onnx/averagepool_dyn_test.onnx                       +0   -0
test/onnx/gen_onnx.py                                     +103 -0
test/onnx/globalavgpool_dyn_test.onnx                     +0   -0
test/onnx/globallppool_dyn_test.onnx                      +0   -0
test/onnx/globalmaxpool_dyn_test.onnx                     +0   -0
test/onnx/onnx_test.cpp                                   +112 -0
test/op_shape_test.cpp                                    +100 -3
test/ref_ops_test.cpp                                     +434 -253
src/include/migraphx/int_divide.hpp (deleted, 100644 → 0)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_RTGLIB_INT_DIVIDE_HPP
#define MIGRAPHX_GUARD_RTGLIB_INT_DIVIDE_HPP

#include <migraphx/config.hpp>
#include <cmath>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {

template <class R, class T, class U>
R floor_divide(T x, U y)
{
    return R(std::floor(double(x) / double(y)));
}

template <class R, class T, class U>
R ceil_divide(T x, U y)
{
    return R(std::ceil(double(x) / double(y)));
}

} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
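Note: both removed helpers round-trip through double, which the replacement code in pooling.hpp avoids by dividing in unsigned integer arithmetic. A minimal integer-only sketch of the same ceiling division (my own illustration, not part of this diff; the name ceil_div is hypothetical):

    #include <cassert>
    #include <cstddef>

    // Integer-only ceiling division for non-negative operands; equivalent to
    // the removed ceil_divide<R>(x, y) without converting through double.
    std::size_t ceil_div(std::size_t x, std::size_t y)
    {
        assert(y != 0);
        return x / y + static_cast<std::size_t>(x % y != 0);
    }

    // e.g. ceil_div(7, 3) == 3, ceil_div(6, 3) == 2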
src/include/migraphx/literal.hpp
...
...
@@ -80,6 +80,7 @@ struct literal : raw_data<literal>
     fill(start, end);
 }
 
+// Directly copies buffer of x
 template <class T, MIGRAPHX_REQUIRES(sizeof(T) == 1)>
 literal(const shape& s, T* x)
     : buffer(make_shared_array<char>(s.bytes())), m_shape(s)
 {
...
...
@@ -107,25 +108,15 @@ struct literal : raw_data<literal>
 std::shared_ptr<char> buffer;
 shape m_shape;
 
+// Keeps the same data ordering as the given container
 template <class Iterator>
 void fill(Iterator start, Iterator end)
 {
     assert(std::distance(start, end) == m_shape.elements());
-    if(m_shape.standard())
-    {
-        m_shape.visit_type([&](auto as) { std::copy(start, end, as.from(buffer.get())); });
-    }
-    else
-    {
-        auto it = start;
-        m_shape.visit_type([&](auto as) {
-            auto output = make_view(m_shape, as.from(buffer.get()));
-            shape_for_each(output.get_shape(), [&](const auto& idx) {
-                output(idx.begin(), idx.end()) = *it; // NOLINT(bugprone-signed-char-misuse)
-                it++;
-            });
-        });
-    }
+    m_shape.visit_type([&](auto as) {
+        auto output = make_view(m_shape, as.from(buffer.get()));
+        std::copy(start, end, output.begin());
+    });
 }
};
...
...
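Note: filling through the view means values land in the buffer according to the shape's strides rather than in container order; the new literal_nstd_shape_vector test below checks exactly this. A standalone sketch of the index mapping for the test's {1, 3, 2, 2} lens with {12, 1, 6, 3} strides (illustration only, no migraphx code):

    #include <array>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Writing values 0..11 in row-major index order through strides
    // {12, 1, 6, 3} scatters them into the buffer the way fill() now does.
    int main()
    {
        std::array<std::size_t, 4> lens{1, 3, 2, 2};
        std::array<std::size_t, 4> strides{12, 1, 6, 3};
        std::vector<float> buffer(12, 0.0f);
        float value = 0.0f;
        for(std::size_t n = 0; n < lens[0]; ++n)
            for(std::size_t c = 0; c < lens[1]; ++c)
                for(std::size_t h = 0; h < lens[2]; ++h)
                    for(std::size_t w = 0; w < lens[3]; ++w)
                        buffer[n * strides[0] + c * strides[1] + h * strides[2] +
                               w * strides[3]] = value++;
        for(float f : buffer)
            std::cout << f << ' '; // prints: 0 4 8 1 5 9 2 6 10 3 7 11
        std::cout << '\n';
    }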
src/include/migraphx/op/pooling.hpp
...
...
@@ -31,7 +31,7 @@
 #include <migraphx/argument.hpp>
 #include <migraphx/par_for.hpp>
 #include <migraphx/shape_for_each.hpp>
-#include <migraphx/int_divide.hpp>
+#include <migraphx/dyn_output.hpp>
 #include <cmath>
 #include <utility>
...
...
@@ -49,6 +49,9 @@ struct pooling
     bool ceil_mode = false;
     int lp_order   = 2;
 
+    // Global pooling with dynamic shape input
+    bool dyn_global = false;
+
     template <class Self, class F>
     static auto reflect(Self& self, F f)
     {
...
...
@@ -57,7 +60,8 @@ struct pooling
                  f(self.stride, "stride"),
                  f(self.lengths, "lengths"),
                  f(self.ceil_mode, "ceil_mode"),
-                 f(self.lp_order, "lp_order"));
+                 f(self.lp_order, "lp_order"),
+                 f(self.dyn_global, "dyn_global"));
     }
 
     std::string name() const { return "pooling"; }
...
...
@@ -65,51 +69,111 @@ struct pooling
     void check_attribute_size() const
     {
         if((padding.size() != stride.size() and (padding.size() / 2) != stride.size()) or
-           stride.size() != lengths.size())
+           (not dyn_global and stride.size() != lengths.size()))
         {
             MIGRAPHX_THROW("POOLING: inconsistent attribute sizes");
         }
     }
 
+    size_t kdims() const
+    {
+        check_attribute_size();
+        return stride.size();
+    }
+
     value attributes() const { return {{"normalize_padding", "padding"}}; }
 
+    std::vector<std::size_t> calc_spatial_dim_out(const std::vector<std::size_t>& input_lens,
+                                                  std::size_t kdims) const
+    {
+        std::vector<std::size_t> output_lens{};
+        for(size_t i = 0; i < kdims; ++i)
+        {
+            if(input_lens[i + 2] == 0)
+            {
+                // handle opt = 0
+                output_lens.push_back(0);
+            }
+            else
+            {
+                std::size_t padding_factor = 2 * padding[i];
+                if(padding.size() == 2 * kdims)
+                    padding_factor = padding[i] + padding[i + kdims];
+                assert(input_lens[i + 2] + padding_factor >= lengths[i]);
+                std::size_t dim_size = input_lens[i + 2] + padding_factor - lengths[i];
+                std::size_t len =
+                    (ceil_mode)
+                        // ceil uint divide
+                        ? dim_size / stride[i] +
+                              static_cast<std::size_t>(dim_size % stride[i] != 0)
+                        // floor divide
+                        : dim_size / stride[i];
+                output_lens.push_back(len + 1);
+            }
+        }
+        return output_lens;
+    }
+
     shape normalize_compute_shape(std::vector<shape> inputs) const
     {
-        check_shapes{inputs, *this}.has(1);
+        check_shapes{inputs, *this, true}.has(1);
         check_attribute_size();
         const shape& input = inputs.at(0);
-        auto input_lens    = input.lens();
-        size_t kdims       = input_lens.size() - 2;
-        auto input_size    = inputs[0].lens().size();
         auto padding_size  = padding.size();
-        if(input_size != padding_size / 2 + 2 and input_size != padding_size + 2)
+        size_t kdims       = input.ndim() - 2;
+        if(input.ndim() != padding_size / 2 + 2 and input.ndim() != padding_size + 2)
         {
             MIGRAPHX_THROW("POOLING: input and attribute size mismatch!");
         }
-        std::vector<std::size_t> output_lens(input_lens.begin(), input_lens.begin() + 2);
-        for(size_t i = 0; i < kdims; i++)
-        {
-            std::ptrdiff_t dim_size;
-            auto padding_factor = 2 * padding[i];
-            if(padding_size == 2 * kdims)
-                padding_factor = padding[i] + padding[i + kdims];
-            dim_size = input_lens[i + 2] + padding_factor - lengths[i];
-            assert(dim_size >= 0);
-            std::size_t len = (ceil_mode) ? ceil_divide<std::ptrdiff_t>(dim_size, stride[i])
-                                          : floor_divide<std::ptrdiff_t>(dim_size, stride[i]);
-            output_lens.push_back(std::size_t(std::max<std::ptrdiff_t>(1, len + 1)));
-        }
-        return inputs[0].with_lens(output_lens);
-    }
-
-    size_t kdims() const
-    {
-        check_attribute_size();
-        return stride.size();
-    }
+        if(input.dynamic())
+        {
+            auto input_dyn_dims = input.dyn_dims();
+            std::vector<shape::dynamic_dimension> output_dyn_dims(input_dyn_dims.begin(),
+                                                                  input_dyn_dims.begin() + 2);
+            if(dyn_global)
+            {
+                for(size_t i = 0; i < kdims; ++i)
+                {
+                    output_dyn_dims.push_back(shape::dynamic_dimension{1, 1, 1});
+                }
+                return {input.type(), output_dyn_dims};
+            }
+            else
+            {
+                auto min_spatial_dims = calc_spatial_dim_out(input.min_lens(), kdims);
+                auto max_spatial_dims = calc_spatial_dim_out(input.max_lens(), kdims);
+                auto opt_spatial_dims = calc_spatial_dim_out(input.opt_lens(), kdims);
+                for(size_t i = 0; i < kdims; ++i)
+                {
+                    output_dyn_dims.push_back(shape::dynamic_dimension{
+                        min_spatial_dims[i], max_spatial_dims[i], opt_spatial_dims[i]});
+                }
+                return {input.type(), output_dyn_dims};
+            }
+        }
+        else
+        {
+            auto input_lens = input.lens();
+            std::vector<std::size_t> output_lens(input_lens.begin(), input_lens.begin() + 2);
+            // Used for when normalize_compute_shape() is called again at model eval time
+            // for an originally dynamic shape. Since kernel shape is not used with dyn_global.
+            if(dyn_global)
+            {
+                for(size_t i = 0; i < kdims; ++i)
+                {
+                    output_lens.push_back(1);
+                }
+                return {input.type(), output_lens};
+            }
+            else
+            {
+                auto output_spatial_lens = calc_spatial_dim_out(input_lens, kdims);
+                output_lens.insert(
+                    output_lens.end(), output_spatial_lens.begin(), output_spatial_lens.end());
+                return inputs[0].with_lens(output_lens);
+            }
+        }
+    }
struct lpnorm_pool
...
...
@@ -158,7 +222,11 @@ struct pooling
     };
 
     template <class Type, class Out, class In, class Op>
-    void calc_pooling(const shape& output_shape, Out& output, const In& input, Op op) const
+    void calc_pooling(const shape& output_shape,
+                      Out& output,
+                      const In& input,
+                      const std::vector<std::size_t>& kernel_dims,
+                      Op op) const
     {
         auto in_s    = input.get_shape();
         auto in_lens = in_s.lens();
...
...
@@ -172,7 +240,7 @@ struct pooling
             auto d_2  = dim - 2;
             int start = static_cast<int>(idx_o[dim] * stride[d_2]) -
                         static_cast<int>(padding[d_2]);
-            int end   = std::min(start + lengths[d_2], in_lens[dim]);
+            int end   = std::min(start + kernel_dims[d_2], in_lens[dim]);
             start     = std::max(start, 0);
             win_start.push_back(start);
             win_size.push_back(end - start);
...
...
@@ -198,21 +266,32 @@ struct pooling
         });
     }
 
-    argument compute(const shape& output_shape, std::vector<argument> args) const
+    argument compute(const dyn_output& dyn_out, std::vector<argument> args) const
     {
-        argument result{output_shape};
+        argument result{dyn_out.computed_shape};
+        auto input_lens = args[0].get_shape().lens();
+        std::vector<std::size_t> kernel_dims;
+        if(dyn_global)
+        {
+            kernel_dims.insert(kernel_dims.end(), input_lens.begin() + 2, input_lens.end());
+        }
+        else
+        {
+            kernel_dims = this->lengths;
+        }
+
         visit_all(result, args[0])([&](auto output, auto input) {
             using type = typename decltype(output)::value_type;
             switch(mode)
             {
             case migraphx::op::pooling_mode::average:
-                calc_pooling<type>(output_shape, output, input, avg_pool{});
+                calc_pooling<type>(dyn_out.computed_shape, output, input, kernel_dims, avg_pool{});
                 break;
             case migraphx::op::pooling_mode::max:
-                calc_pooling<type>(output_shape, output, input, max_pool{});
+                calc_pooling<type>(dyn_out.computed_shape, output, input, kernel_dims, max_pool{});
                 break;
             case migraphx::op::pooling_mode::lpnorm:
-                calc_pooling<type>(output_shape, output, input, lpnorm_pool{lp_order});
+                calc_pooling<type>(
+                    dyn_out.computed_shape, output, input, kernel_dims, lpnorm_pool{lp_order});
                 break;
             }
         });
...
...
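Note: the spatial sizes produced by calc_spatial_dim_out above follow the usual pooling formula: dim_size = in + pad_total - kernel, divided by the stride with floor or ceil rounding, plus one. A standalone sketch of that arithmetic (illustration only; the values mirror the {4, 12, 8} dynamic dimension used in the shape tests below):

    #include <cstddef>
    #include <iostream>

    // Pooling output length for one spatial dimension, as in
    // calc_spatial_dim_out(): floor or ceil unsigned divide, plus one.
    std::size_t pool_out_len(
        std::size_t in, std::size_t pad_total, std::size_t kernel, std::size_t stride, bool ceil_mode)
    {
        std::size_t dim_size = in + pad_total - kernel;
        std::size_t len =
            ceil_mode ? dim_size / stride + static_cast<std::size_t>(dim_size % stride != 0)
                      : dim_size / stride;
        return len + 1;
    }

    int main()
    {
        // Matches pooling_dyn_shape3: in = {4, 12}, kernel 1, stride 3 -> out = {2, 4}
        std::cout << pool_out_len(4, 0, 1, 3, false) << ' '    // 2
                  << pool_out_len(12, 0, 1, 3, false) << '\n'; // 4
    }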
src/include/migraphx/shape_for_each.hpp
...
...
@@ -31,6 +31,9 @@
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 
+/**
+ * Iterates the given function over the indices from the shape in order.
+ */
 template <class F>
 void shape_for_each(const migraphx::shape& s, F f)
 {
...
...
@@ -51,7 +54,6 @@ void shape_for_each(const migraphx::shape& s, F f)
         call(indices);
     }
 }
 
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
...
...
src/insert_pad.cpp
...
...
@@ -77,14 +77,14 @@ static void update_pooling(const instruction_ref& input, const instruction_ref&
     {
         return;
     }
 
-    auto kdims = input->get_shape().lens().size() - 2;
+    auto kdims = input->get_shape().ndim() - 2;
 
     if(std::equal(op.padding.begin(),
                   op.padding.begin() + kdims,
                   op.padding.begin() + kdims,
                   op.padding.end()))
         return;
 
-    std::vector<int64_t> padding(input->get_shape().lens().size() * 2, 0);
+    std::vector<int64_t> padding(input->get_shape().ndim() * 2, 0);
     std::vector<size_t> pads_l(op.padding.begin(), op.padding.begin() + kdims);
     std::vector<size_t> pads_r(op.padding.begin() + kdims, op.padding.end());
     op.padding = std::vector<size_t>(kdims * 2, 0);
...
...
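Note: the four-iterator std::equal above compares the first kdims entries of op.padding (left pads) against the remaining entries (right pads); when the halves match, the padding is symmetric and update_pooling can return without inserting an explicit pad. A minimal sketch of that check with hypothetical values:

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main()
    {
        // Pooling stores padding as {pads_l..., pads_r...}; kdims is the number
        // of spatial dimensions. Symmetric padding means the halves are equal.
        std::vector<std::size_t> padding{1, 2, 1, 2}; // left = {1, 2}, right = {1, 2}
        std::size_t kdims = padding.size() / 2;
        bool symmetric    = std::equal(padding.begin(),
                                       padding.begin() + kdims,
                                       padding.begin() + kdims,
                                       padding.end());
        std::cout << std::boolalpha << symmetric << '\n'; // true
    }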
src/onnx/parse_pooling.cpp
...
...
@@ -47,52 +47,42 @@ struct parse_pooling : op_parser<parse_pooling>
             {"GlobalLpPool", "lpnorm"}};
     }
 
-    instruction_ref parse(const op_desc& opd,
-                          const onnx_parser& /*parser*/,
-                          onnx_parser::node_info info,
-                          std::vector<instruction_ref> args) const
+    value handle_values(const op_desc& opd,
+                        onnx_parser::node_info info,
+                        const shape& in_shape,
+                        value values) const
     {
-        const std::unordered_map<std::string, op::pooling_mode> mode_map = {
-            {"max", op::pooling_mode::max},
-            {"average", op::pooling_mode::average},
-            {"lpnorm", op::pooling_mode::lpnorm}};
-        std::string mode = opd.op_name;
-        if(not contains(mode_map, mode))
-        {
-            MIGRAPHX_THROW("onnx pooling mode must be [\"max\", \"average\", \"lpnorm\"]");
-        }
-        operation op = make_op("pooling", {{"mode", mode_map.at(mode)}});
-        value values = op.to_value();
-        auto l0      = args[0];
-        auto in_lens = l0->get_shape().lens();
-        assert(in_lens.size() > 2);
-        auto kdims = in_lens.size() - 2;
+        auto kdims = in_shape.ndim() - 2;
         if(starts_with(opd.onnx_name, "Global"))
         {
-            values["lengths"] = std::vector<size_t>(in_lens.begin() + 2, in_lens.end());
+            // if spatial dimensions are dynamic use dyn_global flag
+            if(in_shape.dynamic() and std::any_of(in_shape.dyn_dims().cbegin() + 2,
+                                                  in_shape.dyn_dims().cend(),
+                                                  [](auto dd) { return not dd.is_fixed(); }))
+            {
+                values["dyn_global"] = true;
+                values["lengths"]    = std::vector<size_t>();
+            }
+            else
+            {
+                // works with static and fixed dynamic shape
+                auto m_lens       = in_shape.max_lens();
+                values["lengths"] = std::vector<size_t>(m_lens.begin() + 2, m_lens.end());
+            }
         }
 
         // does not support ceil_mode
         if(contains(info.attributes, "ceil_mode"))
         {
             values["ceil_mode"] = static_cast<bool>(info.attributes.at("ceil_mode").i());
         }
 
-        // count include padding, if count include pad is 1, we always use
-        // explicit pad
-        int count_include_pad = 0;
-        if(contains(info.attributes, "count_include_pad"))
-        {
-            count_include_pad = info.attributes.at("count_include_pad").i();
-        }
-
         if(contains(info.attributes, "strides"))
         {
             values["stride"].clear();
             copy(info.attributes["strides"].ints(), std::back_inserter(values["stride"]));
             check_attr_sizes(kdims, values["stride"].size(), "PARSE_POOLING: inconsistent strides");
         }
 
         if(contains(info.attributes, "kernel_shape"))
         {
             values["lengths"].clear();
...
...
@@ -110,6 +100,46 @@ struct parse_pooling : op_parser<parse_pooling>
// ensure pads availabe only when auto_pad is "NOT_SET"
check_padding_mode
(
info
,
"POOLING"
);
return
values
;
}
instruction_ref
parse
(
const
op_desc
&
opd
,
const
onnx_parser
&
/*parser*/
,
onnx_parser
::
node_info
info
,
std
::
vector
<
instruction_ref
>
args
)
const
{
std
::
string
mode
=
opd
.
op_name
;
const
std
::
unordered_map
<
std
::
string
,
op
::
pooling_mode
>
mode_map
=
{
{
"max"
,
op
::
pooling_mode
::
max
},
{
"average"
,
op
::
pooling_mode
::
average
},
{
"lpnorm"
,
op
::
pooling_mode
::
lpnorm
}};
if
(
not
contains
(
mode_map
,
mode
))
{
MIGRAPHX_THROW
(
"PARSE_POOLING: onnx pooling mode must be [
\"
max
\"
,
\"
average
\"
,
\"
lpnorm
\"
]"
);
}
operation
op
=
make_op
(
"pooling"
,
{{
"mode"
,
mode_map
.
at
(
mode
)}});
value
values
=
op
.
to_value
();
auto
l0
=
args
[
0
];
auto
in_shape
=
l0
->
get_shape
();
assert
(
in_shape
.
ndim
()
>
2
);
auto
kdims
=
in_shape
.
ndim
()
-
2
;
values
=
handle_values
(
opd
,
info
,
in_shape
,
values
);
// count include padding, if count include pad is 1, we always use
// explicit pad
int
count_include_pad
=
0
;
if
(
contains
(
info
.
attributes
,
"count_include_pad"
))
{
if
(
in_shape
.
dynamic
())
{
MIGRAPHX_THROW
(
"PARSE_POOLING: count_include_pad attribute is not supported for "
"dynamic input shape"
);
}
count_include_pad
=
info
.
attributes
.
at
(
"count_include_pad"
).
i
();
}
std
::
vector
<
int64_t
>
paddings
;
float
pad_val
=
((
mode
==
"max"
)
?
std
::
numeric_limits
<
float
>::
lowest
()
:
0.0
f
);
...
...
@@ -122,6 +152,13 @@ struct parse_pooling : op_parser<parse_pooling>
         }
 
         if(contains(info.attributes, "auto_pad"))
         {
+            if(in_shape.dynamic())
+            {
+                MIGRAPHX_THROW(
+                    "PARSE_POOLING: Auto padding pooling with dynamic input shape not supported");
+            }
+            else
+            {
                 values["padding"].clear();
                 // return paddings could be empty, then setting to 0 for no padding
...
...
@@ -129,9 +166,10 @@ struct parse_pooling : op_parser<parse_pooling>
                                values,
                                values["lengths"].to_vector<std::size_t>(),
                                {1, 1},
-                               in_lens,
+                               in_shape.lens(),
                                paddings);
             }
         }
 
         if(paddings.size() != 2 * kdims)
         {
...
...
@@ -150,6 +188,7 @@ struct parse_pooling : op_parser<parse_pooling>
             values["stride"].resize(kdims);
             std::fill_n(values["stride"].begin(), kdims, 1);
         }
+        // used to calculate the supposed output shape
         std::vector<int64_t> orig_padding = paddings;
...
...
@@ -159,6 +198,11 @@ struct parse_pooling : op_parser<parse_pooling>
         if(not slice_start.empty())
         {
+            if(in_shape.dynamic())
+            {
+                MIGRAPHX_THROW(
+                    "PARSE_POOLING: asymmetric padding not supported for dynamic input shape");
+            }
             // calculate expected output shape
             orig_padding.insert(orig_padding.begin() + kdims, 2, 0);
             orig_padding.insert(orig_padding.begin(), 2, 0);
...
...
test/literal_test.cpp
...
...
@@ -49,6 +49,25 @@ TEST_CASE(literal_test)
     EXPECT(l4.empty());
 }
 
+TEST_CASE(literal_nstd_shape_vector)
+{
+    migraphx::shape nstd_shape{migraphx::shape::float_type, {1, 3, 2, 2}, {12, 1, 6, 3}};
+    std::vector<float> data(12);
+    std::iota(data.begin(), data.end(), 0);
+    auto l0 = migraphx::literal{nstd_shape, data};
+
+    // check data buffer is read in correctly
+    std::vector<float> expected_buffer = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
+    const auto* start = reinterpret_cast<const float*>(l0.data());
+    std::vector<float> l0_data{start, start + 12};
+    EXPECT(l0_data == expected_buffer);
+
+    // check that using visit() (that uses a tensor view) gives data in correct order
+    std::vector<float> results_vector(12);
+    l0.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    EXPECT(results_vector == data);
+}
+
 TEST_CASE(literal_os1)
 {
     migraphx::literal l{1};
...
...
test/onnx/averagepool_dyn_asym_padding_error_test.onnx (new file, 0 → 100644): File added
test/onnx/averagepool_dyn_autopad_error_test.onnx (new file, 0 → 100644): File added
test/onnx/averagepool_dyn_cip_error_test.onnx (new file, 0 → 100644): File added
test/onnx/averagepool_dyn_test.onnx (new file, 0 → 100644): File added
test/onnx/gen_onnx.py
...
...
@@ -237,6 +237,64 @@ def averagepool_3d_test():
     return ([node], [x], [out])
 
 
+@onnx_test
+def averagepool_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 3, 5, 5, 5])
+    out = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 3, 3, 3, 3])
+    node = onnx.helper.make_node('AveragePool',
+                                 inputs=['0'],
+                                 outputs=['1'],
+                                 kernel_shape=[3, 3, 3])
+    return ([node], [x], [out])
+
+
+@onnx_test
+def averagepool_dyn_autopad_error_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 1, 5, 5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, 1, 5, 5])
+    node = onnx.helper.make_node('AveragePool',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 kernel_shape=[2, 2],
+                                 auto_pad='SAME_LOWER')
+    return ([node], [x], [y])
+
+
+@onnx_test
+def averagepool_dyn_asym_padding_error_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 1, 5, 5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, 1, 3, 3])
+    node = onnx.helper.make_node('AveragePool',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 kernel_shape=[2, 2],
+                                 strides=[2, 2],
+                                 pads=[0, 0, 1, 1])
+    return ([node], [x], [y])
+
+
+@onnx_test
+def averagepool_dyn_cip_error_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 1, 5, 5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, 1, 1, 1])
+    node = onnx.helper.make_node('AveragePool',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 kernel_shape=[2, 2],
+                                 count_include_pad=1)
+    return ([node], [x], [y])
+
+
 @onnx_test
 def averagepool_notset_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
...
...
@@ -2069,6 +2127,21 @@ def globalavgpool_test():
     return ([node], [x], [y])
 
 
+@onnx_test
+def globalavgpool_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 3, 16, 16])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 3, 1, 1])
+    node = onnx.helper.make_node(
+        'GlobalAveragePool',
+        inputs=['0'],
+        outputs=['1'],
+    )
+    return ([node], [x], [y])
+
+
 @onnx_test
 def globallppool_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 16, 16])
...
...
@@ -2083,6 +2156,21 @@ def globallppool_test():
     return ([node], [x], [y])
 
 
+@onnx_test
+def globallppool_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, None, None])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 1, 1])
+    node = onnx.helper.make_node(
+        'GlobalLpPool',
+        inputs=['0'],
+        outputs=['1'],
+    )
+    return ([node], [x], [y])
+
+
 @onnx_test
 def globalmaxpool_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 16, 16])
...
...
@@ -2097,6 +2185,21 @@ def globalmaxpool_test():
     return ([node], [x], [y])
 
 
+@onnx_test
+def globalmaxpool_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 3, 32, 32])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 3, 1, 1])
+    node = onnx.helper.make_node(
+        'GlobalMaxPool',
+        inputs=['0'],
+        outputs=['1'],
+    )
+    return ([node], [x], [y])
+
+
 @onnx_test
 def greater_test():
     ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
...
...
test/onnx/globalavgpool_dyn_test.onnx (new file, 0 → 100644): File added
test/onnx/globallppool_dyn_test.onnx (new file, 0 → 100644): File added
test/onnx/globalmaxpool_dyn_test.onnx (new file, 0 → 100644): File added
test/onnx/onnx_test.cpp
...
...
@@ -273,6 +273,51 @@ TEST_CASE(averagepool_3d_test)
     EXPECT(p == prog);
 }
 
+TEST_CASE(averagepool_dyn_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto l0  = mm->add_parameter(
+        "0",
+        {migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}, {5, 5, 0}}});
+    auto ret = mm->add_instruction(
+        migraphx::make_op("pooling",
+                          {{"mode", migraphx::op::pooling_mode::average},
+                           {"padding", {0, 0, 0, 0, 0, 0}},
+                           {"stride", {1, 1, 1}},
+                           {"lengths", {3, 3, 3}}}),
+        l0);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    auto prog                     = migraphx::parse_onnx("averagepool_dyn_test.onnx", options);
+    EXPECT(p == prog);
+}
+
+TEST_CASE(averagepool_dyn_autopad_error_test)
+{
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    EXPECT(test::throws(
+        [&] { migraphx::parse_onnx("averagepool_dyn_autopad_error_test.onnx", options); }));
+}
+
+TEST_CASE(averagepool_dyn_asym_padding_error_test)
+{
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    EXPECT(test::throws(
+        [&] { migraphx::parse_onnx("averagepool_dyn_asym_padding_error_test.onnx", options); }));
+}
+
+TEST_CASE(averagepool_dyn_cip_error_test)
+{
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    EXPECT(test::throws(
+        [&] { migraphx::parse_onnx("averagepool_dyn_cip_error_test.onnx", options); }));
+}
+
 TEST_CASE(averagepool_notset_test)
 {
     migraphx::program p;
...
...
@@ -2144,6 +2189,28 @@ TEST_CASE(globalavgpool_test)
     EXPECT(p == prog);
 }
 
+TEST_CASE(globalavgpool_dyn_test)
+{
+    migraphx::program p;
+    auto* mm   = p.get_main_module();
+    auto input = mm->add_parameter(
+        "0",
+        migraphx::shape{migraphx::shape::float_type,
+                        {{1, 4, 0}, {3, 3, 0}, {16, 16, 0}, {16, 16, 0}}});
+    auto ret = mm->add_instruction(
+        migraphx::make_op("pooling",
+                          {{"mode", migraphx::op::pooling_mode::average},
+                           {"lengths", {16, 16}},
+                           {"padding", {0, 0, 0, 0}}}),
+        input);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    auto prog                     = parse_onnx("globalavgpool_dyn_test.onnx", options);
+    EXPECT(p == prog);
+}
+
 TEST_CASE(globallppool_test)
 {
     migraphx::program p;
...
...
@@ -2161,6 +2228,29 @@ TEST_CASE(globallppool_test)
     EXPECT(p == prog);
 }
 
+TEST_CASE(globallppool_dyn_test)
+{
+    migraphx::program p;
+    auto* mm   = p.get_main_module();
+    auto input = mm->add_parameter(
+        "0",
+        migraphx::shape{migraphx::shape::float_type,
+                        {{1, 1, 0}, {3, 3, 0}, {16, 32, 0}, {16, 32, 0}}});
+    auto ret = mm->add_instruction(
+        migraphx::make_op("pooling",
+                          {{"mode", migraphx::op::pooling_mode::lpnorm},
+                           {"dyn_global", true},
+                           {"padding", {0, 0, 0, 0}},
+                           {"lengths", {}}}),
+        input);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {16, 32, 0};
+    auto prog                     = migraphx::parse_onnx("globallppool_dyn_test.onnx", options);
+    EXPECT(p == prog);
+}
+
 TEST_CASE(globalmaxpool_test)
 {
     migraphx::program p;
...
...
@@ -2178,6 +2268,28 @@ TEST_CASE(globalmaxpool_test)
     EXPECT(p == prog);
 }
 
+TEST_CASE(globalmaxpool_dyn_test)
+{
+    migraphx::program p;
+    auto* mm   = p.get_main_module();
+    auto input = mm->add_parameter(
+        "0",
+        migraphx::shape{migraphx::shape::float_type,
+                        {{1, 4, 0}, {3, 3, 0}, {32, 32, 0}, {32, 32, 0}}});
+    auto ret = mm->add_instruction(
+        migraphx::make_op("pooling",
+                          {{"mode", migraphx::op::pooling_mode::max},
+                           {"lengths", {32, 32}},
+                           {"padding", {0, 0, 0, 0}}}),
+        input);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    auto prog                     = parse_onnx("globalmaxpool_dyn_test.onnx", options);
+    EXPECT(p == prog);
+}
+
 TEST_CASE(greater_test)
 {
     migraphx::program p;
...
...
test/op_shape_test.cpp
...
...
@@ -1549,16 +1549,76 @@ TEST_CASE(nms_shape)
     score_thres_s);
 }
 
-TEST_CASE(pooling_shape)
+TEST_CASE(pooling_shape0)
 {
     migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
     throws_shape(migraphx::make_op("pooling",
                                    {{"mode", migraphx::op::pooling_mode::max},
                                     {"padding", {1}},
                                     {"stride", {0}},
                                     {"lengths", {1}}}),
                  input);
+}
 
+TEST_CASE(pooling_shape1)
+{
+    migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
     migraphx::shape output{migraphx::shape::float_type, {4, 3, 1, 1}};
     expect_shape(output,
                  migraphx::make_op("pooling",
                                    {{"mode", migraphx::op::pooling_mode::max},
                                     {"padding", {0, 0}},
                                     {"stride", {3, 3}},
                                     {"lengths", {1, 1}}}),
                  input);
 }
+
+TEST_CASE(pooling_shape2)
+{
+    migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
+    migraphx::shape output{migraphx::shape::float_type, {4, 3, 2, 2}};
+    expect_shape(output,
+                 migraphx::make_op("pooling",
+                                   {{"mode", migraphx::op::pooling_mode::max},
+                                    {"padding", {0, 0}},
+                                    {"stride", {3, 3}},
+                                    {"lengths", {1, 1}},
+                                    {"ceil_mode", true}}),
+                 input);
+}
+
+TEST_CASE(pooling_shape3)
+{
+    migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
+    migraphx::shape output{migraphx::shape::float_type, {4, 3, 3, 3}};
+    expect_shape(output,
+                 migraphx::make_op("pooling",
+                                   {{"mode", migraphx::op::pooling_mode::max},
+                                    {"padding", {2, 2}},
+                                    {"stride", {3, 3}},
+                                    {"lengths", {3, 3}},
+                                    {"ceil_mode", true}}),
+                 input);
+}
+
+TEST_CASE(pooling_dyn_shape0)
+{
+    migraphx::shape input{migraphx::shape::float_type,
+                          {{1, 4, 0}, {3, 3, 3}, {3, 3, 3}, {3, 3, 0}}};
+    throws_shape(migraphx::make_op("pooling",
+                                   {{"mode", migraphx::op::pooling_mode::max},
+                                    {"padding", {1}},
+                                    {"stride", {0}},
+                                    {"lengths", {1}}}),
+                 input);
+}
+
+TEST_CASE(pooling_dyn_shape1)
+{
+    migraphx::shape input{migraphx::shape::float_type,
+                          {{1, 4, 0}, {3, 3, 3}, {3, 3, 3}, {3, 3, 0}}};
+    migraphx::shape output{migraphx::shape::float_type,
+                           {{1, 4, 0}, {3, 3, 3}, {1, 1, 1}, {1, 1, 0}}};
+    expect_shape(output,
+                 migraphx::make_op("pooling",
+                                   {{"mode", migraphx::op::pooling_mode::max},
...
...
@@ -1566,9 +1626,15 @@ TEST_CASE(pooling_shape)
                                    {"stride", {3, 3}},
                                    {"lengths", {1, 1}}}),
                  input);
 }
 
-    migraphx::shape output1{migraphx::shape::float_type, {4, 3, 2, 2}};
-    expect_shape(output1,
+TEST_CASE(pooling_dyn_shape2)
+{
+    migraphx::shape input{migraphx::shape::float_type,
+                          {{1, 4, 0}, {5, 5, 0}, {3, 3, 3}, {3, 3, 0}}};
+    migraphx::shape output{migraphx::shape::float_type,
+                           {{1, 4, 0}, {5, 5, 0}, {2, 2, 2}, {2, 2, 0}}};
+    expect_shape(output,
                  migraphx::make_op("pooling",
                                    {{"mode", migraphx::op::pooling_mode::max},
                                     {"padding", {0, 0}},
...
...
@@ -1578,6 +1644,37 @@ TEST_CASE(pooling_shape)
                  input);
 }
 
+TEST_CASE(pooling_dyn_shape3)
+{
+    migraphx::shape input{migraphx::shape::float_type,
+                          {{4, 4, 0}, {3, 3, 0}, {4, 12, 8}, {4, 12, 8}}};
+    migraphx::shape output{migraphx::shape::float_type,
+                           {{4, 4, 0}, {3, 3, 0}, {2, 4, 3}, {2, 4, 3}}};
+    expect_shape(output,
+                 migraphx::make_op("pooling",
+                                   {{"mode", migraphx::op::pooling_mode::max},
+                                    {"padding", {0, 0}},
+                                    {"stride", {3, 3}},
+                                    {"lengths", {1, 1}}}),
+                 input);
+}
+
+TEST_CASE(pooling_dyn_shape4)
+{
+    migraphx::shape input{migraphx::shape::float_type,
+                          {{4, 4, 0}, {3, 3, 0}, {4, 12, 8}, {4, 12, 8}}};
+    migraphx::shape output{migraphx::shape::float_type,
+                           {{4, 4, 0}, {3, 3, 0}, {3, 6, 4}, {3, 6, 4}}};
+    expect_shape(output,
+                 migraphx::make_op("pooling",
+                                   {{"mode", migraphx::op::pooling_mode::max},
+                                    {"padding", {2, 2}},
+                                    {"stride", {3, 3}},
+                                    {"lengths", {3, 3}},
+                                    {"ceil_mode", true}}),
+                 input);
+}
+
 TEST_CASE(prefix_scan_sum)
 {
     {
...
...
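Note: in these shape tests each dynamic dimension is written as a {min, max, opt} triple, where opt = 0 means no optimum is given; that is why calc_spatial_dim_out special-cases a zero length ("handle opt = 0"). A standalone sketch of how the pooling formula maps over such a triple (illustrative struct, not the migraphx definition):

    #include <cstddef>
    #include <iostream>

    // Illustrative stand-in for a {min, max, opt} dynamic dimension; opt == 0
    // means "no optimum given" and is propagated as 0 through the output shape.
    struct dyn_dim
    {
        std::size_t min, max, opt;
    };

    // Pooling output length for one spatial value (kernel 1, stride 3, no pad).
    std::size_t out_len(std::size_t in) { return in == 0 ? 0 : (in - 1) / 3 + 1; }

    int main()
    {
        dyn_dim d{4, 12, 8}; // as in pooling_dyn_shape3
        dyn_dim o{out_len(d.min), out_len(d.max), out_len(d.opt)};
        std::cout << o.min << ' ' << o.max << ' ' << o.opt << '\n'; // 2 4 3
    }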
test/ref_ops_test.cpp
...
...
@@ -566,10 +566,9 @@ TEST_CASE(atanh_dyn_test)
     EXPECT(migraphx::verify_range(results_vector, gold));
 }
 
-TEST_CASE(avgpool_test)
+TEST_CASE(avgpool_rank3_test)
 {
     // 1D case 1, input is 3D
-    {
     migraphx::program p;
     auto* mm = p.get_main_module();
     auto s   = migraphx::shape{migraphx::shape::float_type, {1, 3, 4}};
...
...
@@ -588,10 +587,36 @@ TEST_CASE(avgpool_test)
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
     std::vector<float> gold{0.25, 0.3, 0.25, 0.65, 0.7, 0.5, 0.4, 0.4, 0.35};
     EXPECT(migraphx::verify_range(results_vector, gold));
-    }
 }
 
+TEST_CASE(avgpool_dyn_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto s   = migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {4, 4, 0}}};
+    auto x   = mm->add_parameter("X", s);
+    mm->add_instruction(migraphx::make_op("pooling",
+                                          {{"mode", migraphx::op::pooling_mode::average},
+                                           {"lengths", {2}},
+                                           {"padding", {0}},
+                                           {"stride", {1}}}),
+                        x);
+    p.compile(migraphx::ref::target{});
+
+    std::vector<float> data{0.3, 0.2, 0.4, 0.1, 0.8, 0.5, 0.9, 0.1, 0.1, 0.7, 0.1, 0.6};
+    migraphx::shape input_fixed_shape{migraphx::shape::float_type, {1, 3, 4}};
+    migraphx::parameter_map params;
+    params["X"] = migraphx::argument(input_fixed_shape, data.data());
+    auto result = p.eval(params).back();
+    std::vector<float> results_vector;
+    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold{0.25, 0.3, 0.25, 0.65, 0.7, 0.5, 0.4, 0.4, 0.35};
+    EXPECT(migraphx::verify_range(results_vector, gold));
+}
+
+TEST_CASE(avgpool_rank3_stride2_test)
+{
     // 1D case 2, stride 2
-    {
     migraphx::program p;
     auto* mm = p.get_main_module();
     auto s   = migraphx::shape{migraphx::shape::float_type, {2, 2, 4}};
...
...
@@ -635,10 +660,11 @@ TEST_CASE(avgpool_test)
                             0.7854, -0.2915};
     EXPECT(migraphx::verify_range(results_vector, gold));
-    }
 }
 
+TEST_CASE(avgpool_rank5_test)
+{
     // 3D, input is 5D
-    {
     migraphx::program p;
     auto* mm = p.get_main_module();
     auto s   = migraphx::shape{migraphx::shape::float_type, {2, 2, 3, 3, 3}};
...
...
@@ -670,7 +696,6 @@ TEST_CASE(avgpool_test)
         -0.04,      0.304125,  0.40775,   0.2835,   0.112375, -0.073375, 0.4355,    -0.187,
         -0.392625,  -0.258375, -0.485875, -0.0345,  0.16125,  -0.131875, -0.228375, 0.068625};
     EXPECT(migraphx::verify_range(results_vector, gold));
-    }
 }
 
 TEST_CASE(broadcast_test)
...
...
@@ -919,11 +944,38 @@ TEST_CASE(contiguous_test)
     p.compile(migraphx::ref::target{});
     auto result = p.eval({}).back();
 
+    std::vector<size_t> new_strides = {12, 4, 2, 1};
+    EXPECT(result.get_shape().strides() == new_strides);
+
     std::vector<float> results_vector(12);
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
-    std::vector<size_t> new_lens    = {1, 3, 2, 2};
-    std::vector<size_t> new_strides = {12, 1, 6, 3};
-    EXPECT(migraphx::verify_range(results_vector, data));
+    std::vector<float> gold = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
+    EXPECT(migraphx::verify_range(results_vector, gold));
 }
 
+TEST_CASE(contiguous_param_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    migraphx::shape a_shape{migraphx::shape::float_type, {1, 3, 2, 2}, {12, 1, 6, 3}};
+    auto a = mm->add_parameter("X", a_shape);
+    mm->add_instruction(migraphx::make_op("contiguous"), a);
+    p.compile(migraphx::ref::target{});
+
+    std::vector<float> data(12);
+    std::iota(data.begin(), data.end(), 0);
+    migraphx::parameter_map params;
+    params["X"] = migraphx::argument(a_shape, data.data());
+    auto result = p.eval(params).back();
+
+    std::vector<size_t> new_strides = {12, 4, 2, 1};
+    EXPECT(result.get_shape().strides() == new_strides);
+
+    std::vector<float> results_vector(12);
+    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold = {0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11};
+    EXPECT(migraphx::verify_range(results_vector, gold));
+}
+
 TEST_CASE(contiguous_dyn_test)
...
...
@@ -2668,6 +2720,30 @@ TEST_CASE(globalavgpool_test)
     EXPECT(migraphx::verify_range(results_vector, gold));
 }
 
+TEST_CASE(globalavgpool_dyn_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto s   = migraphx::shape{migraphx::shape::float_type,
+                             {{1, 1, 0}, {3, 3, 0}, {2, 6, 0}, {2, 6, 2}}};
+    auto x   = mm->add_parameter("X", s);
+    mm->add_instruction(
+        migraphx::make_op("pooling",
+                          {{"mode", migraphx::op::pooling_mode::average}, {"dyn_global", true}}),
+        x);
+    p.compile(migraphx::ref::target{});
+
+    std::vector<float> data{0.3, 0.2, 0.4, 0.1, 0.8, 0.5, 0.9, 0.1, 0.1, 0.7, 0.1, 0.6};
+    migraphx::shape input_fixed_shape{migraphx::shape::float_type, {1, 3, 2, 2}};
+    migraphx::parameter_map params;
+    params["X"] = migraphx::argument(input_fixed_shape, data.data());
+    auto result = p.eval(params).back();
+    std::vector<float> results_vector(3);
+    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold{0.25, 0.575, 0.375};
+    EXPECT(migraphx::verify_range(results_vector, gold));
+}
+
 TEST_CASE(globallppool_test)
 {
     migraphx::program p;
...
...
@@ -2690,6 +2766,30 @@ TEST_CASE(globallppool_test)
     EXPECT(migraphx::verify_range(results_vector, gold));
 }
 
+TEST_CASE(globallppool_dyn_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto s   = migraphx::shape{migraphx::shape::float_type,
+                             {{1, 1, 0}, {3, 3, 0}, {2, 6, 2}, {2, 6, 2}}};
+    auto x   = mm->add_parameter("X", s);
+    mm->add_instruction(
+        migraphx::make_op("pooling",
+                          {{"mode", migraphx::op::pooling_mode::lpnorm}, {"dyn_global", true}}),
+        x);
+    p.compile(migraphx::ref::target{});
+
+    std::vector<float> data{0.3, 0.2, 0.4, 0.1, 0.8, 0.5, 0.9, 0.1, 0.1, 0.7, 0.1, 0.6};
+    migraphx::shape input_fixed_shape{migraphx::shape::float_type, {1, 3, 2, 2}};
+    migraphx::parameter_map params;
+    params["X"] = migraphx::argument(input_fixed_shape, data.data());
+    auto result = p.eval(params).back();
+    std::vector<float> results_vector(3);
+    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold{0.5477225575051662, 1.307669683062202, 0.9327379053088815};
+    EXPECT(migraphx::verify_range(results_vector, gold));
+}
+
 TEST_CASE(globalmaxpool_test)
 {
     migraphx::program p;
...
...
@@ -2711,6 +2811,30 @@ TEST_CASE(globalmaxpool_test)
     EXPECT(migraphx::verify_range(results_vector, gold));
 }
 
+TEST_CASE(globalmaxpool_dyn_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto s   = migraphx::shape{migraphx::shape::float_type,
+                             {{1, 1, 0}, {3, 3, 0}, {2, 6, 2}, {2, 6, 2}}};
+    auto x   = mm->add_parameter("X", s);
+    mm->add_instruction(
+        migraphx::make_op("pooling",
+                          {{"mode", migraphx::op::pooling_mode::max}, {"dyn_global", true}}),
+        x);
+    p.compile(migraphx::ref::target{});
+
+    std::vector<float> data{0.3, 0.2, 0.4, 0.1, 0.8, 0.5, 0.9, 0.1, 0.1, 0.7, 0.1, 0.6};
+    migraphx::shape input_fixed_shape{migraphx::shape::float_type, {1, 3, 2, 2}};
+    migraphx::parameter_map params;
+    params["X"] = migraphx::argument(input_fixed_shape, data.data());
+    auto result = p.eval(params).back();
+    std::vector<float> results_vector(3);
+    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold{0.4, 0.9, 0.7};
+    EXPECT(migraphx::verify_range(results_vector, gold));
+}
+
 TEST_CASE(greater_brcst_test)
 {
     migraphx::program p;
...
...
@@ -3733,10 +3857,9 @@ TEST_CASE(logsoftmax_test_axis_3)
     EXPECT(migraphx::verify_range(results_vector, s));
 }
 
-TEST_CASE(lppool_test)
+TEST_CASE(lppool_l1_norm_test)
 {
     // L1 norm test
-    {
     migraphx::program p;
     auto* mm = p.get_main_module();
     auto s   = migraphx::shape{migraphx::shape::float_type, {1, 3, 4}};
...
...
@@ -3756,10 +3879,11 @@ TEST_CASE(lppool_test)
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
     std::vector<float> gold{0.5, 0.6, 0.5, 1.3, 1.4, 1.0, 0.8, 0.8, 0.7};
     EXPECT(migraphx::verify_range(results_vector, gold));
-    }
 }
 
+TEST_CASE(lppool_l2_norm_test)
+{
     // L2 norm test
-    {
     migraphx::program p;
     auto* mm = p.get_main_module();
     auto s   = migraphx::shape{migraphx::shape::float_type, {1, 3, 4}};
...
...
@@ -3787,7 +3911,39 @@ TEST_CASE(lppool_test)
                             0.7071067811865475, 0.6082762530298219};
     EXPECT(migraphx::verify_range(results_vector, gold));
-    }
 }
 
+TEST_CASE(lppool_dyn_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto s   = migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {4, 4, 0}}};
+    auto x   = mm->add_parameter("X", s);
+    mm->add_instruction(migraphx::make_op("pooling",
+                                          {{"mode", migraphx::op::pooling_mode::lpnorm},
+                                           {"lengths", {2}},
+                                           {"padding", {0}},
+                                           {"stride", {1}}}),
+                        x);
+    p.compile(migraphx::ref::target{});
+
+    std::vector<float> data{0.3, 0.2, 0.4, 0.1, 0.8, 0.5, 0.9, 0.1, 0.1, 0.7, 0.1, 0.6};
+    migraphx::shape input_fixed_shape{migraphx::shape::float_type, {1, 3, 4}};
+    migraphx::parameter_map params;
+    params["X"] = migraphx::argument(input_fixed_shape, data.data());
+    auto result = p.eval(params).back();
+    std::vector<float> results_vector;
+    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold{0.36055512754639896,
+                            0.447213595499958,
+                            0.4123105625617661,
+                            0.9433981132056605,
+                            1.0295630140987,
+                            0.9055385138137417,
+                            0.7071067811865475,
+                            0.7071067811865475,
+                            0.6082762530298219};
+    EXPECT(migraphx::verify_range(results_vector, gold));
+}
+
 TEST_CASE(lrn_test)
...
...
@@ -3909,10 +4065,9 @@ TEST_CASE(maxpool_test)
     EXPECT(migraphx::verify_range(results_vector, c));
 }
 
-TEST_CASE(maxpool_test_1D_3D)
+TEST_CASE(maxpool_rank3_test0)
 {
     // 1D case 1, input is 3D
-    {
     migraphx::program p;
     auto* mm = p.get_main_module();
     auto s   = migraphx::shape{migraphx::shape::float_type, {1, 3, 4}};
...
...
@@ -3931,10 +4086,11 @@ TEST_CASE(maxpool_test_1D_3D)
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
     std::vector<float> gold{0.3, 0.4, 0.4, 0.8, 0.9, 0.9, 0.7, 0.7, 0.6};
     EXPECT(migraphx::verify_range(results_vector, gold));
-    }
 }
 
+TEST_CASE(maxpool_rank3_test1)
+{
     // 1D case 2, input is 3D
-    {
     migraphx::program p;
     auto* mm = p.get_main_module();
     auto s   = migraphx::shape{migraphx::shape::float_type, {2, 2, 5}};
...
...
@@ -3955,10 +4111,11 @@ TEST_CASE(maxpool_test_1D_3D)
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
     std::vector<float> gold{0.4975, -0.0405, -0.6186, 0.6022, 0.5493, -0.8039, 1.5001, -1.1603};
     EXPECT(migraphx::verify_range(results_vector, gold));
-    }
 }
 
+TEST_CASE(maxpool_rank3_ceil_test)
+{
     // 1D case 2, input is 3D, ceil mode
-    {
     migraphx::program p;
     auto* mm = p.get_main_module();
     auto s   = migraphx::shape{migraphx::shape::float_type, {2, 2, 5}};
...
...
@@ -3991,10 +4148,11 @@ TEST_CASE(maxpool_test_1D_3D)
                             -1.1603, 1.2556};
     EXPECT(migraphx::verify_range(results_vector, gold));
-    }
 }
 
+TEST_CASE(maxpool_rank5_test)
+{
     // 3D, input is 5D
-    {
     migraphx::program p;
     auto* mm = p.get_main_module();
     auto s   = migraphx::shape{migraphx::shape::float_type, {2, 2, 3, 3, 3}};
...
...
@@ -4004,18 +4162,17 @@ TEST_CASE(maxpool_test_1D_3D)
     op.stride = {2, 2, 2};
     std::vector<float> data{
         -2.8029, 0.5861,  0.7015,  0.1297,  -1.44,   -1.9472, 0.7812,  2.408,   -0.3145,
         0.3405,  -0.9146, 0.0624,  1.5064,  -0.8345, 1.7977,  1.8949,  1.0073,  -0.2102,
         -0.042,  -0.7146, 0.6227,  -0.5263, -2.2598, 0.1713,  0.449,   0.5303,  -0.8622,
         -0.5691, 0.907,   -0.0569, -1.5348, -0.4109, -0.1461, -0.5445, 0.4266,  0.2282,
         1.3655,  -2.1519, 0.6068,  -0.2001, -0.4702, 0.3864,  1.7083,  0.9096,  0.4286,
         -1.8866, 0.7034,  0.0293,  1.4587,  0.7672,  -2.8614, 0.8124,  -0.053,  1.0449,
         0.845,   -0.0131, 0.1139,  -0.859,  -1.2681, -0.6337, -0.4644, 0.1938,  0.2889,
         0.9035,  0.7118,  -0.5767, 0.4577,  -0.0549, 0.2237,  0.5756,  0.0677,  -0.0223,
         -0.329,  0.2364,  2.7666,  -0.7417, -1.3196, -0.2655, 0.1698,  -0.1777, -0.9427,
         2.6859,  -0.7501, 0.5175,  1.0029,  -2.6436, -0.4388, -1.2348, -0.1539, -0.6229,
         -0.4136, 0.5085,  0.4136,  -0.6439, -1.1953, -0.406,  -0.0195, 0.1869,  -0.8664,
         1.1364,  0.5041,  0.0647,  0.1941,  -1.0819, -0.4629, -0.5107, 0.3612,  -0.3583};
     auto l0 = mm->add_literal(migraphx::literal{s, data});
     mm->add_instruction(op, l0);
     p.compile(migraphx::ref::target{});
...
...
@@ -4024,7 +4181,31 @@ TEST_CASE(maxpool_test_1D_3D)
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
     std::vector<float> gold{1.5064, 1.3655, 0.9035, 2.6859};
     EXPECT(migraphx::verify_range(results_vector, gold));
-    }
 }
 
+TEST_CASE(maxpool_dyn_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto s   = migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {4, 4, 0}}};
+    auto x   = mm->add_parameter("X", s);
+    mm->add_instruction(migraphx::make_op("pooling",
+                                          {{"mode", migraphx::op::pooling_mode::max},
+                                           {"lengths", {2}},
+                                           {"padding", {0}},
+                                           {"stride", {1}}}),
+                        x);
+    p.compile(migraphx::ref::target{});
+
+    std::vector<float> data{0.3, 0.2, 0.4, 0.1, 0.8, 0.5, 0.9, 0.1, 0.1, 0.7, 0.1, 0.6};
+    migraphx::shape input_fixed_shape{migraphx::shape::float_type, {1, 3, 4}};
+    migraphx::parameter_map params;
+    params["X"] = migraphx::argument(input_fixed_shape, data.data());
+    auto result = p.eval(params).back();
+    std::vector<float> results_vector;
+    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    std::vector<float> gold{0.3, 0.4, 0.4, 0.8, 0.9, 0.9, 0.7, 0.7, 0.6};
+    EXPECT(migraphx::verify_range(results_vector, gold));
+}
+
 TEST_CASE(min_test)
...
...