gaoqiong / MIGraphX, commit 1504d5d5 (unverified)

Merge branch 'develop' into dynamic_reduce

Authored Dec 05, 2022 by Umang Yadav; committed by GitHub on Dec 05, 2022.
Parents: 0f8ea2e4, fdc3f00a

Showing 18 changed files with 976 additions and 396 deletions (+976, -396).
  src/include/migraphx/int_divide.hpp                       +0    -48
  src/include/migraphx/literal.hpp                          +6    -15
  src/include/migraphx/op/pooling.hpp                       +115  -36
  src/include/migraphx/shape_for_each.hpp                   +3    -1
  src/insert_pad.cpp                                        +2    -2
  src/onnx/parse_pooling.cpp                                +82   -38
  test/literal_test.cpp                                     +19   -0
  test/onnx/averagepool_dyn_asym_padding_error_test.onnx    +0    -0
  test/onnx/averagepool_dyn_autopad_error_test.onnx         +0    -0
  test/onnx/averagepool_dyn_cip_error_test.onnx             +0    -0
  test/onnx/averagepool_dyn_test.onnx                       +0    -0
  test/onnx/gen_onnx.py                                     +103  -0
  test/onnx/globalavgpool_dyn_test.onnx                     +0    -0
  test/onnx/globallppool_dyn_test.onnx                      +0    -0
  test/onnx/globalmaxpool_dyn_test.onnx                     +0    -0
  test/onnx/onnx_test.cpp                                   +112  -0
  test/op_shape_test.cpp                                    +100  -3
  test/ref_ops_test.cpp                                     +434  -253
src/include/migraphx/int_divide.hpp
(deleted, mode 100644 → 0)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_RTGLIB_INT_DIVIDE_HPP
#define MIGRAPHX_GUARD_RTGLIB_INT_DIVIDE_HPP

#include <migraphx/config.hpp>
#include <cmath>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {

template <class R, class T, class U>
R floor_divide(T x, U y)
{
    return R(std::floor(double(x) / double(y)));
}

template <class R, class T, class U>
R ceil_divide(T x, U y)
{
    return R(std::ceil(double(x) / double(y)));
}

} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
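This header is deleted because its remaining user, op/pooling.hpp, now performs the divisions in unsigned integer arithmetic (see the calc_spatial_dim_out hunk below). A minimal standalone sketch of that replacement idiom, assuming non-negative operands; nothing here is MIGraphX API:

#include <cassert>
#include <cstddef>
#include <iostream>

// Integer ceil-divide for non-negative values, equivalent to
// R(std::ceil(double(x) / double(y))) without the round-trip through double.
std::size_t ceil_divide_uint(std::size_t x, std::size_t y)
{
    assert(y != 0);
    return x / y + static_cast<std::size_t>(x % y != 0);
}

int main()
{
    std::cout << ceil_divide_uint(7, 3) << '\n'; // 3
    std::cout << ceil_divide_uint(6, 3) << '\n'; // 2
}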
src/include/migraphx/literal.hpp
@@ -80,6 +80,7 @@ struct literal : raw_data<literal>
         fill(start, end);
     }
 
+    // Directly copies buffer of x
     template <class T, MIGRAPHX_REQUIRES(sizeof(T) == 1)>
     literal(const shape& s, T* x) : buffer(make_shared_array<char>(s.bytes())), m_shape(s)
     {
@@ -107,25 +108,15 @@ struct literal : raw_data<literal>
     std::shared_ptr<char> buffer;
     shape m_shape;
 
+    // Keeps the same data ordering as the given container
     template <class Iterator>
     void fill(Iterator start, Iterator end)
     {
         assert(std::distance(start, end) == m_shape.elements());
-        if(m_shape.standard())
-            m_shape.visit_type(
-                [&](auto as) { std::copy(start, end, as.from(buffer.get())); });
-        else
-        {
-            auto it = start;
-            m_shape.visit_type([&](auto as) {
-                auto output = make_view(m_shape, as.from(buffer.get()));
-                shape_for_each(output.get_shape(), [&](const auto& idx) {
-                    output(idx.begin(), idx.end()) = *it; // NOLINT(bugprone-signed-char-misuse)
-                    it++;
-                });
-            });
-        }
+        m_shape.visit_type([&](auto as) {
+            auto output = make_view(m_shape, as.from(buffer.get()));
+            std::copy(start, end, output.begin());
+        });
     }
 };
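The rewritten fill() always scatters through a view built by make_view, so the element order of the source container is preserved for non-standard strides as well; the literal_nstd_shape_vector test added below pins the resulting buffer layout. A self-contained sketch of that scatter using the same shape {1, 3, 2, 2} and strides {12, 1, 6, 3} (plain C++, no MIGraphX types):

#include <cstddef>
#include <iostream>
#include <numeric>
#include <vector>

int main()
{
    // Shape {1, 3, 2, 2} with strides {12, 1, 6, 3}: dimension 1 is innermost.
    const std::size_t lens[4]    = {1, 3, 2, 2};
    const std::size_t strides[4] = {12, 1, 6, 3};

    std::vector<float> data(12);
    std::iota(data.begin(), data.end(), 0.0f); // 0, 1, ..., 11

    std::vector<float> buffer(12);
    std::size_t n = 0;
    // Iterate indices in shape order (as view iteration does) and scatter
    // each source element to its strided offset in the raw buffer.
    for(std::size_t i = 0; i < lens[0]; ++i)
        for(std::size_t j = 0; j < lens[1]; ++j)
            for(std::size_t k = 0; k < lens[2]; ++k)
                for(std::size_t l = 0; l < lens[3]; ++l)
                    buffer[i * strides[0] + j * strides[1] + k * strides[2] +
                           l * strides[3]] = data[n++];

    for(float f : buffer)
        std::cout << f << ' '; // 0 4 8 1 5 9 2 6 10 3 7 11, as the test expects
    std::cout << '\n';
}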
src/include/migraphx/op/pooling.hpp
@@ -31,7 +31,7 @@
 #include <migraphx/argument.hpp>
 #include <migraphx/par_for.hpp>
 #include <migraphx/shape_for_each.hpp>
-#include <migraphx/int_divide.hpp>
+#include <migraphx/dyn_output.hpp>
 #include <cmath>
 #include <utility>
@@ -49,6 +49,9 @@ struct pooling
     bool ceil_mode = false;
     int lp_order   = 2;
 
+    // Global pooling with dynamic shape input
+    bool dyn_global = false;
+
     template <class Self, class F>
     static auto reflect(Self& self, F f)
     {
@@ -57,7 +60,8 @@ struct pooling
                  f(self.stride, "stride"),
                  f(self.lengths, "lengths"),
                  f(self.ceil_mode, "ceil_mode"),
-                 f(self.lp_order, "lp_order"));
+                 f(self.lp_order, "lp_order"),
+                 f(self.dyn_global, "dyn_global"));
     }
 
     std::string name() const { return "pooling"; }
@@ -65,51 +69,111 @@ struct pooling
     void check_attribute_size() const
     {
         if((padding.size() != stride.size() and (padding.size() / 2) != stride.size()) or
-           stride.size() != lengths.size())
+           (not dyn_global and stride.size() != lengths.size()))
         {
             MIGRAPHX_THROW("POOLING: inconsistent attribute sizes");
         }
     }
 
+    size_t kdims() const
+    {
+        check_attribute_size();
+        return stride.size();
+    }
+
     value attributes() const { return {{"normalize_padding", "padding"}}; }
 
+    std::vector<std::size_t> calc_spatial_dim_out(const std::vector<std::size_t>& input_lens,
+                                                  std::size_t kdims) const
+    {
+        std::vector<std::size_t> output_lens{};
+        for(size_t i = 0; i < kdims; ++i)
+        {
+            if(input_lens[i + 2] == 0)
+            {
+                // handle opt = 0
+                output_lens.push_back(0);
+            }
+            else
+            {
+                std::size_t padding_factor = 2 * padding[i];
+                if(padding.size() == 2 * kdims)
+                    padding_factor = padding[i] + padding[i + kdims];
+                assert(input_lens[i + 2] + padding_factor >= lengths[i]);
+                std::size_t dim_size = input_lens[i + 2] + padding_factor - lengths[i];
+                std::size_t len =
+                    (ceil_mode)
+                        ? dim_size / stride[i] +
+                              static_cast<std::size_t>((dim_size % stride[i] != 0)) // ceil uint divide
+                        : dim_size / stride[i];                                     // floor divide
+                output_lens.push_back(len + 1);
+            }
+        }
+        return output_lens;
+    }
+
     shape normalize_compute_shape(std::vector<shape> inputs) const
     {
-        check_shapes{inputs, *this}.has(1);
+        check_shapes{inputs, *this, true}.has(1);
+        check_attribute_size();
         const shape& input = inputs.at(0);
-        auto input_lens   = input.lens();
-        size_t kdims      = input_lens.size() - 2;
-        auto input_size   = inputs[0].lens().size();
-        auto padding_size = padding.size();
-        if(input_size != padding_size / 2 + 2 and input_size != padding_size + 2)
+        auto padding_size  = padding.size();
+        size_t kdims       = input.ndim() - 2;
+        if(input.ndim() != padding_size / 2 + 2 and input.ndim() != padding_size + 2)
         {
             MIGRAPHX_THROW("POOLING: input and attribute size mismatch!");
         }
-        std::vector<std::size_t> output_lens(input_lens.begin(), input_lens.begin() + 2);
-        for(size_t i = 0; i < kdims; i++)
+
+        if(input.dynamic())
         {
-            std::ptrdiff_t dim_size;
-            auto padding_factor = 2 * padding[i];
-            if(padding_size == 2 * kdims)
-                padding_factor = padding[i] + padding[i + kdims];
-            dim_size = input_lens[i + 2] + padding_factor - lengths[i];
-            assert(dim_size >= 0);
-            std::size_t len = (ceil_mode) ? ceil_divide<std::ptrdiff_t>(dim_size, stride[i])
-                                          : floor_divide<std::ptrdiff_t>(dim_size, stride[i]);
-            output_lens.push_back(std::size_t(std::max<std::ptrdiff_t>(1, len + 1)));
+            auto input_dyn_dims = input.dyn_dims();
+            std::vector<shape::dynamic_dimension> output_dyn_dims(input_dyn_dims.begin(),
+                                                                  input_dyn_dims.begin() + 2);
+            if(dyn_global)
+            {
+                for(size_t i = 0; i < kdims; ++i)
+                {
+                    output_dyn_dims.push_back(shape::dynamic_dimension{1, 1, 1});
+                }
+                return {input.type(), output_dyn_dims};
+            }
+            else
+            {
+                auto min_spatial_dims = calc_spatial_dim_out(input.min_lens(), kdims);
+                auto max_spatial_dims = calc_spatial_dim_out(input.max_lens(), kdims);
+                auto opt_spatial_dims = calc_spatial_dim_out(input.opt_lens(), kdims);
+                for(size_t i = 0; i < kdims; ++i)
+                {
+                    output_dyn_dims.push_back(shape::dynamic_dimension{
+                        min_spatial_dims[i], max_spatial_dims[i], opt_spatial_dims[i]});
+                }
+                return {input.type(), output_dyn_dims};
+            }
         }
-        return inputs[0].with_lens(output_lens);
-    }
-
-    size_t kdims() const
-    {
-        check_attribute_size();
-        return stride.size();
+        else
+        {
+            auto input_lens = input.lens();
+            std::vector<std::size_t> output_lens(input_lens.begin(), input_lens.begin() + 2);
+            // Used for when normalize_compute_shape() is called again at model eval time
+            // for an originally dynamic shape. Since kernel shape is not used with dyn_global.
+            if(dyn_global)
+            {
+                for(size_t i = 0; i < kdims; ++i)
+                {
+                    output_lens.push_back(1);
+                }
+                return {input.type(), output_lens};
+            }
+            else
+            {
+                auto output_spatial_lens = calc_spatial_dim_out(input_lens, kdims);
+                output_lens.insert(output_lens.end(),
+                                   output_spatial_lens.begin(),
+                                   output_spatial_lens.end());
+                return inputs[0].with_lens(output_lens);
+            }
+        }
     }
 
     struct lpnorm_pool
@@ -158,7 +222,11 @@ struct pooling
     };
 
     template <class Type, class Out, class In, class Op>
-    void calc_pooling(const shape& output_shape, Out& output, const In& input, Op op) const
+    void calc_pooling(const shape& output_shape,
+                      Out& output,
+                      const In& input,
+                      const std::vector<std::size_t>& kernel_dims,
+                      Op op) const
     {
         auto in_s    = input.get_shape();
         auto in_lens = in_s.lens();
@@ -172,7 +240,7 @@ struct pooling
                 auto d_2 = dim - 2;
                 int start =
                     static_cast<int>(idx_o[dim] * stride[d_2]) - static_cast<int>(padding[d_2]);
-                int end = std::min(start + lengths[d_2], in_lens[dim]);
+                int end = std::min(start + kernel_dims[d_2], in_lens[dim]);
                 start = std::max(start, 0);
                 win_start.push_back(start);
                 win_size.push_back(end - start);
@@ -198,21 +266,32 @@ struct pooling
         });
     }
 
-    argument compute(const shape& output_shape, std::vector<argument> args) const
+    argument compute(const dyn_output& dyn_out, std::vector<argument> args) const
     {
-        argument result{output_shape};
+        argument result{dyn_out.computed_shape};
+        auto input_lens = args[0].get_shape().lens();
+        std::vector<std::size_t> kernel_dims;
+        if(dyn_global)
+        {
+            kernel_dims.insert(kernel_dims.end(), input_lens.begin() + 2, input_lens.end());
+        }
+        else
+        {
+            kernel_dims = this->lengths;
+        }
+
         visit_all(result, args[0])([&](auto output, auto input) {
             using type = typename decltype(output)::value_type;
             switch(mode)
            {
             case migraphx::op::pooling_mode::average:
-                calc_pooling<type>(output_shape, output, input, avg_pool{});
+                calc_pooling<type>(
+                    dyn_out.computed_shape, output, input, kernel_dims, avg_pool{});
                 break;
             case migraphx::op::pooling_mode::max:
-                calc_pooling<type>(output_shape, output, input, max_pool{});
+                calc_pooling<type>(
+                    dyn_out.computed_shape, output, input, kernel_dims, max_pool{});
                 break;
             case migraphx::op::pooling_mode::lpnorm:
-                calc_pooling<type>(output_shape, output, input, lpnorm_pool{lp_order});
+                calc_pooling<type>(
+                    dyn_out.computed_shape, output, input, kernel_dims, lpnorm_pool{lp_order});
                 break;
             }
         });
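For reference, calc_spatial_dim_out() applies the usual pooling formula per spatial dimension, out = div(in + pad_total - kernel, stride) + 1, with ceil or floor division chosen by ceil_mode, applied separately to the min, max, and opt bounds of a dynamic shape. A standalone sketch of that arithmetic (my reading of the hunk above, not a MIGraphX API):

#include <cstddef>
#include <iostream>

std::size_t pool_out_len(
    std::size_t in, std::size_t pad_total, std::size_t kernel, std::size_t stride, bool ceil_mode)
{
    std::size_t dim_size = in + pad_total - kernel;
    std::size_t len =
        ceil_mode ? dim_size / stride + static_cast<std::size_t>(dim_size % stride != 0)
                  : dim_size / stride;
    return len + 1;
}

int main()
{
    // Reproduces pooling_dyn_shape3 in the op_shape_test diff later on:
    // min/max/opt lens {4, 12, 8}, kernel 1, stride 3, no padding -> {2, 4, 3}.
    for(std::size_t in : {4, 12, 8})
        std::cout << pool_out_len(in, 0, 1, 3, false) << ' ';
    std::cout << '\n';
}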
src/include/migraphx/shape_for_each.hpp
@@ -31,6 +31,9 @@
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 
+/**
+ * Iterates the given function over the indices from the shape in order.
+ */
 template <class F>
 void shape_for_each(const migraphx::shape& s, F f)
 {
@@ -51,7 +54,6 @@ void shape_for_each(const migraphx::shape& s, F f)
         call(indices);
     }
 }
-
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
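A short usage sketch of the documented behavior, assuming the functor receives an index container in shape order (the removed literal::fill() above called it with idx.begin()/idx.end(), so a vector-like argument):

#include <migraphx/shape.hpp>
#include <migraphx/shape_for_each.hpp>
#include <iostream>

int main()
{
    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    migraphx::shape_for_each(s, [](const auto& idx) {
        std::cout << idx[0] << ',' << idx[1] << ' '; // 0,0 0,1 0,2 1,0 1,1 1,2
    });
    std::cout << '\n';
}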
src/insert_pad.cpp
@@ -77,14 +77,14 @@ static void update_pooling(const instruction_ref& input, const instruction_ref&
     {
         return;
     }
-    auto kdims = input->get_shape().lens().size() - 2;
+    auto kdims = input->get_shape().ndim() - 2;
     if(std::equal(op.padding.begin(),
                   op.padding.begin() + kdims,
                   op.padding.begin() + kdims,
                   op.padding.end()))
         return;
 
-    std::vector<int64_t> padding(input->get_shape().lens().size() * 2, 0);
+    std::vector<int64_t> padding(input->get_shape().ndim() * 2, 0);
     std::vector<size_t> pads_l(op.padding.begin(), op.padding.begin() + kdims);
     std::vector<size_t> pads_r(op.padding.begin() + kdims, op.padding.end());
     op.padding = std::vector<size_t>(kdims * 2, 0);
src/onnx/parse_pooling.cpp
@@ -47,52 +47,42 @@ struct parse_pooling : op_parser<parse_pooling>
                 {"GlobalLpPool", "lpnorm"}};
     }
 
-    instruction_ref parse(const op_desc& opd,
-                          const onnx_parser& /*parser*/,
-                          onnx_parser::node_info info,
-                          std::vector<instruction_ref> args) const
+    value handle_values(const op_desc& opd,
+                        onnx_parser::node_info info,
+                        const shape& in_shape,
+                        value values) const
     {
-        const std::unordered_map<std::string, op::pooling_mode> mode_map = {
-            {"max", op::pooling_mode::max},
-            {"average", op::pooling_mode::average},
-            {"lpnorm", op::pooling_mode::lpnorm}};
-        std::string mode = opd.op_name;
-        if(not contains(mode_map, mode))
-        {
-            MIGRAPHX_THROW("onnx pooling mode must be [\"max\", \"average\", \"lpnorm\"]");
-        }
-        operation op = make_op("pooling", {{"mode", mode_map.at(mode)}});
-        value values = op.to_value();
-        auto l0      = args[0];
-        auto in_lens = l0->get_shape().lens();
-        assert(in_lens.size() > 2);
-        auto kdims = in_lens.size() - 2;
+        auto kdims = in_shape.ndim() - 2;
 
         if(starts_with(opd.onnx_name, "Global"))
         {
-            values["lengths"] = std::vector<size_t>(in_lens.begin() + 2, in_lens.end());
+            // if spatial dimensions are dynamic use dyn_global flag
+            if(in_shape.dynamic() and
+               std::any_of(in_shape.dyn_dims().cbegin() + 2,
+                           in_shape.dyn_dims().cend(),
+                           [](auto dd) { return not dd.is_fixed(); }))
+            {
+                values["dyn_global"] = true;
+                values["lengths"]    = std::vector<size_t>();
+            }
+            else
+            {
+                // works with static and fixed dynamic shape
+                auto m_lens       = in_shape.max_lens();
+                values["lengths"] = std::vector<size_t>(m_lens.begin() + 2, m_lens.end());
+            }
         }
 
+        // does not support ceil_mode
         if(contains(info.attributes, "ceil_mode"))
         {
             values["ceil_mode"] = static_cast<bool>(info.attributes.at("ceil_mode").i());
         }
 
-        // count include padding, if count include pad is 1, we always use
-        // explicit pad
-        int count_include_pad = 0;
-        if(contains(info.attributes, "count_include_pad"))
-        {
-            count_include_pad = info.attributes.at("count_include_pad").i();
-        }
-
         if(contains(info.attributes, "strides"))
         {
             values["stride"].clear();
             copy(info.attributes["strides"].ints(), std::back_inserter(values["stride"]));
             check_attr_sizes(kdims, values["stride"].size(), "PARSE_POOLING: inconsistent strides");
         }
         if(contains(info.attributes, "kernel_shape"))
         {
             values["lengths"].clear();
@@ -110,6 +100,46 @@ struct parse_pooling : op_parser<parse_pooling>
         // ensure pads availabe only when auto_pad is "NOT_SET"
         check_padding_mode(info, "POOLING");
 
+        return values;
+    }
+
+    instruction_ref parse(const op_desc& opd,
+                          const onnx_parser& /*parser*/,
+                          onnx_parser::node_info info,
+                          std::vector<instruction_ref> args) const
+    {
+        std::string mode = opd.op_name;
+        const std::unordered_map<std::string, op::pooling_mode> mode_map = {
+            {"max", op::pooling_mode::max},
+            {"average", op::pooling_mode::average},
+            {"lpnorm", op::pooling_mode::lpnorm}};
+        if(not contains(mode_map, mode))
+        {
+            MIGRAPHX_THROW(
+                "PARSE_POOLING: onnx pooling mode must be [\"max\", \"average\", \"lpnorm\"]");
+        }
+        operation op  = make_op("pooling", {{"mode", mode_map.at(mode)}});
+        value values  = op.to_value();
+        auto l0       = args[0];
+        auto in_shape = l0->get_shape();
+        assert(in_shape.ndim() > 2);
+        auto kdims = in_shape.ndim() - 2;
+
+        values = handle_values(opd, info, in_shape, values);
+
+        // count include padding, if count include pad is 1, we always use
+        // explicit pad
+        int count_include_pad = 0;
+        if(contains(info.attributes, "count_include_pad"))
+        {
+            if(in_shape.dynamic())
+            {
+                MIGRAPHX_THROW("PARSE_POOLING: count_include_pad attribute is not supported for "
+                               "dynamic input shape");
+            }
+            count_include_pad = info.attributes.at("count_include_pad").i();
+        }
+
         std::vector<int64_t> paddings;
         float pad_val = ((mode == "max") ? std::numeric_limits<float>::lowest() : 0.0f);
@@ -123,14 +153,22 @@ struct parse_pooling : op_parser<parse_pooling>
         if(contains(info.attributes, "auto_pad"))
         {
-            values["padding"].clear();
-            // return paddings could be empty, then setting to 0 for no padding
-            cal_auto_padding_size(info,
-                                  values,
-                                  values["lengths"].to_vector<std::size_t>(),
-                                  {1, 1},
-                                  in_lens,
-                                  paddings);
+            if(in_shape.dynamic())
+            {
+                MIGRAPHX_THROW(
+                    "PARSE_POOLING: Auto padding pooling with dynamic input shape not supported");
+            }
+            else
+            {
+                values["padding"].clear();
+                // return paddings could be empty, then setting to 0 for no padding
+                cal_auto_padding_size(info,
+                                      values,
+                                      values["lengths"].to_vector<std::size_t>(),
+                                      {1, 1},
+                                      in_shape.lens(),
+                                      paddings);
+            }
         }
 
         if(paddings.size() != 2 * kdims)
@@ -150,6 +188,7 @@ struct parse_pooling : op_parser<parse_pooling>
             values["stride"].resize(kdims);
             std::fill_n(values["stride"].begin(), kdims, 1);
         }
+
         // used to calculate the supposed output shape
         std::vector<int64_t> orig_padding = paddings;
@@ -159,6 +198,11 @@ struct parse_pooling : op_parser<parse_pooling>
         if(not slice_start.empty())
         {
+            if(in_shape.dynamic())
+            {
+                MIGRAPHX_THROW(
+                    "PARSE_POOLING: asymmetric padding not supported for dynamic input shape");
+            }
             // calculate expected output shape
             orig_padding.insert(orig_padding.begin() + kdims, 2, 0);
             orig_padding.insert(orig_padding.begin(), 2, 0);
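The Global* branch of handle_values() reduces to one predicate: set dyn_global only when some spatial dimension is non-fixed; a dynamic batch dimension alone still takes the static max_lens() path. A standalone restatement of that predicate, where dyn_dim is a hypothetical stand-in for migraphx::shape::dynamic_dimension:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

// Hypothetical stand-in for shape::dynamic_dimension.
struct dyn_dim
{
    std::size_t min;
    std::size_t max;
    bool is_fixed() const { return min == max; }
};

int main()
{
    // {batch 1-4, channels 3, spatial 16x16}: only the batch is dynamic.
    std::vector<dyn_dim> dims = {{1, 4}, {3, 3}, {16, 16}, {16, 16}};
    bool dyn_global = std::any_of(dims.cbegin() + 2, dims.cend(),
                                  [](auto dd) { return not dd.is_fixed(); });
    std::cout << std::boolalpha << dyn_global << '\n'; // false: static lengths path
}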
test/literal_test.cpp
@@ -49,6 +49,25 @@ TEST_CASE(literal_test)
     EXPECT(l4.empty());
 }
 
+TEST_CASE(literal_nstd_shape_vector)
+{
+    migraphx::shape nstd_shape{migraphx::shape::float_type, {1, 3, 2, 2}, {12, 1, 6, 3}};
+    std::vector<float> data(12);
+    std::iota(data.begin(), data.end(), 0);
+    auto l0 = migraphx::literal{nstd_shape, data};
+
+    // check data buffer is read in correctly
+    std::vector<float> expected_buffer = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
+    const auto* start = reinterpret_cast<const float*>(l0.data());
+    std::vector<float> l0_data{start, start + 12};
+    EXPECT(l0_data == expected_buffer);
+
+    // check that using visit() (that uses a tensor view) gives data in correct order
+    std::vector<float> results_vector(12);
+    l0.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    EXPECT(results_vector == data);
+}
+
 TEST_CASE(literal_os1)
 {
     migraphx::literal l{1};
test/onnx/averagepool_dyn_asym_padding_error_test.onnx  (new file, mode 0 → 100644): File added
test/onnx/averagepool_dyn_autopad_error_test.onnx  (new file, mode 0 → 100644): File added
test/onnx/averagepool_dyn_cip_error_test.onnx  (new file, mode 0 → 100644): File added
test/onnx/averagepool_dyn_test.onnx  (new file, mode 0 → 100644): File added
test/onnx/gen_onnx.py
@@ -237,6 +237,64 @@ def averagepool_3d_test():
     return ([node], [x], [out])
 
 
+@onnx_test
+def averagepool_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
+                                      [None, 3, 5, 5, 5])
+    out = helper.make_tensor_value_info('1', TensorProto.FLOAT,
+                                        [None, 3, 3, 3, 3])
+
+    node = onnx.helper.make_node('AveragePool',
+                                 inputs=['0'],
+                                 outputs=['1'],
+                                 kernel_shape=[3, 3, 3])
+
+    return ([node], [x], [out])
+
+
+@onnx_test
+def averagepool_dyn_autopad_error_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT,
+                                      [None, 1, 5, 5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT,
+                                      [None, 1, 5, 5])
+
+    node = onnx.helper.make_node('AveragePool',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 kernel_shape=[2, 2],
+                                 auto_pad='SAME_LOWER')
+
+    return ([node], [x], [y])
+
+
+@onnx_test
+def averagepool_dyn_asym_padding_error_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT,
+                                      [None, 1, 5, 5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT,
+                                      [None, 1, 3, 3])
+
+    node = onnx.helper.make_node('AveragePool',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 kernel_shape=[2, 2],
+                                 strides=[2, 2],
+                                 pads=[0, 0, 1, 1])
+
+    return ([node], [x], [y])
+
+
+@onnx_test
+def averagepool_dyn_cip_error_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT,
+                                      [None, 1, 5, 5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT,
+                                      [None, 1, 1, 1])
+
+    node = onnx.helper.make_node('AveragePool',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 kernel_shape=[2, 2],
+                                 count_include_pad=1)
+
+    return ([node], [x], [y])
+
+
 @onnx_test
 def averagepool_notset_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
@@ -2069,6 +2127,21 @@ def globalavgpool_test():
     return ([node], [x], [y])
 
 
+@onnx_test
+def globalavgpool_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
+                                      [None, 3, 16, 16])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT,
+                                      [None, 3, 1, 1])
+
+    node = onnx.helper.make_node(
+        'GlobalAveragePool',
+        inputs=['0'],
+        outputs=['1'],
+    )
+
+    return ([node], [x], [y])
+
+
 @onnx_test
 def globallppool_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 16, 16])
@@ -2083,6 +2156,21 @@ def globallppool_test():
     return ([node], [x], [y])
 
 
+@onnx_test
+def globallppool_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
+                                      [1, 3, None, None])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT,
+                                      [1, 3, 1, 1])
+
+    node = onnx.helper.make_node(
+        'GlobalLpPool',
+        inputs=['0'],
+        outputs=['1'],
+    )
+
+    return ([node], [x], [y])
+
+
 @onnx_test
 def globalmaxpool_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 16, 16])
@@ -2097,6 +2185,21 @@ def globalmaxpool_test():
     return ([node], [x], [y])
 
 
+@onnx_test
+def globalmaxpool_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
+                                      [None, 3, 32, 32])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT,
+                                      [None, 3, 1, 1])
+
+    node = onnx.helper.make_node(
+        'GlobalMaxPool',
+        inputs=['0'],
+        outputs=['1'],
+    )
+
+    return ([node], [x], [y])
+
+
 @onnx_test
 def greater_test():
     ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
test/onnx/globalavgpool_dyn_test.onnx  (new file, mode 0 → 100644): File added
test/onnx/globallppool_dyn_test.onnx  (new file, mode 0 → 100644): File added
test/onnx/globalmaxpool_dyn_test.onnx  (new file, mode 0 → 100644): File added
test/onnx/onnx_test.cpp
@@ -273,6 +273,51 @@ TEST_CASE(averagepool_3d_test)
     EXPECT(p == prog);
 }
 
+TEST_CASE(averagepool_dyn_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto l0  = mm->add_parameter(
+        "0",
+        {migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}, {5, 5, 0}}});
+    auto ret = mm->add_instruction(
+        migraphx::make_op("pooling",
+                          {{"mode", migraphx::op::pooling_mode::average},
+                           {"padding", {0, 0, 0, 0, 0, 0}},
+                           {"stride", {1, 1, 1}},
+                           {"lengths", {3, 3, 3}}}),
+        l0);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    auto prog = migraphx::parse_onnx("averagepool_dyn_test.onnx", options);
+    EXPECT(p == prog);
+}
+
+TEST_CASE(averagepool_dyn_autopad_error_test)
+{
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    EXPECT(test::throws(
+        [&] { migraphx::parse_onnx("averagepool_dyn_autopad_error_test.onnx", options); }));
+}
+
+TEST_CASE(averagepool_dyn_asym_padding_error_test)
+{
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    EXPECT(test::throws(
+        [&] { migraphx::parse_onnx("averagepool_dyn_asym_padding_error_test.onnx", options); }));
+}
+
+TEST_CASE(averagepool_dyn_cip_error_test)
+{
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    EXPECT(test::throws(
+        [&] { migraphx::parse_onnx("averagepool_dyn_cip_error_test.onnx", options); }));
+}
+
 TEST_CASE(averagepool_notset_test)
 {
     migraphx::program p;
@@ -2144,6 +2189,28 @@ TEST_CASE(globalavgpool_test)
     EXPECT(p == prog);
 }
 
+TEST_CASE(globalavgpool_dyn_test)
+{
+    migraphx::program p;
+    auto* mm   = p.get_main_module();
+    auto input = mm->add_parameter(
+        "0",
+        migraphx::shape{migraphx::shape::float_type,
+                        {{1, 4, 0}, {3, 3, 0}, {16, 16, 0}, {16, 16, 0}}});
+    auto ret = mm->add_instruction(
+        migraphx::make_op("pooling",
+                          {{"mode", migraphx::op::pooling_mode::average},
+                           {"lengths", {16, 16}},
+                           {"padding", {0, 0, 0, 0}}}),
+        input);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    auto prog = parse_onnx("globalavgpool_dyn_test.onnx", options);
+    EXPECT(p == prog);
+}
+
 TEST_CASE(globallppool_test)
 {
     migraphx::program p;
@@ -2161,6 +2228,29 @@ TEST_CASE(globallppool_test)
     EXPECT(p == prog);
 }
 
+TEST_CASE(globallppool_dyn_test)
+{
+    migraphx::program p;
+    auto* mm   = p.get_main_module();
+    auto input = mm->add_parameter(
+        "0",
+        migraphx::shape{migraphx::shape::float_type,
+                        {{1, 1, 0}, {3, 3, 0}, {16, 32, 0}, {16, 32, 0}}});
+    auto ret = mm->add_instruction(
+        migraphx::make_op("pooling",
+                          {{"mode", migraphx::op::pooling_mode::lpnorm},
+                           {"dyn_global", true},
+                           {"padding", {0, 0, 0, 0}},
+                           {"lengths", {}}}),
+        input);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {16, 32, 0};
+    auto prog = migraphx::parse_onnx("globallppool_dyn_test.onnx", options);
+    EXPECT(p == prog);
+}
+
 TEST_CASE(globalmaxpool_test)
 {
     migraphx::program p;
@@ -2178,6 +2268,28 @@ TEST_CASE(globalmaxpool_test)
     EXPECT(p == prog);
 }
 
+TEST_CASE(globalmaxpool_dyn_test)
+{
+    migraphx::program p;
+    auto* mm   = p.get_main_module();
+    auto input = mm->add_parameter(
+        "0",
+        migraphx::shape{migraphx::shape::float_type,
+                        {{1, 4, 0}, {3, 3, 0}, {32, 32, 0}, {32, 32, 0}}});
+    auto ret = mm->add_instruction(
+        migraphx::make_op("pooling",
+                          {{"mode", migraphx::op::pooling_mode::max},
+                           {"lengths", {32, 32}},
+                           {"padding", {0, 0, 0, 0}}}),
+        input);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    auto prog = parse_onnx("globalmaxpool_dyn_test.onnx", options);
+    EXPECT(p == prog);
+}
+
 TEST_CASE(greater_test)
 {
     migraphx::program p;
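In these tests every `None` dimension of the ONNX graph parses to the dynamic_dimension given by default_dyn_dim_value; the triple reads as {min, max, opt}, with 0 apparently meaning no optimum is set. The minimal pattern, lifted from the cases above:

#include <migraphx/onnx.hpp>

int main()
{
    migraphx::onnx_options options;
    // Unspecified (None) dims parse as min 1, max 4, no optimum.
    options.default_dyn_dim_value = {1, 4, 0};
    auto prog = migraphx::parse_onnx("averagepool_dyn_test.onnx", options);
}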
test/op_shape_test.cpp
@@ -1549,16 +1549,76 @@ TEST_CASE(nms_shape)
         score_thres_s);
 }
 
-TEST_CASE(pooling_shape)
+TEST_CASE(pooling_shape0)
 {
     migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
     throws_shape(migraphx::make_op("pooling",
                                    {{"mode", migraphx::op::pooling_mode::max},
                                     {"padding", {1}},
                                     {"stride", {0}},
                                     {"lengths", {1}}}),
                  input);
+}
 
+TEST_CASE(pooling_shape1)
+{
+    migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
     migraphx::shape output{migraphx::shape::float_type, {4, 3, 1, 1}};
     expect_shape(output,
                  migraphx::make_op("pooling",
                                    {{"mode", migraphx::op::pooling_mode::max},
                                     {"padding", {0, 0}},
                                     {"stride", {3, 3}},
                                     {"lengths", {1, 1}}}),
                  input);
+}
+
+TEST_CASE(pooling_shape2)
+{
+    migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
+    migraphx::shape output{migraphx::shape::float_type, {4, 3, 2, 2}};
+    expect_shape(output,
+                 migraphx::make_op("pooling",
+                                   {{"mode", migraphx::op::pooling_mode::max},
+                                    {"padding", {0, 0}},
+                                    {"stride", {3, 3}},
+                                    {"lengths", {1, 1}},
+                                    {"ceil_mode", true}}),
+                 input);
+}
+
+TEST_CASE(pooling_shape3)
+{
+    migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
+    migraphx::shape output{migraphx::shape::float_type, {4, 3, 3, 3}};
+    expect_shape(output,
+                 migraphx::make_op("pooling",
+                                   {{"mode", migraphx::op::pooling_mode::max},
+                                    {"padding", {2, 2}},
+                                    {"stride", {3, 3}},
+                                    {"lengths", {3, 3}},
+                                    {"ceil_mode", true}}),
+                 input);
+}
+
+TEST_CASE(pooling_dyn_shape0)
+{
+    migraphx::shape input{migraphx::shape::float_type,
+                          {{1, 4, 0}, {3, 3, 3}, {3, 3, 3}, {3, 3, 0}}};
+    throws_shape(migraphx::make_op("pooling",
+                                   {{"mode", migraphx::op::pooling_mode::max},
+                                    {"padding", {1}},
+                                    {"stride", {0}},
+                                    {"lengths", {1}}}),
+                 input);
+}
+
+TEST_CASE(pooling_dyn_shape1)
+{
+    migraphx::shape input{migraphx::shape::float_type,
+                          {{1, 4, 0}, {3, 3, 3}, {3, 3, 3}, {3, 3, 0}}};
+    migraphx::shape output{migraphx::shape::float_type,
+                           {{1, 4, 0}, {3, 3, 3}, {1, 1, 1}, {1, 1, 0}}};
+    expect_shape(output,
+                 migraphx::make_op("pooling",
+                                   {{"mode", migraphx::op::pooling_mode::max},
@@ -1566,9 +1626,15 @@ TEST_CASE(pooling_shape)
                                     {"stride", {3, 3}},
                                     {"lengths", {1, 1}}}),
                  input);
+}
 
-    migraphx::shape output1{migraphx::shape::float_type, {4, 3, 2, 2}};
-    expect_shape(output1,
+TEST_CASE(pooling_dyn_shape2)
+{
+    migraphx::shape input{migraphx::shape::float_type,
+                          {{1, 4, 0}, {5, 5, 0}, {3, 3, 3}, {3, 3, 0}}};
+    migraphx::shape output{migraphx::shape::float_type,
+                           {{1, 4, 0}, {5, 5, 0}, {2, 2, 2}, {2, 2, 0}}};
+    expect_shape(output,
                  migraphx::make_op("pooling",
                                    {{"mode", migraphx::op::pooling_mode::max},
                                     {"padding", {0, 0}},
@@ -1578,6 +1644,37 @@ TEST_CASE(pooling_shape)
                  input);
 }
 
+TEST_CASE(pooling_dyn_shape3)
+{
+    migraphx::shape input{migraphx::shape::float_type,
+                          {{4, 4, 0}, {3, 3, 0}, {4, 12, 8}, {4, 12, 8}}};
+    migraphx::shape output{migraphx::shape::float_type,
+                           {{4, 4, 0}, {3, 3, 0}, {2, 4, 3}, {2, 4, 3}}};
+    expect_shape(output,
+                 migraphx::make_op("pooling",
+                                   {{"mode", migraphx::op::pooling_mode::max},
+                                    {"padding", {0, 0}},
+                                    {"stride", {3, 3}},
+                                    {"lengths", {1, 1}}}),
+                 input);
+}
+
+TEST_CASE(pooling_dyn_shape4)
+{
+    migraphx::shape input{migraphx::shape::float_type,
+                          {{4, 4, 0}, {3, 3, 0}, {4, 12, 8}, {4, 12, 8}}};
+    migraphx::shape output{migraphx::shape::float_type,
+                           {{4, 4, 0}, {3, 3, 0}, {3, 6, 4}, {3, 6, 4}}};
+    expect_shape(output,
+                 migraphx::make_op("pooling",
+                                   {{"mode", migraphx::op::pooling_mode::max},
+                                    {"padding", {2, 2}},
+                                    {"stride", {3, 3}},
+                                    {"lengths", {3, 3}},
+                                    {"ceil_mode", true}}),
+                 input);
+}
+
 TEST_CASE(prefix_scan_sum)
 {
     {
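As a sanity check on the expected shapes above, apply the pooling formula per bound of each dynamic_dimension. For pooling_dyn_shape4, spatial bounds {4, 12, 8} with padding 2 on each side, kernel 3, stride 3, and ceil_mode: 4 gives ceil((4 + 4 - 3) / 3) + 1 = 3; 12 gives ceil((12 + 4 - 3) / 3) + 1 = 6; 8 gives ceil((8 + 4 - 3) / 3) + 1 = 4; hence the output bounds {3, 6, 4}.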
test/ref_ops_test.cpp
(diff collapsed in the original view; +434, -253, not shown here)