gaoqiong / MIGraphX
Commit f17d6246, authored Dec 03, 2022 by Paul
Merge branch 'develop' into ck-gemm-fused
Parents: 87673e1b, fdc3f00a
Showing 20 changed files with 508 additions and 159 deletions (+508 / -159)
examples/nlp/python_bert_squad/requirements_bertsquad.txt (+1 / -1)
src/include/migraphx/int_divide.hpp (+0 / -48)
src/include/migraphx/literal.hpp (+6 / -15)
src/include/migraphx/op/pooling.hpp (+115 / -36)
src/include/migraphx/op/transpose.hpp (+31 / -15)
src/include/migraphx/shape_for_each.hpp (+3 / -1)
src/insert_pad.cpp (+2 / -2)
src/onnx/parse_pooling.cpp (+82 / -38)
src/onnx/parse_transpose.cpp (+1 / -1)
src/targets/gpu/target.cpp (+0 / -2)
test/literal_test.cpp (+19 / -0)
test/onnx/averagepool_dyn_asym_padding_error_test.onnx (+0 / -0)
test/onnx/averagepool_dyn_autopad_error_test.onnx (+0 / -0)
test/onnx/averagepool_dyn_cip_error_test.onnx (+0 / -0)
test/onnx/averagepool_dyn_test.onnx (+0 / -0)
test/onnx/gen_onnx.py (+118 / -0)
test/onnx/globalavgpool_dyn_test.onnx (+0 / -0)
test/onnx/globallppool_dyn_test.onnx (+0 / -0)
test/onnx/globalmaxpool_dyn_test.onnx (+0 / -0)
test/onnx/onnx_test.cpp (+130 / -0)
examples/nlp/python_bert_squad/requirements_bertsquad.txt

@@ -21,6 +21,6 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 # THE SOFTWARE.
 #####################################################################################
-tensorflow==2.7.2
+tensorflow==2.9.3
 onnxruntime
 tokenizers
\ No newline at end of file
src/include/migraphx/int_divide.hpp (deleted, 100644 → 0)

/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_RTGLIB_INT_DIVIDE_HPP
#define MIGRAPHX_GUARD_RTGLIB_INT_DIVIDE_HPP
#include <migraphx/config.hpp>
#include <cmath>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {

template <class R, class T, class U>
R floor_divide(T x, U y)
{
    return R(std::floor(double(x) / double(y)));
}

template <class R, class T, class U>
R ceil_divide(T x, U y)
{
    return R(std::ceil(double(x) / double(y)));
}

} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
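
Side note on the deletion: floor_divide and ceil_divide routed integer division through double, and their only user (pooling.hpp, below) now does the rounding in pure integer arithmetic. A minimal sketch of the equivalence for non-negative operands and a non-zero divisor; this is illustrative code, not part of the diff:

    #include <cassert>
    #include <cmath>

    // Integer ceil-divide: equal to R(std::ceil(double(x) / double(y)))
    // for non-negative x and positive y, without the float round-trip.
    unsigned int ceil_divide_uint(unsigned int x, unsigned int y)
    {
        return x / y + static_cast<unsigned int>(x % y != 0);
    }

    int main()
    {
        for(unsigned int x = 0; x < 100; x++)
            for(unsigned int y = 1; y < 10; y++)
                assert(ceil_divide_uint(x, y) ==
                       static_cast<unsigned int>(std::ceil(double(x) / double(y))));
    }

Staying in integer arithmetic also avoids the precision loss a double round-trip would incur above 2^53, although pooling dimensions never get near that in practice.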
src/include/migraphx/literal.hpp

@@ -80,6 +80,7 @@ struct literal : raw_data<literal>
         fill(start, end);
     }

+    // Directly copies buffer of x
     template <class T, MIGRAPHX_REQUIRES(sizeof(T) == 1)>
     literal(const shape& s, T* x) : buffer(make_shared_array<char>(s.bytes())), m_shape(s)
     {
...
@@ -107,25 +108,15 @@ struct literal : raw_data<literal>
     std::shared_ptr<char> buffer;
     shape m_shape;

+    // Keeps the same data ordering as the given container
     template <class Iterator>
     void fill(Iterator start, Iterator end)
     {
         assert(std::distance(start, end) == m_shape.elements());
-        if(m_shape.standard())
-        {
-            m_shape.visit_type([&](auto as) { std::copy(start, end, as.from(buffer.get())); });
-        }
-        else
-        {
-            auto it = start;
-            m_shape.visit_type([&](auto as) {
-                auto output = make_view(m_shape, as.from(buffer.get()));
-                shape_for_each(output.get_shape(), [&](const auto& idx) {
-                    output(idx.begin(), idx.end()) = *it; // NOLINT(bugprone-signed-char-misuse)
-                    it++;
-                });
-            });
-        }
+        m_shape.visit_type([&](auto as) {
+            auto output = make_view(m_shape, as.from(buffer.get()));
+            std::copy(start, end, output.begin());
+        });
     }
 };
...
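
To see what "keeps the same data ordering" means for a non-standard shape, consider lens {1, 3, 2, 2} with strides {12, 1, 6, 3} (the layout used by the new literal_nstd_shape_vector test further down). A standalone sketch of the index-to-offset mapping, not the MIGraphX view machinery itself:

    #include <cstddef>
    #include <vector>

    // For lens {1, 3, 2, 2} with strides {12, 1, 6, 3}, element (n, c, h, w) lives
    // at buffer offset n*12 + c*1 + h*6 + w*3. Copying the values 0..11 through the
    // strided view in index order therefore produces the buffer
    // 0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11.
    std::size_t offset(const std::vector<std::size_t>& strides,
                       const std::vector<std::size_t>& idx)
    {
        std::size_t off = 0;
        for(std::size_t i = 0; i < idx.size(); i++)
            off += idx[i] * strides[i];
        return off;
    }

    // e.g. offset({12, 1, 6, 3}, {0, 2, 1, 1}) == 2 + 6 + 3 == 11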
src/include/migraphx/op/pooling.hpp

@@ -31,7 +31,7 @@
 #include <migraphx/argument.hpp>
 #include <migraphx/par_for.hpp>
 #include <migraphx/shape_for_each.hpp>
-#include <migraphx/int_divide.hpp>
+#include <migraphx/dyn_output.hpp>
 #include <cmath>
 #include <utility>
...
@@ -49,6 +49,9 @@ struct pooling
     bool ceil_mode = false;
     int lp_order   = 2;

+    // Global pooling with dynamic shape input
+    bool dyn_global = false;
+
     template <class Self, class F>
     static auto reflect(Self& self, F f)
     {
...
@@ -57,7 +60,8 @@ struct pooling
                  f(self.stride, "stride"),
                  f(self.lengths, "lengths"),
                  f(self.ceil_mode, "ceil_mode"),
-                 f(self.lp_order, "lp_order"));
+                 f(self.lp_order, "lp_order"),
+                 f(self.dyn_global, "dyn_global"));
     }

     std::string name() const { return "pooling"; }
...
@@ -65,51 +69,111 @@ struct pooling
     void check_attribute_size() const
     {
         if((padding.size() != stride.size() and (padding.size() / 2) != stride.size()) or
-           stride.size() != lengths.size())
+           (not dyn_global and stride.size() != lengths.size()))
         {
             MIGRAPHX_THROW("POOLING: inconsistent attribute sizes");
         }
     }

+    size_t kdims() const
+    {
+        check_attribute_size();
+        return stride.size();
+    }
+
     value attributes() const { return {{"normalize_padding", "padding"}}; }

+    std::vector<std::size_t> calc_spatial_dim_out(const std::vector<std::size_t>& input_lens,
+                                                  std::size_t kdims) const
+    {
+        std::vector<std::size_t> output_lens{};
+        for(size_t i = 0; i < kdims; ++i)
+        {
+            if(input_lens[i + 2] == 0)
+            {
+                // handle opt = 0
+                output_lens.push_back(0);
+            }
+            else
+            {
+                std::size_t padding_factor = 2 * padding[i];
+                if(padding.size() == 2 * kdims)
+                    padding_factor = padding[i] + padding[i + kdims];
+                assert(input_lens[i + 2] + padding_factor >= lengths[i]);
+                std::size_t dim_size = input_lens[i + 2] + padding_factor - lengths[i];
+                std::size_t len =
+                    (ceil_mode)
+                        ? dim_size / stride[i] +
+                              static_cast<std::size_t>((dim_size % stride[i] != 0)) // ceil uint divide
+                        : dim_size / stride[i]; // floor divide
+                output_lens.push_back(len + 1);
+            }
+        }
+        return output_lens;
+    }
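
calc_spatial_dim_out implements the usual pooling output-size formula: out = (D + pad_total - K) / stride + 1, with the division rounded up under ceil_mode and down otherwise. A standalone restatement of the arithmetic for one dimension with symmetric padding (hypothetical helper, not part of the diff):

    #include <cstddef>

    // out = (D + 2*pad - K) / stride + 1, rounding the division up when ceil_mode.
    std::size_t pool_out_dim(
        std::size_t d, std::size_t k, std::size_t pad, std::size_t stride, bool ceil_mode)
    {
        std::size_t dim_size = d + 2 * pad - k; // assumes d + 2*pad >= k, as asserted above
        std::size_t len      = dim_size / stride;
        if(ceil_mode and dim_size % stride != 0)
            len += 1;
        return len + 1;
    }

    // e.g. pool_out_dim(5, 2, 0, 2, false) == 2 but pool_out_dim(5, 2, 0, 2, true) == 3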
     shape normalize_compute_shape(std::vector<shape> inputs) const
     {
-        check_shapes{inputs, *this}.has(1);
+        check_shapes{inputs, *this, true}.has(1);
         check_attribute_size();

         const shape& input = inputs.at(0);
-        auto input_lens   = input.lens();
-        size_t kdims      = input_lens.size() - 2;
-        auto input_size   = inputs[0].lens().size();
-        auto padding_size = padding.size();
-        if(input_size != padding_size / 2 + 2 and input_size != padding_size + 2)
+        auto padding_size  = padding.size();
+        size_t kdims       = input.ndim() - 2;
+        if(input.ndim() != padding_size / 2 + 2 and input.ndim() != padding_size + 2)
         {
             MIGRAPHX_THROW("POOLING: input and attribute size mismatch!");
         }

-        std::vector<std::size_t> output_lens(input_lens.begin(), input_lens.begin() + 2);
-        for(size_t i = 0; i < kdims; i++)
+        if(input.dynamic())
         {
-            std::ptrdiff_t dim_size;
-            auto padding_factor = 2 * padding[i];
-            if(padding_size == 2 * kdims)
-                padding_factor = padding[i] + padding[i + kdims];
-            dim_size = input_lens[i + 2] + padding_factor - lengths[i];
-            assert(dim_size >= 0);
-            std::size_t len = (ceil_mode) ? ceil_divide<std::ptrdiff_t>(dim_size, stride[i])
-                                          : floor_divide<std::ptrdiff_t>(dim_size, stride[i]);
-            output_lens.push_back(std::size_t(std::max<std::ptrdiff_t>(1, len + 1)));
+            auto input_dyn_dims = input.dyn_dims();
+            std::vector<shape::dynamic_dimension> output_dyn_dims(input_dyn_dims.begin(),
+                                                                  input_dyn_dims.begin() + 2);
+            if(dyn_global)
+            {
+                for(size_t i = 0; i < kdims; ++i)
+                {
+                    output_dyn_dims.push_back(shape::dynamic_dimension{1, 1, 1});
+                }
+                return {input.type(), output_dyn_dims};
+            }
+            else
+            {
+                auto min_spatial_dims = calc_spatial_dim_out(input.min_lens(), kdims);
+                auto max_spatial_dims = calc_spatial_dim_out(input.max_lens(), kdims);
+                auto opt_spatial_dims = calc_spatial_dim_out(input.opt_lens(), kdims);
+                for(size_t i = 0; i < kdims; ++i)
+                {
+                    output_dyn_dims.push_back(shape::dynamic_dimension{
+                        min_spatial_dims[i], max_spatial_dims[i], opt_spatial_dims[i]});
+                }
+                return {input.type(), output_dyn_dims};
+            }
         }
-        return inputs[0].with_lens(output_lens);
-    }
-
-    size_t kdims() const
-    {
-        check_attribute_size();
-        return stride.size();
-    }
+        else
+        {
+            auto input_lens = input.lens();
+            std::vector<std::size_t> output_lens(input_lens.begin(), input_lens.begin() + 2);
+            // Used for when normalize_compute_shape() is called again at model eval time
+            // for an originally dynamic shape. Since kernel shape is not used with dyn_global.
+            if(dyn_global)
+            {
+                for(size_t i = 0; i < kdims; ++i)
+                {
+                    output_lens.push_back(1);
+                }
+                return {input.type(), output_lens};
+            }
+            else
+            {
+                auto output_spatial_lens = calc_spatial_dim_out(input_lens, kdims);
+                output_lens.insert(output_lens.end(),
+                                   output_spatial_lens.begin(),
+                                   output_spatial_lens.end());
+                return inputs[0].with_lens(output_lens);
+            }
+        }
+    }
     struct lpnorm_pool
...
@@ -158,7 +222,11 @@ struct pooling
     };

     template <class Type, class Out, class In, class Op>
-    void calc_pooling(const shape& output_shape, Out& output, const In& input, Op op) const
+    void calc_pooling(const shape& output_shape,
+                      Out& output,
+                      const In& input,
+                      const std::vector<std::size_t>& kernel_dims,
+                      Op op) const
     {
         auto in_s    = input.get_shape();
         auto in_lens = in_s.lens();
...
@@ -172,7 +240,7 @@ struct pooling
             auto d_2  = dim - 2;
             int start = static_cast<int>(idx_o[dim] * stride[d_2]) -
                         static_cast<int>(padding[d_2]);
-            int end = std::min(start + lengths[d_2], in_lens[dim]);
+            int end = std::min(start + kernel_dims[d_2], in_lens[dim]);
             start   = std::max(start, 0);
             win_start.push_back(start);
             win_size.push_back(end - start);
...
@@ -198,21 +266,32 @@ struct pooling
         });
     }

-    argument compute(const shape& output_shape, std::vector<argument> args) const
+    argument compute(const dyn_output& dyn_out, std::vector<argument> args) const
     {
-        argument result{output_shape};
+        argument result{dyn_out.computed_shape};
+        auto input_lens = args[0].get_shape().lens();
+        std::vector<std::size_t> kernel_dims;
+        if(dyn_global)
+        {
+            kernel_dims.insert(kernel_dims.end(), input_lens.begin() + 2, input_lens.end());
+        }
+        else
+        {
+            kernel_dims = this->lengths;
+        }
         visit_all(result, args[0])([&](auto output, auto input) {
             using type = typename decltype(output)::value_type;
             switch(mode)
             {
             case migraphx::op::pooling_mode::average:
-                calc_pooling<type>(output_shape, output, input, avg_pool{});
+                calc_pooling<type>(dyn_out.computed_shape, output, input, kernel_dims, avg_pool{});
                 break;
             case migraphx::op::pooling_mode::max:
-                calc_pooling<type>(output_shape, output, input, max_pool{});
+                calc_pooling<type>(dyn_out.computed_shape, output, input, kernel_dims, max_pool{});
                 break;
             case migraphx::op::pooling_mode::lpnorm:
-                calc_pooling<type>(output_shape, output, input, lpnorm_pool{lp_order});
+                calc_pooling<type>(
+                    dyn_out.computed_shape, output, input, kernel_dims, lpnorm_pool{lp_order});
                 break;
             }
         });
...
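
The compute() changes come down to one idea: with dyn_global the window size is unknown until the real input arrives, so the kernel dimensions are read off the runtime input rather than the lengths attribute. A standalone sketch of that selection (hypothetical helper mirroring the logic above, not the MIGraphX API):

    #include <cstddef>
    #include <vector>

    // When dyn_global is set, the window spans all spatial dimensions of the
    // runtime input; otherwise the fixed lengths attribute is used.
    std::vector<std::size_t> pick_kernel_dims(bool dyn_global,
                                              const std::vector<std::size_t>& input_lens,
                                              const std::vector<std::size_t>& lengths)
    {
        if(dyn_global)
            return {input_lens.begin() + 2, input_lens.end()};
        return lengths;
    }

    // e.g. pick_kernel_dims(true, {1, 3, 16, 16}, {}) == {16, 16}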
src/include/migraphx/op/transpose.hpp

@@ -29,6 +29,7 @@
 #include <migraphx/config.hpp>
 #include <migraphx/value.hpp>
 #include <migraphx/op/normalize_attribute.hpp>
+#include <migraphx/dyn_output.hpp>

 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
...
@@ -45,17 +46,15 @@ struct transpose
     }
     std::string name() const { return "transpose"; }

     shape compute_shape(std::vector<shape> inputs) const
     {
-        check_shapes{inputs, *this}.has(1);
-        auto input         = inputs.at(0);
-        auto input_lens    = input.lens();
-        auto input_strides = input.strides();
-        auto t             = input.type();
+        check_shapes{inputs, *this, true}.has(1);
+        auto input = inputs.at(0);

-        if(dims.size() != input_lens.size())
+        if(dims.size() != input.ndim())
         {
-            MIGRAPHX_THROW("Permutation has wrong number of axes");
+            MIGRAPHX_THROW("TRANSPOSE: Permutation has wrong number of axes");
         }
         std::vector<int64_t> axes(dims.size());
         std::iota(axes.begin(), axes.end(), 0);
...
@@ -63,19 +62,36 @@ struct transpose
         {
             MIGRAPHX_THROW("TRANSPOSE: Invalid permutation");
         }
-        std::vector<size_t> output_lens(input_lens.size());
-        std::vector<size_t> output_strides(input_lens.size());
-        for(std::size_t i = 0; i < output_lens.size(); i++)
+        if(input.dynamic())
         {
-            output_lens[i]    = input_lens[dims[i]];
-            output_strides[i] = input_strides[dims[i]];
+            std::vector<shape::dynamic_dimension> output_dyn_dims(input.ndim());
+            std::transform(dims.cbegin(), dims.cend(), output_dyn_dims.begin(), [&](auto dim) {
+                return input.dyn_dims()[dim];
+            });
+            return {input.type(), output_dyn_dims};
         }
-        return {t, output_lens, output_strides};
+        else
+        {
+            auto input_lens    = input.lens();
+            auto input_strides = input.strides();
+            std::vector<size_t> output_lens(input.ndim());
+            std::vector<size_t> output_strides(input.ndim());
+            for(std::size_t i = 0; i < input.ndim(); i++)
+            {
+                output_lens[i]    = input_lens[dims[i]];
+                output_strides[i] = input_strides[dims[i]];
+            }
+            return {input.type(), output_lens, output_strides};
+        }
     }

-    argument compute(shape output_shape, std::vector<argument> args) const
+    argument compute(const dyn_output& dyn_out, std::vector<argument> args) const
     {
-        return args[0].reshape(output_shape);
+        return args[0].reshape(dyn_out.computed_shape);
     }
     std::ptrdiff_t output_alias(const std::vector<shape>&) const { return 0; }
 };
...
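
The dynamic branch of compute_shape permutes the per-axis dynamic_dimension entries exactly as the static branch permutes lens and strides. A standalone sketch of the permutation, with a {min, max} pair standing in for shape::dynamic_dimension:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <utility>
    #include <vector>

    // Permute per-axis ranges the way transpose::compute_shape does for dynamic shapes.
    using dyn_dim = std::pair<std::size_t, std::size_t>;

    std::vector<dyn_dim> permute_dyn_dims(const std::vector<dyn_dim>& in,
                                          const std::vector<std::int64_t>& perm)
    {
        std::vector<dyn_dim> out(in.size());
        std::transform(perm.cbegin(), perm.cend(), out.begin(), [&](auto d) { return in[d]; });
        return out;
    }

    // perm {0, 3, 1, 2} on {{1,4}, {2,2}, {2,2}, {3,3}} gives {{1,4}, {3,3}, {2,2}, {2,2}},
    // matching the transpose_dyn_test shapes below.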
src/include/migraphx/shape_for_each.hpp

@@ -31,6 +31,9 @@
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {

+/**
+ * Iterates the given function over the indices from the shape in order.
+ */
 template <class F>
 void shape_for_each(const migraphx::shape& s, F f)
 {
...
@@ -51,7 +54,6 @@ void shape_for_each(const migraphx::shape& s, F f)
         call(indices);
     }
 }
-
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
...
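
The newly documented contract, illustrated: for a shape with lens {2, 2} the callback receives the multi-indices {0, 0}, {0, 1}, {1, 0}, {1, 1} in that order. A hedged usage sketch, mirroring how the old literal::fill called it:

    #include <migraphx/shape.hpp>
    #include <migraphx/shape_for_each.hpp>

    // Sketch only: visit every multi-index of a 2x2 shape in order.
    void visit_all_indices()
    {
        migraphx::shape s{migraphx::shape::float_type, {2, 2}};
        migraphx::shape_for_each(s, [](const auto& idx) {
            // idx is the current multi-index: {0,0}, {0,1}, {1,0}, {1,1}
            (void)idx;
        });
    }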
src/insert_pad.cpp

@@ -77,14 +77,14 @@ static void update_pooling(const instruction_ref& input, const instruction_ref&
     {
         return;
     }

-    auto kdims = input->get_shape().lens().size() - 2;
+    auto kdims = input->get_shape().ndim() - 2;
     if(std::equal(op.padding.begin(),
                   op.padding.begin() + kdims,
                   op.padding.begin() + kdims,
                   op.padding.end()))
         return;

-    std::vector<int64_t> padding(input->get_shape().lens().size() * 2, 0);
+    std::vector<int64_t> padding(input->get_shape().ndim() * 2, 0);
     std::vector<size_t> pads_l(op.padding.begin(), op.padding.begin() + kdims);
     std::vector<size_t> pads_r(op.padding.begin() + kdims, op.padding.end());
     op.padding = std::vector<size_t>(kdims * 2, 0);
...
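
A detail worth noting in the unchanged condition above: it uses the four-iterator overload of std::equal to compare the first kdims entries of op.padding (left pads) against the remaining entries (right pads), bailing out when the padding is already symmetric. A standalone sketch of the check:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // True when the first kdims entries (left pads) equal the last kdims entries
    // (right pads), i.e. the padding is symmetric.
    bool symmetric_padding(const std::vector<std::size_t>& padding, std::size_t kdims)
    {
        return std::equal(padding.begin(), padding.begin() + kdims,
                          padding.begin() + kdims, padding.end());
    }

    // symmetric_padding({1, 1, 1, 1}, 2) == true; symmetric_padding({0, 0, 1, 1}, 2) == false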
src/onnx/parse_pooling.cpp

@@ -47,52 +47,42 @@ struct parse_pooling : op_parser<parse_pooling>
                 {"GlobalLpPool", "lpnorm"}};
     }

-    instruction_ref parse(const op_desc& opd,
-                          const onnx_parser& /*parser*/,
-                          onnx_parser::node_info info,
-                          std::vector<instruction_ref> args) const
+    value handle_values(const op_desc& opd,
+                        onnx_parser::node_info info,
+                        const shape& in_shape,
+                        value values) const
     {
-        const std::unordered_map<std::string, op::pooling_mode> mode_map = {
-            {"max", op::pooling_mode::max},
-            {"average", op::pooling_mode::average},
-            {"lpnorm", op::pooling_mode::lpnorm}};
-        std::string mode = opd.op_name;
-        if(not contains(mode_map, mode))
-        {
-            MIGRAPHX_THROW("onnx pooling mode must be [\"max\", \"average\", \"lpnorm\"]");
-        }
-        operation op = make_op("pooling", {{"mode", mode_map.at(mode)}});
-        value values = op.to_value();
-        auto l0      = args[0];
-        auto in_lens = l0->get_shape().lens();
-        assert(in_lens.size() > 2);
-        auto kdims = in_lens.size() - 2;
+        auto kdims = in_shape.ndim() - 2;

         if(starts_with(opd.onnx_name, "Global"))
         {
-            values["lengths"] = std::vector<size_t>(in_lens.begin() + 2, in_lens.end());
+            // if spatial dimensions are dynamic use dyn_global flag
+            if(in_shape.dynamic() and std::any_of(in_shape.dyn_dims().cbegin() + 2,
+                                                  in_shape.dyn_dims().cend(),
+                                                  [](auto dd) { return not dd.is_fixed(); }))
+            {
+                values["dyn_global"] = true;
+                values["lengths"]    = std::vector<size_t>();
+            }
+            else
+            {
+                // works with static and fixed dynamic shape
+                auto m_lens       = in_shape.max_lens();
+                values["lengths"] = std::vector<size_t>(m_lens.begin() + 2, m_lens.end());
+            }
         }

         // does not support ceil_mode
         if(contains(info.attributes, "ceil_mode"))
         {
             values["ceil_mode"] = static_cast<bool>(info.attributes.at("ceil_mode").i());
         }

-        // count include padding, if count include pad is 1, we always use
-        // explicit pad
-        int count_include_pad = 0;
-        if(contains(info.attributes, "count_include_pad"))
-        {
-            count_include_pad = info.attributes.at("count_include_pad").i();
-        }

         if(contains(info.attributes, "strides"))
         {
             values["stride"].clear();
             copy(info.attributes["strides"].ints(), std::back_inserter(values["stride"]));
             check_attr_sizes(kdims, values["stride"].size(), "PARSE_POOLING: inconsistent strides");
         }
         if(contains(info.attributes, "kernel_shape"))
         {
             values["lengths"].clear();
...
@@ -110,6 +100,46 @@ struct parse_pooling : op_parser<parse_pooling>
         // ensure pads availabe only when auto_pad is "NOT_SET"
         check_padding_mode(info, "POOLING");

+        return values;
+    }
+
+    instruction_ref parse(const op_desc& opd,
+                          const onnx_parser& /*parser*/,
+                          onnx_parser::node_info info,
+                          std::vector<instruction_ref> args) const
+    {
+        std::string mode = opd.op_name;
+        const std::unordered_map<std::string, op::pooling_mode> mode_map = {
+            {"max", op::pooling_mode::max},
+            {"average", op::pooling_mode::average},
+            {"lpnorm", op::pooling_mode::lpnorm}};
+        if(not contains(mode_map, mode))
+        {
+            MIGRAPHX_THROW(
+                "PARSE_POOLING: onnx pooling mode must be [\"max\", \"average\", \"lpnorm\"]");
+        }
+        operation op = make_op("pooling", {{"mode", mode_map.at(mode)}});
+        value values = op.to_value();
+        auto l0       = args[0];
+        auto in_shape = l0->get_shape();
+        assert(in_shape.ndim() > 2);
+        auto kdims = in_shape.ndim() - 2;
+
+        values = handle_values(opd, info, in_shape, values);
+
+        // count include padding, if count include pad is 1, we always use
+        // explicit pad
+        int count_include_pad = 0;
+        if(contains(info.attributes, "count_include_pad"))
+        {
+            if(in_shape.dynamic())
+            {
+                MIGRAPHX_THROW("PARSE_POOLING: count_include_pad attribute is not supported for "
+                               "dynamic input shape");
+            }
+            count_include_pad = info.attributes.at("count_include_pad").i();
+        }
+
         std::vector<int64_t> paddings;
         float pad_val = ((mode == "max") ? std::numeric_limits<float>::lowest() : 0.0f);
...
@@ -123,14 +153,22 @@ struct parse_pooling : op_parser<parse_pooling>
         if(contains(info.attributes, "auto_pad"))
         {
-            values["padding"].clear();
-            // return paddings could be empty, then setting to 0 for no padding
-            cal_auto_padding_size(info,
-                                  values,
-                                  values["lengths"].to_vector<std::size_t>(),
-                                  {1, 1},
-                                  in_lens,
-                                  paddings);
+            if(in_shape.dynamic())
+            {
+                MIGRAPHX_THROW(
+                    "PARSE_POOLING: Auto padding pooling with dynamic input shape not supported");
+            }
+            else
+            {
+                values["padding"].clear();
+                // return paddings could be empty, then setting to 0 for no padding
+                cal_auto_padding_size(info,
+                                      values,
+                                      values["lengths"].to_vector<std::size_t>(),
+                                      {1, 1},
+                                      in_shape.lens(),
+                                      paddings);
+            }
         }

         if(paddings.size() != 2 * kdims)
...
@@ -150,6 +188,7 @@ struct parse_pooling : op_parser<parse_pooling>
             values["stride"].resize(kdims);
             std::fill_n(values["stride"].begin(), kdims, 1);
         }
+
         // used to calculate the supposed output shape
         std::vector<int64_t> orig_padding = paddings;
...
@@ -159,6 +198,11 @@ struct parse_pooling : op_parser<parse_pooling>
         if(not slice_start.empty())
         {
+            if(in_shape.dynamic())
+            {
+                MIGRAPHX_THROW(
+                    "PARSE_POOLING: asymmetric padding not supported for dynamic input shape");
+            }
             // calculate expected output shape
             orig_padding.insert(orig_padding.begin() + kdims, 2, 0);
             orig_padding.insert(orig_padding.begin(), 2, 0);
...
src/onnx/parse_transpose.cpp

@@ -47,7 +47,7 @@ struct parse_transpose : op_parser<parse_transpose>
     }

     // if perm is empty, use the default value
-    auto n_dim = args.front()->get_shape().lens().size();
+    auto n_dim = args.front()->get_shape().ndim();
     if(perm.empty())
     {
         perm.resize(n_dim);
...
src/targets/gpu/target.cpp

@@ -149,8 +149,6 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
         dead_code_elimination{},
         pack_int8_args{},
         dead_code_elimination{},
-        adjust_allocation{gpu_allocation_model{}},
-        dead_code_elimination{},
         fuse_ops{&ctx, options.fast_math},
         dead_code_elimination{},
         replace_allocate{gpu_allocation_model{}, options.offload_copy},
...
test/literal_test.cpp

@@ -49,6 +49,25 @@ TEST_CASE(literal_test)
     EXPECT(l4.empty());
 }

+TEST_CASE(literal_nstd_shape_vector)
+{
+    migraphx::shape nstd_shape{migraphx::shape::float_type, {1, 3, 2, 2}, {12, 1, 6, 3}};
+    std::vector<float> data(12);
+    std::iota(data.begin(), data.end(), 0);
+    auto l0 = migraphx::literal{nstd_shape, data};
+
+    // check data buffer is read in correctly
+    std::vector<float> expected_buffer = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
+    const auto* start = reinterpret_cast<const float*>(l0.data());
+    std::vector<float> l0_data{start, start + 12};
+    EXPECT(l0_data == expected_buffer);
+
+    // check that using visit() (that uses a tensor view) gives data in correct order
+    std::vector<float> results_vector(12);
+    l0.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
+    EXPECT(results_vector == data);
+}
+
 TEST_CASE(literal_os1)
 {
     migraphx::literal l{1};
...
test/onnx/averagepool_dyn_asym_padding_error_test.onnx (new file, 0 → 100644): file added
test/onnx/averagepool_dyn_autopad_error_test.onnx (new file, 0 → 100644): file added
test/onnx/averagepool_dyn_cip_error_test.onnx (new file, 0 → 100644): file added
test/onnx/averagepool_dyn_test.onnx (new file, 0 → 100644): file added
test/onnx/gen_onnx.py

@@ -237,6 +237,64 @@ def averagepool_3d_test():
     return ([node], [x], [out])


+@onnx_test
+def averagepool_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 3, 5, 5, 5])
+    out = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 3, 3, 3, 3])
+    node = onnx.helper.make_node('AveragePool',
+                                 inputs=['0'],
+                                 outputs=['1'],
+                                 kernel_shape=[3, 3, 3])
+    return ([node], [x], [out])
+
+
+@onnx_test
+def averagepool_dyn_autopad_error_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 1, 5, 5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, 1, 5, 5])
+    node = onnx.helper.make_node('AveragePool',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 kernel_shape=[2, 2],
+                                 auto_pad='SAME_LOWER')
+    return ([node], [x], [y])
+
+
+@onnx_test
+def averagepool_dyn_asym_padding_error_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 1, 5, 5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, 1, 3, 3])
+    node = onnx.helper.make_node('AveragePool',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 kernel_shape=[2, 2],
+                                 strides=[2, 2],
+                                 pads=[0, 0, 1, 1])
+    return ([node], [x], [y])
+
+
+@onnx_test
+def averagepool_dyn_cip_error_test():
+    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 1, 5, 5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, 1, 1, 1])
+    node = onnx.helper.make_node('AveragePool',
+                                 inputs=['x'],
+                                 outputs=['y'],
+                                 kernel_shape=[2, 2],
+                                 count_include_pad=1)
+    return ([node], [x], [y])
+
+
 @onnx_test
 def averagepool_notset_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
...
@@ -2069,6 +2127,21 @@ def globalavgpool_test():
     return ([node], [x], [y])


+@onnx_test
+def globalavgpool_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 3, 16, 16])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 3, 1, 1])
+    node = onnx.helper.make_node(
+        'GlobalAveragePool',
+        inputs=['0'],
+        outputs=['1'],
+    )
+    return ([node], [x], [y])
+
+
 @onnx_test
 def globallppool_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 16, 16])
...
@@ -2083,6 +2156,21 @@ def globallppool_test():
     return ([node], [x], [y])


+@onnx_test
+def globallppool_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, None, None])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 1, 1])
+    node = onnx.helper.make_node(
+        'GlobalLpPool',
+        inputs=['0'],
+        outputs=['1'],
+    )
+    return ([node], [x], [y])
+
+
 @onnx_test
 def globalmaxpool_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 16, 16])
...
@@ -2097,6 +2185,21 @@ def globalmaxpool_test():
     return ([node], [x], [y])


+@onnx_test
+def globalmaxpool_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 3, 32, 32])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 3, 1, 1])
+    node = onnx.helper.make_node(
+        'GlobalMaxPool',
+        inputs=['0'],
+        outputs=['1'],
+    )
+    return ([node], [x], [y])
+
+
 @onnx_test
 def greater_test():
     ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
...
@@ -6277,6 +6380,21 @@ def transpose_test():
     return ([node], [x], [y])


+@onnx_test
+def transpose_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 2, 2, 3])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 3, 2, 2])
+    node = onnx.helper.make_node(
+        'Transpose',
+        perm=[0, 3, 1, 2],
+        inputs=['0'],
+        outputs=['1'],
+    )
+    return ([node], [x], [y])
+
+
 @onnx_test
 def transpose_gather_test():
     x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 5, 4, 6])
...
test/onnx/globalavgpool_dyn_test.onnx (new file, 0 → 100644): file added
test/onnx/globallppool_dyn_test.onnx (new file, 0 → 100644): file added
test/onnx/globalmaxpool_dyn_test.onnx (new file, 0 → 100644): file added
test/onnx/onnx_test.cpp

@@ -273,6 +273,51 @@ TEST_CASE(averagepool_3d_test)
     EXPECT(p == prog);
 }

+TEST_CASE(averagepool_dyn_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto l0  = mm->add_parameter(
+        "0",
+        {migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}, {5, 5, 0}}});
+    auto ret = mm->add_instruction(
+        migraphx::make_op("pooling",
+                          {{"mode", migraphx::op::pooling_mode::average},
+                           {"padding", {0, 0, 0, 0, 0, 0}},
+                           {"stride", {1, 1, 1}},
+                           {"lengths", {3, 3, 3}}}),
+        l0);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    auto prog                     = migraphx::parse_onnx("averagepool_dyn_test.onnx", options);
+    EXPECT(p == prog);
+}
+
+TEST_CASE(averagepool_dyn_autopad_error_test)
+{
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    EXPECT(test::throws(
+        [&] { migraphx::parse_onnx("averagepool_dyn_autopad_error_test.onnx", options); }));
+}
+
+TEST_CASE(averagepool_dyn_asym_padding_error_test)
+{
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    EXPECT(test::throws(
+        [&] { migraphx::parse_onnx("averagepool_dyn_asym_padding_error_test.onnx", options); }));
+}
+
+TEST_CASE(averagepool_dyn_cip_error_test)
+{
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    EXPECT(test::throws(
+        [&] { migraphx::parse_onnx("averagepool_dyn_cip_error_test.onnx", options); }));
+}
+
 TEST_CASE(averagepool_notset_test)
 {
     migraphx::program p;
...
@@ -2144,6 +2189,28 @@ TEST_CASE(globalavgpool_test)
     EXPECT(p == prog);
 }

+TEST_CASE(globalavgpool_dyn_test)
+{
+    migraphx::program p;
+    auto* mm   = p.get_main_module();
+    auto input = mm->add_parameter(
+        "0",
+        migraphx::shape{migraphx::shape::float_type,
+                        {{1, 4, 0}, {3, 3, 0}, {16, 16, 0}, {16, 16, 0}}});
+    auto ret = mm->add_instruction(
+        migraphx::make_op("pooling",
+                          {{"mode", migraphx::op::pooling_mode::average},
+                           {"lengths", {16, 16}},
+                           {"padding", {0, 0, 0, 0}}}),
+        input);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    auto prog                     = parse_onnx("globalavgpool_dyn_test.onnx", options);
+    EXPECT(p == prog);
+}
+
 TEST_CASE(globallppool_test)
 {
     migraphx::program p;
...
@@ -2161,6 +2228,29 @@ TEST_CASE(globallppool_test)
     EXPECT(p == prog);
 }

+TEST_CASE(globallppool_dyn_test)
+{
+    migraphx::program p;
+    auto* mm   = p.get_main_module();
+    auto input = mm->add_parameter(
+        "0",
+        migraphx::shape{migraphx::shape::float_type,
+                        {{1, 1, 0}, {3, 3, 0}, {16, 32, 0}, {16, 32, 0}}});
+    auto ret = mm->add_instruction(
+        migraphx::make_op("pooling",
+                          {{"mode", migraphx::op::pooling_mode::lpnorm},
+                           {"dyn_global", true},
+                           {"padding", {0, 0, 0, 0}},
+                           {"lengths", {}}}),
+        input);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {16, 32, 0};
+    auto prog                     = migraphx::parse_onnx("globallppool_dyn_test.onnx", options);
+    EXPECT(p == prog);
+}
+
 TEST_CASE(globalmaxpool_test)
 {
     migraphx::program p;
...
@@ -2178,6 +2268,28 @@ TEST_CASE(globalmaxpool_test)
     EXPECT(p == prog);
 }

+TEST_CASE(globalmaxpool_dyn_test)
+{
+    migraphx::program p;
+    auto* mm   = p.get_main_module();
+    auto input = mm->add_parameter(
+        "0",
+        migraphx::shape{migraphx::shape::float_type,
+                        {{1, 4, 0}, {3, 3, 0}, {32, 32, 0}, {32, 32, 0}}});
+    auto ret = mm->add_instruction(
+        migraphx::make_op("pooling",
+                          {{"mode", migraphx::op::pooling_mode::max},
+                           {"lengths", {32, 32}},
+                           {"padding", {0, 0, 0, 0}}}),
+        input);
+    mm->add_return({ret});
+
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    auto prog                     = parse_onnx("globalmaxpool_dyn_test.onnx", options);
+    EXPECT(p == prog);
+}
+
 TEST_CASE(greater_test)
 {
     migraphx::program p;
...
@@ -5973,6 +6085,24 @@ TEST_CASE(transpose_test)
     EXPECT(p == prog);
 }

+TEST_CASE(transpose_dyn_test)
+{
+    migraphx::program p;
+    auto* mm   = p.get_main_module();
+    auto input = mm->add_parameter(
+        "0",
+        migraphx::shape{migraphx::shape::float_type,
+                        {{1, 4, 0}, {2, 2, 0}, {2, 2, 0}, {3, 3, 0}}});
+    std::vector<int64_t> perm{0, 3, 1, 2};
+    auto t0 = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), input);
+    mm->add_return({t0});
+
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    auto prog                     = migraphx::parse_onnx("transpose_dyn_test.onnx", options);
+    EXPECT(p == prog);
+}
+
 TEST_CASE(topk_attrk_test)
 {
     migraphx::program p;
...