Commit edc23800, authored Feb 11, 2022 by Shucai Xiao

change the data type for lens and strides from size_t to int in the shape class

parent c7419a9c
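A plausible motivation, not spelled out in the commit message: with unsigned size_t lens and strides, subtraction in shape arithmetic wraps around instead of going negative, so sanity checks such as the assert(dim_size >= 0) in the pooling hunk below can never fire. A minimal standalone demonstration of the difference (illustration only, not MIGraphX code):

#include <cstddef>
#include <cstdio>

int main()
{
    // With unsigned size_t lengths, input + padding - kernel wraps on
    // underflow to a huge positive value instead of going negative...
    std::size_t u = std::size_t(2) + 0 - 3;
    std::printf("unsigned: %zu\n", u);

    // ...with signed int lengths the same arithmetic yields -1, so a
    // dim_size >= 0 sanity check (as in pooling.hpp below) can actually fire.
    int s = 2 + 0 - 3;
    std::printf("signed: %d\n", s);
}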
63 changed files in total; this page shows 20 changed files with 150 additions and 150 deletions.
src/include/migraphx/op/pooling.hpp             +6  -6
src/include/migraphx/op/quant_convolution.hpp   +8  -8
src/include/migraphx/op/reduce_op.hpp           +3  -3
src/include/migraphx/op/rnn.hpp                 +3  -3
src/include/migraphx/op/roialign.hpp            +17 -17
src/include/migraphx/op/slice.hpp               +8  -8
src/include/migraphx/par_for.hpp                +14 -14
src/include/migraphx/rewrite_rnn.hpp            +1  -1
src/include/migraphx/shape.hpp                  +24 -24
src/include/migraphx/shape_for_each.hpp         +4  -4
src/include/migraphx/tensor_view.hpp            +14 -14
src/insert_pad.cpp                              +7  -7
src/normalize_attributes.cpp                    +2  -2
src/onnx/include/migraphx/onnx/onnx_parser.hpp  +5  -5
src/onnx/include/migraphx/onnx/padding.hpp      +3  -3
src/onnx/onnx.cpp                               +1  -1
src/onnx/onnx_parser.cpp                        +13 -13
src/onnx/padding.cpp                            +13 -13
src/onnx/parse_convolution.cpp                  +3  -3
src/onnx/parse_expand.cpp                       +1  -1
src/include/migraphx/op/pooling.hpp

@@ -22,9 +22,9 @@ namespace op {
 struct pooling
 {
     std::string mode = "average";
-    std::vector<std::size_t> padding = {0, 0};
-    std::vector<std::size_t> stride  = {1, 1};
-    std::vector<std::size_t> lengths = {1, 1};
+    std::vector<int> padding = {0, 0};
+    std::vector<int> stride  = {1, 1};
+    std::vector<int> lengths = {1, 1};
     bool ceil_mode = false;

     template <class Self, class F>
@@ -65,7 +65,7 @@ struct pooling
             MIGRAPHX_THROW("POOLING: input and attribute size mismatch!");
         }

-        std::vector<std::size_t> output_lens(input_lens.begin(), input_lens.begin() + 2);
+        std::vector<int> output_lens(input_lens.begin(), input_lens.begin() + 2);
         for(size_t i = 0; i < kdims; i++)
         {
@@ -75,10 +75,10 @@ struct pooling
             padding_factor = padding[i] + padding[i + kdims];
             dim_size       = input_lens[i + 2] + padding_factor - lengths[i];
             assert(dim_size >= 0);
-            std::size_t len = (ceil_mode) ? ceil_divide<std::ptrdiff_t>(dim_size, stride[i])
-                                          : floor_divide<std::ptrdiff_t>(dim_size, stride[i]);
-            output_lens.push_back(std::size_t(std::max<std::ptrdiff_t>(1, len + 1)));
+            int len = (ceil_mode) ? ceil_divide<std::ptrdiff_t>(dim_size, stride[i])
+                                  : floor_divide<std::ptrdiff_t>(dim_size, stride[i]);
+            output_lens.push_back(int(std::max<std::ptrdiff_t>(1, len + 1)));
         }

         return inputs[0].with_lens(output_lens);
     }
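The output-length arithmetic in this hunk can be sanity-checked in isolation. A minimal sketch, with simple stand-ins for MIGraphX's ceil_divide/floor_divide helpers (which live elsewhere in the codebase):

#include <algorithm>
#include <cassert>
#include <cstdio>

// Hypothetical stand-ins for migraphx's ceil_divide/floor_divide helpers.
long ceil_divide(long x, long y) { return (x + y - 1) / y; }
long floor_divide(long x, long y) { return x / y; }

// Mirrors the pooling output-length computation from the hunk above.
int pool_out_len(int in_len, int pad_l, int pad_r, int kernel, int stride, bool ceil_mode)
{
    int dim_size = in_len + pad_l + pad_r - kernel;
    assert(dim_size >= 0); // only meaningful now that the operands are signed
    long len = ceil_mode ? ceil_divide(dim_size, stride) : floor_divide(dim_size, stride);
    return int(std::max<long>(1, len + 1));
}

int main()
{
    // 7-wide input, 3-wide window, stride 2, no padding: floor(4/2)+1 = 3
    std::printf("%d\n", pool_out_len(7, 0, 0, 3, 2, false)); // 3
    // 8-wide input, same window, ceil mode: ceil(5/2)+1 = 4
    std::printf("%d\n", pool_out_len(8, 0, 0, 3, 2, true));  // 4
}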
src/include/migraphx/op/quant_convolution.hpp

@@ -19,9 +19,9 @@ namespace op {
 struct quant_convolution
 {
-    std::vector<std::size_t> padding  = {0, 0};
-    std::vector<std::size_t> stride   = {1, 1};
-    std::vector<std::size_t> dilation = {1, 1};
+    std::vector<int> padding  = {0, 0};
+    std::vector<int> stride   = {1, 1};
+    std::vector<int> dilation = {1, 1};
     padding_mode_t padding_mode = default_;
     int group = 1;
@@ -60,7 +60,7 @@ struct quant_convolution
         const shape& input   = inputs.at(0);
         const shape& weights = inputs.at(1);
         auto t               = input.type();
-        size_t kdims         = input.lens().size() - 2;
+        int kdims            = input.lens().size() - 2;
         if(kdims != this->kdims())
         {
             MIGRAPHX_THROW("quant_convolution: input k-dims does not match attribute size");
@@ -73,14 +73,14 @@ struct quant_convolution
         }
         t = shape::int32_type;

-        std::vector<size_t> output_lens{input.lens()[0], weights.lens()[0]};
+        std::vector<int> output_lens{input.lens()[0], weights.lens()[0]};
         auto padding_size = padding.size();
-        for(size_t i = 0; i < kdims; i++)
+        for(int i = 0; i < kdims; i++)
         {
             auto padding_factor = 2 * padding[i];
             if(padding_size == 2 * kdims)
                 padding_factor = padding[i] + padding[i + kdims];
-            output_lens.push_back(std::size_t(std::max<std::ptrdiff_t>(
+            output_lens.push_back(int(std::max<std::ptrdiff_t>(
                 1,
                 (input.lens()[i + 2] - (1 + dilation[i] * (weights.lens()[i + 2] - 1)) +
                  padding_factor) /
...
@@ -91,7 +91,7 @@ struct quant_convolution
         return inputs[0].with_lens(t, output_lens);
     }

-    size_t kdims() const
+    int kdims() const
     {
         check_attribute_size();
         return stride.size();
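The expression above is clipped mid-line in the diff view; it matches the shape of the standard dilated-convolution output-length formula. A small self-contained check, where the final division by the stride and trailing "+ 1" are inferred from that standard formula rather than visible in the hunk, so treat them as an assumption:

#include <algorithm>
#include <cstdio>

// Standard dilated-convolution output length; the "/ stride + 1" tail is
// assumed, since the hunk above is truncated mid-expression.
int conv_out_len(int in_len, int pad_l, int pad_r, int kernel, int stride, int dilation)
{
    int effective_kernel = 1 + dilation * (kernel - 1);
    return std::max(1, (in_len - effective_kernel + pad_l + pad_r) / stride + 1);
}

int main()
{
    std::printf("%d\n", conv_out_len(32, 1, 1, 3, 1, 1)); // 32 ("same" padding)
    std::printf("%d\n", conv_out_len(32, 0, 0, 3, 2, 2)); // effective kernel 5 -> 14
}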
src/include/migraphx/op/reduce_op.hpp

@@ -69,7 +69,7 @@ struct reduce_op : op_name<Derived>
         return {{"normalize_axes", normalize}};
     }

-    std::vector<int64_t> tune_axes(std::size_t n_dim) const
+    std::vector<int64_t> tune_axes(int n_dim) const
     {
         auto tuned_axes = axes;
         if(tuned_axes.empty())
@@ -110,7 +110,7 @@ struct reduce_op : op_name<Derived>
     void reduce(tensor_view<T>& input,
                 shape& batch_shape,
                 std::vector<int64_t>& tuned_axes,
-                std::vector<std::size_t>& out_idx,
+                std::vector<int>& out_idx,
                 tensor_view<T>& output) const
     {
         using accumulator = accumulator_type<T>;
@@ -132,7 +132,7 @@ struct reduce_op : op_name<Derived>
         argument result{output_shape};
         auto arg_lens   = args.front().get_shape().lens();
         auto tuned_axes = tune_axes(arg_lens.size());
-        std::vector<std::size_t> batch_lens(output_shape.lens().size(), 1);
+        std::vector<int> batch_lens(output_shape.lens().size(), 1);
         tune_dims(tuned_axes, arg_lens, batch_lens);
         shape batch_shape{output_shape.type(), batch_lens};
         visit_all(result, args[0])([&](auto output, auto input) {
src/include/migraphx/op/rnn.hpp

@@ -20,7 +20,7 @@ namespace op {
 struct rnn
 {
-    std::size_t hidden_size = 1;
+    int hidden_size = 1;
     std::vector<operation> actv_funcs{tanh{}, tanh{}};
     rnn_direction direction = rnn_direction::forward;
     float clip              = 0.0f;
@@ -44,7 +44,7 @@ struct rnn
             MIGRAPHX_THROW("RNN: hidden size mismatch in attribute and input");
         }

-        std::size_t num_directions = 1;
+        int num_directions = 1;
         if(direction == rnn_direction::bidirectional)
         {
             num_directions = 2;
@@ -55,7 +55,7 @@ struct rnn
             MIGRAPHX_THROW("RNN: num_direction mismatch in attribute and input");
         }

-        std::vector<std::size_t> out_dims(in_dims);
+        std::vector<int> out_dims(in_dims);
         out_dims.insert(out_dims.begin() + 1, num_directions);
         out_dims.back() = hidden_size;
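The output-shape logic above starts from the input dims, inserts the direction count as the second dimension, and replaces the last dimension with the hidden size. A minimal sketch of that computation, lifted out of the operator:

#include <cstdio>
#include <vector>

// Mirrors the rnn output-dims computation above: insert num_directions after
// the sequence dimension and replace the last dim with hidden_size.
std::vector<int> rnn_out_dims(std::vector<int> in_dims, int num_directions, int hidden_size)
{
    std::vector<int> out_dims(in_dims);
    out_dims.insert(out_dims.begin() + 1, num_directions);
    out_dims.back() = hidden_size;
    return out_dims;
}

int main()
{
    // ONNX RNN input [seq_len, batch, input_size] = [5, 2, 8], bidirectional,
    // hidden_size 16 -> [5, 2, 2, 16]
    for(int d : rnn_out_dims({5, 2, 8}, 2, 16))
        std::printf("%d ", d); // 5 2 2 16
}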
src/include/migraphx/op/roialign.hpp

@@ -65,7 +65,7 @@ struct roialign
         MIGRAPHX_THROW("ROIALIGN: rois and batch indices inputs should have the same number!");
     }

-    std::vector<std::size_t> out_lens = x_lens;
+    std::vector<int> out_lens = x_lens;
     out_lens[0] = roi_lens[0];
     out_lens[2] = output_height;
     out_lens[3] = output_width;
@@ -76,27 +76,27 @@ struct roialign
     struct pos_weight
     {
         // neighbor indices for the bilinear interpolation
-        std::array<std::size_t, 4> pos = {0, 0, 0, 0};
+        std::array<int, 4> pos = {0, 0, 0, 0};
         // neighbor weights for the bilinear interpolation
         std::array<float, 4> w = {0.0f, 0.0f, 0.0f, 0.0f};
     };

-    auto calc_pos_weight(const std::array<std::size_t, 2>& dims,
+    auto calc_pos_weight(const std::array<int, 2>& dims,
                          const shape& comp_s,
                          const std::array<float, 2>& roi_start,
                          const std::array<float, 2>& bin_size,
-                         const std::array<std::size_t, 2>& bin_grid_size) const
+                         const std::array<int, 2>& bin_grid_size) const
     {
         std::vector<pos_weight> results(bin_grid_size[0] * bin_grid_size[1] * output_height *
                                         output_width);
         shape_for_each(comp_s, [&](auto idx) {
-            std::array<std::size_t, 2> p = {idx[0], idx[1]};
-            std::array<std::size_t, 2> i = {idx[2], idx[3]};
+            std::array<int, 2> p = {idx[0], idx[1]};
+            std::array<int, 2> i = {idx[2], idx[3]};
             auto index = comp_s.index(idx);
             std::array<float, 2> xy{};
-            std::array<int64_t, 2> low{};
-            std::array<int64_t, 2> high{};
+            std::array<int, 2> low{};
+            std::array<int, 2> high{};
             for(auto ii : range(p.size()))
             {
                 xy[ii] = roi_start[ii] + p[ii] * bin_size[ii] +
...
@@ -140,7 +140,7 @@ struct roialign
         double operator()(double x, double y) { return std::max(x, y); }
-        double final(double x, std::size_t) { return (x); }
+        double final(double x, int) { return (x); }
     };

     struct avg_pool
@@ -149,12 +149,12 @@ struct roialign
         double operator()(double x, double y) { return x + y; }
-        double final(double x, std::size_t y) { return (y == 0) ? 0.0 : (x / y); }
+        double final(double x, int y) { return (y == 0) ? 0.0 : (x / y); }
     };

     template <class T, class Op>
     std::tuple<double, int64_t> calc_pooling(const T& data,
-                                             const std::array<std::size_t, 2>& bin_grid_size,
+                                             const std::array<int, 2>& bin_grid_size,
                                              const std::vector<pos_weight>& pos_weights,
                                              int64_t index,
                                              Op op) const
@@ -182,13 +182,13 @@ struct roialign
         argument result{output_shape};
         const auto& out_lens = output_shape.lens();
         int64_t n_rois       = out_lens[0];
-        std::size_t channels = out_lens[1];
+        int channels         = out_lens[1];
         // output dims of height and width, in all 2-dim arrays, the first dim
         // is for height and second dim is for width
-        std::array<std::size_t, 2> out_dims = {out_lens[2], out_lens[3]};
+        std::array<int, 2> out_dims = {out_lens[2], out_lens[3]};
         const auto& x_lens = args.at(0).get_shape().lens();
         // input dims of height and width
-        std::array<std::size_t, 2> in_dims = {x_lens[2], x_lens[3]};
+        std::array<int, 2> in_dims = {x_lens[2], x_lens[3]};
         auto roi_s = args.at(1).get_shape();
         visit_all(result, args.at(0), args.at(1))([&](auto output, auto x, auto roi) {
@@ -207,7 +207,7 @@ struct roialign
             // Force malformed ROIs to be 1x1
             std::array<float, 2> roi_size{};
             std::array<float, 2> bin_size{};
-            std::array<std::size_t, 2> bin_grid_size{};
+            std::array<int, 2> bin_grid_size{};
             for(auto ii : range(roi_size.size()))
             {
@@ -222,13 +222,13 @@ struct roialign
             // we want to precalculate indices and weights shared by all channels,
             // this is the key point of optimization
-            std::vector<std::size_t> comp_lens = {
+            std::vector<int> comp_lens = {
                 out_dims[0], out_dims[1], bin_grid_size[0], bin_grid_size[1]};
             shape comp_s{shape::float_type, comp_lens};
             auto pre_calc =
                 this->calc_pos_weight(in_dims, comp_s, roi_starts, bin_size, bin_grid_size);
-            std::vector<std::size_t> comp_lens1 = {channels, out_dims[0], out_dims[1]};
+            std::vector<int> comp_lens1 = {channels, out_dims[0], out_dims[1]};
             shape comp_s1{migraphx::shape::float_type, comp_lens1};
             std::vector<int64_t> vec_index(channels, 0);
             shape_for_each(comp_s1, [&](auto idx) {
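The max_pool/avg_pool functors above follow a small accumulate-then-finalize protocol: operator() folds samples into an accumulator and final() optionally normalizes by the sample count. A tiny standalone illustration of that protocol (the driver loop is a sketch; the functor bodies are copied from the hunk):

#include <algorithm>
#include <cstdio>

// Same accumulate/finalize protocol as the roialign pooling functors above.
struct max_pool
{
    double operator()(double x, double y) { return std::max(x, y); }
    double final(double x, int) { return x; }
};
struct avg_pool
{
    double operator()(double x, double y) { return x + y; }
    double final(double x, int y) { return (y == 0) ? 0.0 : (x / y); }
};

template <class Op>
double pool(const double* data, int n, Op op)
{
    double acc = 0.0;
    for(int i = 0; i < n; i++)
        acc = op(acc, data[i]);
    return op.final(acc, n); // avg divides by the count; max ignores it
}

int main()
{
    double samples[] = {1.0, 4.0, 2.0, 3.0};
    std::printf("max=%g avg=%g\n",
                pool(samples, 4, max_pool{}),
                pool(samples, 4, avg_pool{})); // max=4 avg=2.5
}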
src/include/migraphx/op/slice.hpp

@@ -46,22 +46,22 @@ struct slice
     std::string name() const { return "slice"; }

-    auto fix_index(const std::vector<std::size_t>& lens, std::size_t axis, int64_t index) const
+    auto fix_index(const std::vector<int>& lens, int axis, int64_t index) const
     {
         int64_t r = std::min(index, static_cast<int64_t>(lens[axis]));
         if(r < 0)
             r += lens[axis];
-        return std::size_t(r);
+        return int(r);
     }

     auto compute_offset(const shape& s) const
     {
-        const std::vector<std::size_t>& lens    = s.lens();
-        const std::vector<std::size_t>& strides = s.strides();
+        const std::vector<int>& lens    = s.lens();
+        const std::vector<int>& strides = s.strides();
         auto offset = 0;
         if(!axes.empty())
         {
-            for(std::size_t i = 0; i < axes.size(); i++)
+            for(int i = 0; i < axes.size(); i++)
             {
                 auto axis = axes[i];
                 offset += fix_index(lens, axis, starts[i]) * strides[axis];
@@ -69,7 +69,7 @@ struct slice
         }
         else
         {
-            for(std::size_t axis = 0; axis < lens.size(); axis++)
+            for(int axis = 0; axis < lens.size(); axis++)
             {
                 offset += fix_index(lens, axis, starts[axis]) * strides[axis];
             }
@@ -95,8 +95,8 @@ struct slice
             MIGRAPHX_THROW("SLICE: inconsistent sizes");
         }

-        std::vector<std::size_t> new_lens = old_lens;
-        for(std::size_t i = 0; i < axes.size(); i++)
+        std::vector<int> new_lens = old_lens;
+        for(int i = 0; i < axes.size(); i++)
         {
             auto axis = axes[i];
             new_lens[axis] =
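fix_index clamps a start index to the axis length and wraps negative (Python-style) indices around the end. A quick standalone check of the function body copied from the hunk above:

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// Same normalization as slice::fix_index above: clamp to the axis length,
// then wrap negative indices around the end.
int fix_index(const std::vector<int>& lens, int axis, int64_t index)
{
    int64_t r = std::min(index, static_cast<int64_t>(lens[axis]));
    if(r < 0)
        r += lens[axis];
    return int(r);
}

int main()
{
    std::vector<int> lens = {4, 10};
    std::printf("%d\n", fix_index(lens, 1, -3)); // 7  (counts from the end)
    std::printf("%d\n", fix_index(lens, 1, 99)); // 10 (clamped to the length)
}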
src/include/migraphx/par_for.hpp

@@ -28,23 +28,23 @@ struct joinable_thread : std::thread
 };

 template <class F>
-auto thread_invoke(std::size_t i, std::size_t tid, F f) -> decltype(f(i, tid))
+auto thread_invoke(int i, int tid, F f) -> decltype(f(i, tid))
 {
     f(i, tid);
 }

 template <class F>
-auto thread_invoke(std::size_t i, std::size_t, F f) -> decltype(f(i))
+auto thread_invoke(int i, int, F f) -> decltype(f(i))
 {
     f(i);
 }

 template <class F>
-void par_for_impl(std::size_t n, std::size_t threadsize, F f)
+void par_for_impl(int n, int threadsize, F f)
 {
     if(threadsize <= 1)
     {
-        for(std::size_t i = 0; i < n; i++)
+        for(int i = 0; i < n; i++)
             thread_invoke(i, 0, f);
     }
     else
@@ -54,15 +54,15 @@ void par_for_impl(std::size_t n, std::size_t threadsize, F f)
 #if(!defined(__GNUC__) || __GNUC__ != 5)
         const
 #endif
-            std::size_t grainsize = std::ceil(static_cast<double>(n) / threads.size());
+            int grainsize = std::ceil(static_cast<double>(n) / threads.size());

-        std::size_t work = 0;
-        std::size_t tid  = 0;
+        int work = 0;
+        int tid  = 0;
         std::generate(threads.begin(), threads.end(), [=, &work, &tid] {
             auto result = joinable_thread([=] {
-                std::size_t start = work;
-                std::size_t last  = std::min(n, work + grainsize);
-                for(std::size_t i = start; i < last; i++)
+                int start = work;
+                int last  = std::min(n, work + grainsize);
+                for(int i = start; i < last; i++)
                 {
                     thread_invoke(i, tid, f);
                 }
@@ -76,15 +76,15 @@ void par_for_impl(std::size_t n, std::size_t threadsize, F f)
 }

 template <class F>
-void par_for(std::size_t n, std::size_t min_grain, F f)
+void par_for(int n, int min_grain, F f)
 {
-    const auto threadsize = std::min<std::size_t>(std::thread::hardware_concurrency(),
-                                                  n / std::max<std::size_t>(1, min_grain));
+    const auto threadsize = std::min<int>(std::thread::hardware_concurrency(),
+                                          n / std::max<int>(1, min_grain));
     par_for_impl(n, threadsize, f);
 }

 template <class F>
-void par_for(std::size_t n, F f)
+void par_for(int n, F f)
 {
     const int min_grain = 8;
     par_for(n, min_grain, f);
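For context, par_for splits [0, n) into per-thread grains and dispatches through thread_invoke, which accepts callables taking either (i) or (i, tid). A hypothetical usage sketch, assuming the migraphx headers are on the include path:

// Hypothetical usage of par_for; both callable forms come from the
// thread_invoke overloads in the hunk above.
#include <migraphx/par_for.hpp>
#include <atomic>
#include <cstdio>
#include <vector>

int main()
{
    std::vector<int> squares(1000);
    // index-only form: each i in [0, 1000) is processed once, grains of >= 8
    migraphx::par_for(1000, 8, [&](int i) { squares[i] = i * i; });

    std::atomic<long> sum{0};
    // index + worker-id form, useful for per-worker scratch state
    migraphx::par_for(1000, [&](int i, int tid) { (void)tid; sum += squares[i]; });
    std::printf("%ld\n", sum.load()); // 332833500
}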
src/include/migraphx/rewrite_rnn.hpp

@@ -69,7 +69,7 @@ struct rewrite_rnn
                                   instruction_ref last_cell_output,
                                   op::rnn_direction dirct) const;
-    std::size_t
+    int
     get_seq_len(const module& prog, instruction_ref input, instruction_ref seq_lens) const;

     instruction_ref pad_hidden_states(module& prog,
src/include/migraphx/shape.hpp

@@ -66,52 +66,52 @@ struct shape
     shape();
     shape(type_t t);
-    shape(type_t t, std::vector<std::size_t> l);
-    shape(type_t t, std::vector<std::size_t> l, std::vector<std::size_t> s);
+    shape(type_t t, std::vector<int> l);
+    shape(type_t t, std::vector<int> l, std::vector<int> s);

     template <class Range>
-    shape(type_t t, const Range& l) : shape(t, std::vector<std::size_t>(l.begin(), l.end()))
+    shape(type_t t, const Range& l) : shape(t, std::vector<int>(l.begin(), l.end()))
     {
     }

     template <class Range1, class Range2>
     shape(type_t t, const Range1& l, const Range2& s)
         : shape(t,
-                std::vector<std::size_t>(l.begin(), l.end()),
-                std::vector<std::size_t>(s.begin(), s.end()))
+                std::vector<int>(l.begin(), l.end()),
+                std::vector<int>(s.begin(), s.end()))
     {
     }

     shape(const std::vector<shape>& subs);

     static shape
-    from_permutation(type_t t, const std::vector<std::size_t>& l, const std::vector<int64_t>& perm);
+    from_permutation(type_t t, const std::vector<int>& l, const std::vector<int64_t>& perm);

     type_t type() const;
-    const std::vector<std::size_t>& lens() const;
-    const std::vector<std::size_t>& strides() const;
-    std::size_t elements() const;
-    std::size_t bytes() const;
-    std::size_t type_size() const;
+    const std::vector<int>& lens() const;
+    const std::vector<int>& strides() const;
+    int elements() const;
+    int bytes() const;
+    int type_size() const;

     /// Map multiple indices to space index
-    std::size_t index(std::initializer_list<std::size_t> l) const;
+    int index(std::initializer_list<int> l) const;
     /// Map multiple indices to space index
-    std::size_t index(const std::vector<std::size_t>& l) const;
+    int index(const std::vector<int>& l) const;

     /// Map multiple indices from a range of iterator to a space index
     template <class Iterator>
-    std::size_t index(Iterator start, Iterator last) const
+    int index(Iterator start, Iterator last) const
     {
         assert(std::distance(start, last) <= this->lens().size());
         assert(this->lens().size() == this->strides().size());
-        return std::inner_product(start, last, this->strides().begin(), std::size_t{0}); // NOLINT
+        return std::inner_product(start, last, this->strides().begin(), int{0}); // NOLINT
     }

     /// Map element index to space index
-    std::size_t index(std::size_t i) const;
+    int index(int i) const;

-    std::vector<std::size_t> multi(std::size_t i) const;
-    void multi_copy(std::size_t i, std::size_t* start, const std::size_t* end) const;
+    std::vector<int> multi(int i) const;
+    void multi_copy(int i, int* start, const int* end) const;

     /// Returns true if the shape is packed with no padding
     bool packed() const;
@@ -128,8 +128,8 @@ struct shape
     shape normalize_standard() const;

-    shape with_lens(type_t t, const std::vector<std::size_t>& l) const;
-    shape with_lens(const std::vector<std::size_t>& l) const;
+    shape with_lens(type_t t, const std::vector<int>& l) const;
+    shape with_lens(const std::vector<int>& l) const;

     friend bool operator==(const shape& x, const shape& y);
     friend bool operator!=(const shape& x, const shape& y);
@@ -164,16 +164,16 @@ struct shape
         type operator()() const { return {}; }
-        std::size_t size(std::size_t n = 1) const { return sizeof(type) * n; }
+        int size(int n = 1) const { return sizeof(type) * n; }

         template <class U>
-        type* from(U* buffer, std::size_t n = 0) const
+        type* from(U* buffer, int n = 0) const
         {
             return reinterpret_cast<type*>(buffer) + n;
         }

         template <class U>
-        const type* from(const U* buffer, std::size_t n = 0) const
+        const type* from(const U* buffer, int n = 0) const
         {
             return reinterpret_cast<const type*>(buffer) + n;
         }
@@ -227,7 +227,7 @@ struct shape
     private:
     std::shared_ptr<const shape_impl> impl;

-    std::size_t element_space() const;
+    int element_space() const;
 };

 void migraphx_to_value(value& v, const shape& s);
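The index overloads above all reduce to a dot product of a multi-index with the strides, exactly as in the Iterator overload. A standalone sketch of that mapping for a packed row-major 3x4 shape:

#include <cstdio>
#include <numeric>
#include <vector>

// shape::index reduces to an inner product of a multi-index with the strides,
// as in the Iterator overload above.
int index_of(const std::vector<int>& idx, const std::vector<int>& strides)
{
    return std::inner_product(idx.begin(), idx.end(), strides.begin(), int{0});
}

int main()
{
    // Row-major (packed) 3x4 tensor: strides are {4, 1}
    std::vector<int> strides = {4, 1};
    std::printf("%d\n", index_of({2, 3}, strides)); // 2*4 + 3*1 = 11
    std::printf("%d\n", index_of({0, 1}, strides)); // 1
}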
src/include/migraphx/shape_for_each.hpp

@@ -12,16 +12,16 @@ template <class F>
 void shape_for_each(const migraphx::shape& s, F f)
 {
     // Ensure calls to f use const ref to vector
-    auto call = [&f](const std::vector<std::size_t>& i) { f(i); };
-    std::vector<std::size_t> indices(s.lens().size());
+    auto call = [&f](const std::vector<int>& i) { f(i); };
+    std::vector<int> indices(s.lens().size());
     shape ss{s.type(), s.lens()};
-    for(std::size_t i = 0; i < ss.elements(); i++)
+    for(int i = 0; i < ss.elements(); i++)
     {
         std::transform(ss.strides().begin(),
                        ss.strides().end(),
                        ss.lens().begin(),
                        indices.begin(),
-                       [&](std::size_t stride, std::size_t len) {
+                       [&](int stride, int len) {
                            assert(len > 0 and stride > 0);
                            return (i / stride) % len;
                        });
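The (i / stride) % len step above recovers each coordinate of the multi-index from the flat element index of a packed shape. A quick standalone version of the same trick:

#include <cstdio>
#include <vector>

// Recover the multi-index of flat element i in a packed shape, using the same
// (i / stride) % len computation as shape_for_each above.
std::vector<int> unflatten(int i, const std::vector<int>& lens, const std::vector<int>& strides)
{
    std::vector<int> idx(lens.size());
    for(size_t d = 0; d < lens.size(); d++)
        idx[d] = (i / strides[d]) % lens[d];
    return idx;
}

int main()
{
    std::vector<int> lens = {2, 3, 4}, strides = {12, 4, 1}; // packed row-major
    for(int d : unflatten(17, lens, strides))
        std::printf("%d ", d); // 1 1 1  (since 17 = 1*12 + 1*4 + 1)
}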
src/include/migraphx/tensor_view.hpp

@@ -25,7 +25,7 @@ template <class T>
 struct tensor_view_iterator_read
 {
     T* view;
-    auto& operator()(std::size_t n) const
+    auto& operator()(int n) const
     {
         assert(view != nullptr);
         return (*view)[n];
@@ -36,9 +36,9 @@ template <class T>
 struct tensor_view
 {
     using value_type = T;
-    using iterator = basic_iota_iterator<tensor_view_iterator_read<tensor_view<T>>, std::size_t>;
+    using iterator = basic_iota_iterator<tensor_view_iterator_read<tensor_view<T>>, int>;
     using const_iterator =
-        basic_iota_iterator<tensor_view_iterator_read<const tensor_view<T>>, std::size_t>;
+        basic_iota_iterator<tensor_view_iterator_read<const tensor_view<T>>, int>;

     tensor_view() : m_data(nullptr) {}
     tensor_view(shape s, T* d) : m_data(d), m_shape(std::move(s)) {}
@@ -46,7 +46,7 @@ struct tensor_view
     bool empty() const { return m_data == nullptr || m_shape.lens().empty(); }

-    std::size_t size() const { return m_shape.elements(); }
+    int size() const { return m_shape.elements(); }

     T* data() { return this->m_data; }
@@ -55,17 +55,17 @@ struct tensor_view
     template <class... Ts, MIGRAPHX_REQUIRES(std::is_integral<Ts>{}...)>
     const T& operator()(Ts... xs) const
     {
-        assert(std::vector<std::size_t>{static_cast<std::size_t>(xs)...} < m_shape.lens());
-        assert(m_shape.index({static_cast<std::size_t>(xs)...}) < m_shape.bytes() / sizeof(T));
-        return m_data[m_shape.index({static_cast<std::size_t>(xs)...})];
+        assert(std::vector<int>{static_cast<int>(xs)...} < m_shape.lens());
+        assert(m_shape.index({static_cast<int>(xs)...}) < m_shape.bytes() / sizeof(T));
+        return m_data[m_shape.index({static_cast<int>(xs)...})];
     }

     template <class... Ts, MIGRAPHX_REQUIRES(std::is_integral<Ts>{}...)>
     T& operator()(Ts... xs)
     {
-        assert(std::vector<std::size_t>{static_cast<std::size_t>(xs)...} < m_shape.lens());
-        assert(m_shape.index({static_cast<std::size_t>(xs)...}) < m_shape.bytes() / sizeof(T));
-        return m_data[m_shape.index({static_cast<std::size_t>(xs)...})];
+        assert(std::vector<int>{static_cast<int>(xs)...} < m_shape.lens());
+        assert(m_shape.index({static_cast<int>(xs)...}) < m_shape.bytes() / sizeof(T));
+        return m_data[m_shape.index({static_cast<int>(xs)...})];
     }

     template <class Iterator, MIGRAPHX_REQUIRES(not std::is_integral<Iterator>{})>
@@ -84,13 +84,13 @@ struct tensor_view
         return m_data[m_shape.index(start, last)];
     }

-    T& operator[](std::size_t i)
+    T& operator[](int i)
     {
         assert(!this->empty() && i < this->size());
         return m_data[m_shape.index(i)];
     }

-    const T& operator[](std::size_t i) const
+    const T& operator[](int i) const
     {
         assert(!this->empty() && i < this->size());
         return m_data[m_shape.index(i)];
@@ -141,7 +141,7 @@ struct tensor_view
     if(!x.empty())
     {
         os << as_number(x.front());
-        for(std::size_t i = 1; i < x.m_shape.elements(); i++)
+        for(int i = 1; i < x.m_shape.elements(); i++)
         {
             os << ", " << as_number(x.m_data[x.m_shape.index(i)]);
         }
@@ -159,7 +159,7 @@ bool operator==(const tensor_view<T>& x, const tensor_view<U>& y)
 {
     if(x.get_shape() == y.get_shape())
     {
-        for(std::size_t i = 0; i < x.get_shape().elements(); i++)
+        for(int i = 0; i < x.get_shape().elements(); i++)
         {
             if(!float_equal(x[i], y[i]))
                 return false;
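tensor_view is a non-owning shape-over-buffer wrapper; both access paths touched above (multi-index operator() and flat-index operator[]) resolve through shape::index. A hypothetical usage sketch, assuming the migraphx headers are available and that shape's lens are int per this commit:

// Hypothetical usage sketch of tensor_view over a caller-owned buffer.
#include <migraphx/shape.hpp>
#include <migraphx/tensor_view.hpp>
#include <cstdio>
#include <vector>

int main()
{
    std::vector<float> buffer(2 * 3, 0.0f);
    migraphx::shape s{migraphx::shape::float_type, {2, 3}};
    migraphx::tensor_view<float> tv{s, buffer.data()}; // non-owning view

    tv(1, 2) = 42.0f;           // multi-index write via operator()
    std::printf("%f\n", tv[5]); // flat-index read of the same element: 42.0
}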
src/insert_pad.cpp

@@ -15,7 +15,7 @@ static void update_op(const instruction_ref& input, const instruction_ref& ins, ...
 {
     auto op         = ins->get_operator();
     auto val        = op.to_value();
-    auto op_padding = val.at("padding").to_vector<size_t>();
+    auto op_padding = val.at("padding").to_vector<int>();
     auto kdims      = input->get_shape().lens().size() - 2;
     if(std::equal(op_padding.begin(),
@@ -25,9 +25,9 @@ static void update_op(const instruction_ref& input, const instruction_ref& ins, ...
         return;

     std::vector<int64_t> padding(input->get_shape().lens().size() * 2, 0);
-    std::vector<size_t> pads_l(op_padding.begin(), op_padding.begin() + kdims);
-    std::vector<size_t> pads_r(op_padding.begin() + kdims, op_padding.end());
-    op_padding = std::vector<size_t>(kdims * 2, 0);
+    std::vector<int> pads_l(op_padding.begin(), op_padding.begin() + kdims);
+    std::vector<int> pads_r(op_padding.begin() + kdims, op_padding.end());
+    op_padding = std::vector<int>(kdims * 2, 0);
     op.from_value({{"padding", op_padding}});
     std::copy(pads_l.begin(), pads_l.end(), padding.begin() + 2);
@@ -56,9 +56,9 @@ static void update_pooling(const instruction_ref& input, const instruction_ref& ...
         return;

     std::vector<int64_t> padding(input->get_shape().lens().size() * 2, 0);
-    std::vector<size_t> pads_l(op.padding.begin(), op.padding.begin() + kdims);
-    std::vector<size_t> pads_r(op.padding.begin() + kdims, op.padding.end());
-    op.padding = std::vector<size_t>(kdims * 2, 0);
+    std::vector<int> pads_l(op.padding.begin(), op.padding.begin() + kdims);
+    std::vector<int> pads_r(op.padding.begin() + kdims, op.padding.end());
+    op.padding = std::vector<int>(kdims * 2, 0);
     std::copy(pads_l.begin(), pads_l.end(), padding.begin() + 2);
     std::copy(pads_r.begin(), pads_r.end(), padding.begin() + kdims + 2 + 2);
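The copies above re-home the operator's 2*kdims spatial pads into a full-rank pad vector: the left pads land after the two batch/channel slots (offset 2) and the right pads after offset kdims + 2 + 2. A tiny standalone check of those offsets:

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// Re-home 2*kdims spatial pads into a full-rank pad vector of size 2*(kdims+2),
// mirroring the std::copy offsets in insert_pad.cpp above.
std::vector<int64_t> full_rank_pads(const std::vector<int>& op_padding, int kdims)
{
    std::vector<int64_t> padding((kdims + 2) * 2, 0);
    std::copy(op_padding.begin(), op_padding.begin() + kdims, padding.begin() + 2);
    std::copy(op_padding.begin() + kdims, op_padding.end(),
              padding.begin() + kdims + 2 + 2);
    return padding;
}

int main()
{
    // 2-D padding {top, left, bottom, right} = {1, 2, 3, 4}
    for(auto p : full_rank_pads({1, 2, 3, 4}, 2))
        std::printf("%lld ", (long long)p); // 0 0 1 2 0 0 3 4
}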
src/normalize_attributes.cpp

@@ -17,7 +17,7 @@ inline namespace MIGRAPHX_INLINE_NS {
 auto tune_attribute(const std::vector<int64_t>& vec,
                     const std::vector<int64_t>& axes,
                     const value& val,
-                    const std::vector<std::size_t>& lens)
+                    const std::vector<int>& lens)
 {
     std::vector<int64_t> result(vec);
     int64_t n_rank = lens.size();
@@ -127,7 +127,7 @@ auto tune_pad_attribute(const value& val)
     return result;
 }

-bool normalize_attributes(operation& op, const std::vector<std::size_t>& lens)
+bool normalize_attributes(operation& op, const std::vector<int>& lens)
 {
     bool tuned = false;
     auto attrs = op.attributes();
src/onnx/include/migraphx/onnx/onnx_parser.hpp

@@ -25,7 +25,7 @@ struct onnx_parser
     struct node_info
     {
         attribute_map attributes{};
-        std::size_t num_outputs = 1;
+        int num_outputs = 1;
         std::string name        = "";
         module* mod             = nullptr;

         instruction_ref make_contiguous(instruction_ref ins) const;
@@ -60,8 +60,8 @@ struct onnx_parser
     node_map nodes;
     std::unordered_map<std::string, instruction_ref> instructions;
     program prog          = program();
-    std::size_t default_dim_value = 1;
-    std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims;
+    int default_dim_value = 1;
+    std::unordered_map<std::string, std::vector<int>> map_input_dims;
     bool skip_unknown_operators = false;
     int64_t max_loop_iterations = 10;
     int64_t opset_version       = 13;
@@ -76,11 +76,11 @@ struct onnx_parser
     static int64_t get_opset_version(const onnx::ModelProto& model);

     void parse_from(std::istream& is, std::string name = "");
-    void parse_from(const void* data, std::size_t size);
+    void parse_from(const void* data, int size);
     void parse_graph(module* mod, const onnx::GraphProto& graph);
     literal parse_value(const onnx::AttributeProto& attr) const;
     literal parse_tensor(const onnx::TensorProto& t) const;
-    shape parse_type(const onnx::TypeProto& t, const std::vector<std::size_t>& input_dims) const;
+    shape parse_type(const onnx::TypeProto& t, const std::vector<int>& input_dims) const;
 };

 shape::type_t get_type(int dtype);
src/onnx/include/migraphx/onnx/padding.hpp

@@ -12,9 +12,9 @@ bool is_asym_padding(const std::vector<int64_t>& padding);
 void cal_auto_padding_size(onnx_parser::node_info info,
                            value& v,
-                           const std::vector<std::size_t>& k_lens,
-                           const std::vector<std::size_t>& dilation,
-                           const std::vector<std::size_t>& in_lens,
+                           const std::vector<int>& k_lens,
+                           const std::vector<int>& dilation,
+                           const std::vector<int>& in_lens,
                            std::vector<int64_t>& paddings);

 void check_padding_mode(const onnx_parser::node_info& info, const std::string& op_name);
src/onnx/onnx.cpp

@@ -54,7 +54,7 @@ program parse_onnx_buffer(const std::string& buffer, const onnx_options& options)
     return parse_onnx_from(options, buffer.data(), buffer.size());
 }

-program parse_onnx_buffer(const void* data, std::size_t size, const onnx_options& options)
+program parse_onnx_buffer(const void* data, int size, const onnx_options& options)
 {
     return parse_onnx_from(options, data, size);
 }
src/onnx/onnx_parser.cpp

@@ -28,11 +28,11 @@ static onnx_parser::attribute_map get_attributes(const onnx::NodeProto& node)
 }

 static literal
-create_literal(shape::type_t shape_type, const std::vector<size_t>& dims, const char* data)
+create_literal(shape::type_t shape_type, const std::vector<int>& dims, const char* data)
 {
     // empty input
     auto elem_num =
-        std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies<std::size_t>());
+        std::accumulate(dims.begin(), dims.end(), int(1), std::multiplies<int>());
     if(elem_num == 0)
     {
         return {};
@@ -45,11 +45,11 @@ create_literal(shape::type_t shape_type, const std::vector<size_t>& dims, const ...
 }

 template <class T, MIGRAPHX_REQUIRES(not std::is_pointer<T>{})>
-static literal create_literal(shape::type_t shape_type, const std::vector<size_t>& dims, T data)
+static literal create_literal(shape::type_t shape_type, const std::vector<int>& dims, T data)
 {
     // empty input
     auto elem_num =
-        std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies<std::size_t>());
+        std::accumulate(dims.begin(), dims.end(), int(1), std::multiplies<int>());
     if(elem_num == 0)
     {
         return {};
@@ -64,7 +64,7 @@ static literal create_literal(shape::type_t shape_type, const std::vector<size_t ...
 template <class T>
 static literal from_repeated(shape::type_t t, const T& r)
 {
-    std::size_t size = r.size();
+    int size = r.size();
     return literal{{t, {size}}, r.begin(), r.end()};
 }
@@ -187,7 +187,7 @@ void onnx_parser::parse_from(std::istream& is, std::string name)
     }
 }

-void onnx_parser::parse_from(const void* data, std::size_t size)
+void onnx_parser::parse_from(const void* data, int size)
 {
     auto* mm = prog.get_main_module();
     onnx::ModelProto model;
@@ -247,7 +247,7 @@ void onnx_parser::parse_graph(module* mod, const onnx::GraphProto& graph)
                           "\" existing in parent graph!");
         }

-        std::vector<std::size_t> dims;
+        std::vector<int> dims;
         if(map_input_dims.count(name) > 0)
         {
             dims = map_input_dims.at(name);
@@ -278,7 +278,7 @@ void onnx_parser::parse_graph(module* mod, const onnx::GraphProto& graph)
         }

         std::vector<instruction_ref> result;
-        std::size_t output_num = static_cast<std::size_t>(node.output().size());
+        int output_num = static_cast<int>(node.output().size());
         if(ops.count(node.op_type()) == 0)
         {
             if(skip_unknown_operators)
@@ -293,7 +293,7 @@ void onnx_parser::parse_graph(module* mod, const onnx::GraphProto& graph)
                 *this, {get_attributes(node), output_num, node_name, mod}, args);
         }
-        output_num = std::min<std::size_t>(output_num, result.size());
+        output_num = std::min<int>(output_num, result.size());
         std::transform(node.output().begin(),
                        node.output().begin() + output_num,
                        result.begin(),
@@ -351,7 +351,7 @@ literal onnx_parser::parse_value(const onnx::AttributeProto& attr) const
 literal onnx_parser::parse_tensor(const onnx::TensorProto& t) const
 {
-    std::vector<std::size_t> dims(t.dims().begin(), t.dims().end());
+    std::vector<int> dims(t.dims().begin(), t.dims().end());
     if(not t.external_data().empty())
     {
         const std::string& data_file = t.external_data().at(0).value();
@@ -401,7 +401,7 @@ literal onnx_parser::parse_tensor(const onnx::TensorProto& t) const
     MIGRAPHX_THROW("PARSE_TENSOR: Invalid tensor type");
 }

 shape onnx_parser::parse_type(const onnx::TypeProto& t,
-                              const std::vector<std::size_t>& input_dims) const
+                              const std::vector<int>& input_dims) const
 {
     shape::type_t shape_type = get_type(t.tensor_type().elem_type());
     if(!input_dims.empty())
@@ -409,12 +409,12 @@ shape onnx_parser::parse_type(const onnx::TypeProto& t, ...
         return {shape_type, input_dims};
     }

-    std::vector<std::size_t> dims;
+    std::vector<int> dims;
     auto&& tensor_dims = t.tensor_type().shape().dim();
     std::transform(tensor_dims.begin(),
                    tensor_dims.end(),
                    std::back_inserter(dims),
-                   [&](auto&& d) -> std::size_t {
+                   [&](auto&& d) -> int {
                        if(d.has_dim_value())
                        {
                            if(static_cast<int>(d.dim_value()) <= 0)
src/onnx/padding.cpp

@@ -10,12 +10,12 @@ namespace onnx {
 void cal_auto_padding_size(onnx_parser::node_info info,
                            value& v,
-                           const std::vector<std::size_t>& k_lens,
-                           const std::vector<std::size_t>& dilation,
-                           const std::vector<std::size_t>& in_lens,
+                           const std::vector<int>& k_lens,
+                           const std::vector<int>& dilation,
+                           const std::vector<int>& in_lens,
                            std::vector<int64_t>& paddings)
 {
-    size_t kdims = in_lens.size() - 2;
+    int kdims = in_lens.size() - 2;
     assert(k_lens.size() == kdims and dilation.size() == kdims);

     if(!contains(info.attributes, "auto_pad"))
@@ -29,7 +29,7 @@ void cal_auto_padding_size(onnx_parser::node_info info, ...
         bool is_same_upper = (auto_pad.find("SAME_UPPER") != std::string::npos);
         paddings.resize(2 * kdims);

-        for(size_t i = 0; i < paddings.size() / 2; i++)
+        for(int i = 0; i < paddings.size() / 2; i++)
         {
             calculate_padding(i,
                               paddings,
@@ -45,9 +45,9 @@ void cal_auto_padding_size(onnx_parser::node_info info, ...
 bool is_asym_padding(const std::vector<int64_t>& padding)
 {
     assert(padding.size() % 2 == 0);
-    size_t pad_ndims = padding.size() / 2;
+    int pad_ndims = padding.size() / 2;

-    for(size_t i = 0; i < pad_ndims; i++)
+    for(int i = 0; i < pad_ndims; i++)
     {
         if(padding[i] != padding[i + pad_ndims])
         {
@@ -106,9 +106,9 @@ void tune_padding_size(const value& v, ...
     }

     // asymmetric padding, make it symmetric
-    std::size_t n_dims = padding.size() / 2;
+    int n_dims = padding.size() / 2;
     s_start.resize(n_dims);
-    for(std::size_t i = 0; i < n_dims; ++i)
+    for(int i = 0; i < n_dims; ++i)
     {
         tune_padding_to_symmetric(
             padding[i], padding[i + n_dims], v.at("stride")[i].to<int64_t>(), s_start[i]);
@@ -122,7 +122,7 @@ void check_asym_padding(const onnx_parser::node_info& info, ...
                         int count_include_pad,
                         float pad_val)
 {
-    size_t pad_ndims = padding.size() / 2;
+    int pad_ndims = padding.size() / 2;
     auto left_pad_it  = padding.begin();
     auto right_pad_it = left_pad_it + pad_ndims;
@@ -134,18 +134,18 @@ void check_asym_padding(const onnx_parser::node_info& info, ...
     // add right pads
     asym_pads.insert(asym_pads.begin() + pad_ndims + 4, right_pad_it, padding.end());
     ins = info.add_instruction(make_op("pad", {{"pads", asym_pads}, {"value", pad_val}}), ins);

-    std::vector<size_t> new_padding(padding.size());
+    std::vector<int> new_padding(padding.size());
     // subtract asym padding originally found from parsing the operator
     std::transform(padding.begin(),
                    left_pad_it,
                    asym_pads.begin() + 2,
                    new_padding.begin(),
-                   std::minus<size_t>());
+                   std::minus<int>());
     std::transform(right_pad_it,
                    padding.end(),
                    asym_pads.begin() + pad_ndims + 4,
                    new_padding.begin() + pad_ndims,
-                   std::minus<size_t>());
+                   std::minus<int>());
     v["padding"] = new_padding;
 }
 }
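is_asym_padding compares the leading half of the pad vector (left pads) against the trailing half (right pads). A standalone version of the function body from the hunk above, with a trivial driver:

#include <cstdint>
#include <cstdio>
#include <vector>

// Same check as is_asym_padding above: {left..., right...} is asymmetric
// whenever any left/right pair differs.
bool is_asym_padding(const std::vector<int64_t>& padding)
{
    int pad_ndims = padding.size() / 2;
    for(int i = 0; i < pad_ndims; i++)
        if(padding[i] != padding[i + pad_ndims])
            return true;
    return false;
}

int main()
{
    std::printf("%d\n", is_asym_padding({1, 1, 1, 1})); // 0: symmetric
    std::printf("%d\n", is_asym_padding({0, 1, 1, 1})); // 1: top != bottom
}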
src/onnx/parse_convolution.cpp

@@ -60,11 +60,11 @@ struct parse_convolution : op_parser<parse_convolution>
         if(contains(info.attributes, "auto_pad"))
         {
             auto weight_lens = weights->get_shape().lens();
-            std::vector<std::size_t> k_lens(weight_lens.begin() + 2, weight_lens.end());
+            std::vector<int> k_lens(weight_lens.begin() + 2, weight_lens.end());
             cal_auto_padding_size(info,
                                   values,
                                   k_lens,
-                                  values["dilation"].to_vector<std::size_t>(),
+                                  values["dilation"].to_vector<int>(),
                                   in_lens,
                                   padding);
             auto auto_pad = info.attributes["auto_pad"].s();
@@ -73,7 +73,7 @@ struct parse_convolution : op_parser<parse_convolution>
                 values["padding_mode"] = to_value(op::padding_mode_t::same);
             }
         }
-        values["padding"] = std::vector<size_t>(padding.begin(), padding.end());
+        values["padding"] = std::vector<int>(padding.begin(), padding.end());

         if(contains(info.attributes, "group"))
         {
src/onnx/parse_expand.cpp

@@ -21,7 +21,7 @@ struct parse_expand : op_parser<parse_expand>
         auto in_lens             = args[0]->get_shape().lens();
         migraphx::argument arg_s = args[1]->eval();
         check_arg_empty(arg_s, "Expand: dynamic shape is not supported");
-        std::vector<std::size_t> dims;
+        std::vector<int> dims;
         arg_s.visit([&](auto input) { dims.assign(input.begin(), input.end()); });
         auto out_lens = compute_broadcasted_lens(in_lens, dims);
         return info.add_instruction(make_op("multibroadcast", {{"out_lens", out_lens}}), args[0]);