GitLab · gaoqiong / MIGraphX · Commits

Commit 34150e61, authored Aug 27, 2019 by Khalique

    add decorators

Parents: 8ded3a31, 62cb3441

Showing 12 changed files with 570 additions and 573 deletions (+570, -573):
src/onnx/onnx.cpp                     +30   -7
src/quantization.cpp                  +2    -1
src/targets/gpu/quant_gemm.cpp        +30   -75
src/tf/tf.cpp                         +89   -12
test/onnx/gen_onnx.py                 +204  -321
test/onnx/onnx_test.cpp               +27   -0
test/onnx/transpose_gather_test.onnx  +0    -0
test/tf/assert_less_equal_test.pb     +0    -0
test/tf/gen_tf_pb.py                  +115  -142
test/tf/onehot_test.pb                +0    -0
test/tf/stridedslice_masks_test.pb    +0    -0
test/tf/tf_test.cpp                   +73   -15
src/onnx/onnx.cpp
...
...
@@ -206,6 +206,16 @@ struct onnx_parser
         return out_lens;
     }
 
+    instruction_ref make_contiguous(instruction_ref ins)
+    {
+        if(ins->get_shape().standard())
+        {
+            return ins;
+        }
+
+        return prog.add_instruction(op::contiguous{}, ins);
+    }
+
     template <class T>
     instruction_ref add_broadcastable_binary_op(instruction_ref arg0, instruction_ref arg1, T x)
     {
...
...
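The new make_contiguous helper inserts an op::contiguous copy only when an instruction's output is not already in standard (packed, row-major) layout. The same only-copy-when-needed idea as a minimal NumPy sketch (an analogy, not the MIGraphX API):

import numpy as np

def make_contiguous(a):
    # np.ascontiguousarray copies only when needed, mirroring how the helper
    # returns `ins` unchanged whenever its shape is already standard
    return a if a.flags['C_CONTIGUOUS'] else np.ascontiguousarray(a)

x = np.arange(6).reshape(2, 3).T   # a transposed view is non-contiguous
y = make_contiguous(x)             # materializes a packed copy
assert y.flags['C_CONTIGUOUS']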
@@ -441,12 +451,7 @@ struct onnx_parser
             s.visit([&](auto v) { copy(v, std::back_inserter(op.dims)); });
         }
 
-        if(!args[0]->get_shape().standard())
-        {
-            args[0] = prog.add_instruction(op::contiguous{}, args[0]);
-        }
-
-        return prog.add_instruction(op, args[0]);
+        return prog.add_instruction(op, make_contiguous(args[0]));
     }
 
     instruction_ref
...
...
@@ -494,23 +499,41 @@ struct onnx_parser
         {
             axis = parse_value(attributes.at("axis")).at<int>();
         }
         op::gather op{axis};
-        return prog.add_instruction(op, std::move(args));
+        return prog.add_instruction(op, make_contiguous(args[0]), make_contiguous(args[1]));
     }
 
     instruction_ref
     parse_slice(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
     {
         op::slice op;
+        std::vector<size_t> dims = args[0]->get_shape().lens();
+        size_t num_dims          = dims.size();
         if(contains(attributes, "axes"))
         {
             literal s = parse_value(attributes.at("axes"));
             s.visit([&](auto v) { copy(v, std::back_inserter(op.axes)); });
         }
+        else
+        {
+            op.axes = std::vector<int64_t>(num_dims);
+            std::iota(op.axes.begin(), op.axes.end(), 0);
+        }
         if(contains(attributes, "ends"))
         {
             literal s = parse_value(attributes.at("ends"));
             s.visit([&](auto v) { copy(v, std::back_inserter(op.ends)); });
+            for(size_t i = 0; i < num_dims; i++)
+            {
+                if(static_cast<size_t>(op.ends[i]) > dims[i])
+                {
+                    op.ends[i] = dims[i];
+                }
+            }
         }
         if(contains(attributes, "starts"))
         {
             literal s = parse_value(attributes.at("starts"));
             s.visit([&](auto v) { copy(v, std::back_inserter(op.starts)); });
...
...
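parse_slice now clamps each entry of ends to the corresponding dimension size, since ONNX permits ends past the end of a dimension (e.g. INT_MAX to mean "to the end"). A short sketch of the clamping step in Python:

dims = [3, 4, 5]          # input dimensions
ends = [2, 100, 5]        # 'ends' attribute, possibly out of range
ends = [min(e, d) for e, d in zip(ends, dims)]
assert ends == [2, 4, 5]  # the out-of-range end is clamped to the dim size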
src/quantization.cpp
...
...
@@ -74,7 +74,8 @@ void quantize(program& prog, const std::vector<std::string>& ins_names)
             // if the input is a convert operator, uses its input
             // as its current input
             instruction_ref input_fp16{};
-            if(input->name() == "convert")
+            if(input->name() == "convert" and
+               input->inputs().front()->get_shape().type() == shape::half_type)
             {
                 input_fp16 = input->inputs().front();
             }
...
...
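The tightened condition reuses a convert's source only when that source really is fp16; matching on the op name alone would also capture converts from other types. A sketch of the same check with hypothetical stand-in objects (not the MIGraphX API):

class Ins:
    def __init__(self, name, inputs=(), shape_type=None):
        self.name, self.inputs, self.shape_type = name, list(inputs), shape_type

def source_if_half(ins):
    # mirrors: name() == "convert" and inputs().front() has half_type
    if ins.name == "convert" and ins.inputs[0].shape_type == "half_type":
        return ins.inputs[0]
    return None

half_src = Ins("param", shape_type="half_type")
assert source_if_half(Ins("convert", [half_src], "float_type")) is half_src
assert source_if_half(Ins("convert", [Ins("p", shape_type="int8_type")], "float_type")) is None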
src/targets/gpu/quant_gemm.cpp
...
...
@@ -8,51 +8,6 @@ namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace gpu {
 
-template <class... Ts>
-rocblas_status generic_rocblas_gemm_ex(Ts&&... xs)
-{
-    return rocblas_gemm_ex(std::forward<Ts>(xs)...);
-}
-
-template <class... Ts>
-rocblas_status generic_rocblas_batched_gemm_ex(Ts&&... xs)
-{
-    return rocblas_gemm_strided_batched_ex(std::forward<Ts>(xs)...);
-}
-
-template <class T>
-struct compute_rocblas_type
-{
-    using type = T;
-};
-
-template <class T>
-struct compute_rocblas_type<const T>
-{
-    using type = const typename compute_rocblas_type<T>::type;
-};
-
-template <>
-struct compute_rocblas_type<half>
-{
-    using type = rocblas_half;
-};
-
-template <class T>
-using rb_type = typename compute_rocblas_type<T>::type;
-
-template <class T>
-rb_type<T> to_rocblas_type(T x)
-{
-    return reinterpret_cast<const rb_type<T>&>(x);
-}
-
-template <class T>
-rb_type<T>* to_rocblas_type(T* x)
-{
-    return reinterpret_cast<rb_type<T>*>(x);
-}
-
 shape rocblas_quant_gemm::compute_shape(const std::vector<shape>& inputs) const
 {
     std::vector<shape> in_shapes(inputs);
...
...
@@ -102,13 +57,13 @@ argument rocblas_quant_gemm::compute(context& ctx,
     auto a_lens = args[0].get_shape().lens();
     auto b_lens = args[1].get_shape().lens();
     output_shape.visit_type([&](auto as) {
-        auto alpha_r = to_rocblas_type(as(op.alpha));
-        auto beta_r  = to_rocblas_type(as(beta));
+        auto alpha_r  = as(op.alpha);
+        auto beta_r   = as(beta);
         auto out_lens = output_shape.lens();
         rocblas_int m = out_lens[dim_0];
         rocblas_int n = out_lens[dim_1];
         rocblas_int k = args[0].get_shape().lens()[dim_1];
-        auto to_pointer = [&](auto&& arg) { return to_rocblas_type(as.from(arg.data())); };
+        auto to_pointer = [&](auto&& arg) { return as.from(arg.data()); };
         assert(k % 4 == 0);
 
         auto num_matrices = std::accumulate(
...
...
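The assert(k % 4 == 0) above reflects a rocBLAS constraint on int8 GEMM: the reduction dimension k must be a multiple of four, presumably because int8 operands are consumed in packed groups of four. An equivalent guard, sketched in Python:

def check_int8_gemm_k(k):
    # mirrors the C++ assert, raising instead of aborting
    if k % 4 != 0:
        raise ValueError('int8 gemm requires k to be a multiple of 4, got {}'.format(k))

check_int8_gemm_k(64)  # passes; 65 would raise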
@@ -119,36 +74,36 @@ argument rocblas_quant_gemm::compute(context& ctx,
             // column-major format. When doing a C = A * B, we actually do
             // C^T = (B^T) * (A^T). That is the reason we input args[1] as
             // A and args[0] as B in calling the rocblas_gemm.
-            generic_rocblas_gemm_ex(ctx.get_stream().get_rocblas(),
+            rocblas_gemm_ex(ctx.get_stream().get_rocblas(),
                             transb ? rocblas_operation_transpose : rocblas_operation_none,
                             transa ? rocblas_operation_transpose : rocblas_operation_none,
                             n,
                             m,
                             k,
                             &alpha_r,
                             to_pointer(args.at(1)),
                             rocblas_datatype_i8_r,
                             ldb,
                             to_pointer(args.at(0)),
                             rocblas_datatype_i8_r,
                             lda,
                             &beta_r,
                             to_pointer(args[2]),
                             rocblas_datatype_i32_r,
                             ldc,
                             is_3inputs ? to_pointer(args[3]) : to_pointer(args[2]),
                             rocblas_datatype_i32_r,
                             ldc,
                             rocblas_datatype_i32_r,
                             rocblas_gemm_algo_standard,
                             0,
                             0,
                             nullptr,
                             nullptr);
         }
         else
         {
-            generic_rocblas_batched_gemm_ex(
+            rocblas_gemm_strided_batched_ex(
                 ctx.get_stream().get_rocblas(),
                 transb ? rocblas_operation_transpose : rocblas_operation_none,
                 transa ? rocblas_operation_transpose : rocblas_operation_none,
...
...
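The comment in the hunk above describes the standard row/column-major trick: a column-major GEMM computes a row-major C = A * B by evaluating C^T = (B^T) * (A^T), which is why args[1] is passed in the A position and args[0] in the B position. A quick NumPy check of the identity:

import numpy as np

A = np.random.rand(3, 4)
B = np.random.rand(4, 5)
# the transpose of a column-major product of the swapped, transposed
# operands equals the row-major product
assert np.allclose((B.T @ A.T).T, A @ B)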
src/tf/tf.cpp
...
...
@@ -26,7 +26,6 @@ struct tf_parser
 {
     using attribute_map = std::unordered_map<std::string, tensorflow::AttrValue>;
     using node_map      = std::map<std::string, tensorflow::NodeDef>;
-    // using input_node_map = std::unordered_map<std::string, std::unordered_set<std::string>>;
     using op_func = std::function<instruction_ref(attribute_map, std::vector<instruction_ref>)>;
 
     node_map nodes;
...
...
@@ -149,9 +148,26 @@ struct tf_parser
         return axes;
     }
 
+    std::vector<int64_t> get_axes_from_mask(const size_t num_axes, const uint32_t mask)
+    {
+        uint32_t bitwise_compare = 1;
+        std::vector<int64_t> axes;
+        for(size_t i = 0; i < num_axes; i++)
+        {
+            // the LSB corresponds to axis 0 when determining which axes to begin
+            if(((mask >> i) & bitwise_compare) == 1)
+                axes.push_back(1);
+            else
+                axes.push_back(0);
+        }
+        return axes;
+    }
+
     tf_parser()
     {
+        add_generic_op("All", op::identity{});
         add_generic_op("Identity", op::identity{});
+        add_generic_op("LessEqual", op::identity{});
         add_generic_op("Relu", op::relu{});
         add_generic_op("Relu6", op::clip{6.0, 0.0});
         add_generic_op("Rsqrt", op::rsqrt{});
...
...
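get_axes_from_mask decodes a TensorFlow axis bitmask into a 0/1 flag per axis, least-significant bit first. The same decoding in Python (a sketch, not part of this commit):

def get_axes_from_mask(num_axes, mask):
    # bit i of the mask flags axis i; the LSB corresponds to axis 0
    return [(mask >> i) & 1 for i in range(num_axes)]

assert get_axes_from_mask(4, 0b0101) == [1, 0, 1, 0]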
@@ -166,6 +182,7 @@ struct tf_parser
         add_mem_op("AvgPool", &tf_parser::parse_pooling);
         add_mem_op("BatchMatMul", &tf_parser::parse_matmul, false);
         add_mem_op("BatchMatMulV2", &tf_parser::parse_matmul, false);
         add_mem_op("BiasAdd", &tf_parser::parse_biasadd);
+        add_mem_op("Cast", &tf_parser::parse_cast, false);
         add_mem_op("ConcatV2", &tf_parser::parse_concat, false);
...
...
@@ -177,14 +194,15 @@ struct tf_parser
         add_mem_op("GatherV2", &tf_parser::parse_gather, false);
         add_mem_op("MatMul", &tf_parser::parse_matmul, false);
         add_mem_op("MaxPool", &tf_parser::parse_pooling);
-        add_mem_op("Mean", &tf_parser::parse_mean);
+        add_mem_op("Mean", &tf_parser::parse_mean, false);
+        add_mem_op("OneHot", &tf_parser::parse_onehot, false);
         add_mem_op("Pack", &tf_parser::parse_pack, false);
         add_mem_op("Pad", &tf_parser::parse_pad);
         add_mem_op("Reshape", &tf_parser::parse_reshape, false);
         add_mem_op("Slice", &tf_parser::parse_slice, false);
-        add_mem_op("Softmax", &tf_parser::parse_softmax<op::softmax>);
+        add_mem_op("Softmax", &tf_parser::parse_softmax<op::softmax>, false);
         add_mem_op("Squeeze", &tf_parser::parse_squeeze, false);
-        add_mem_op("StridedSlice", &tf_parser::parse_stridedslice);
+        add_mem_op("StridedSlice", &tf_parser::parse_stridedslice, false);
         add_mem_op("Transpose", &tf_parser::parse_transpose, false);
     }
...
...
@@ -547,7 +565,7 @@ struct tf_parser
         }
         if(contains(attributes, "transpose_b"))
         {
-            transb = attributes.at("transpose_a").b();
+            transb = attributes.at("transpose_b").b();
         }
         if(contains(attributes, "adj_x"))
...
...
@@ -574,8 +592,7 @@ struct tf_parser
     parse_mean(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
     {
         bool keep_dims = attributes.at("keep_dims").b();
         auto lens      = args[0]->get_shape().lens();
-        auto axes      = parse_axes(args[1]->eval().get<int32_t>().to_vector<int64_t>(), lens.size());
+        auto axes      = args[1]->eval().get<int32_t>().to_vector<int64_t>();
 
         if(keep_dims)
         {
...
...
@@ -588,6 +605,32 @@ struct tf_parser
         }
     }
 
+    instruction_ref
+    parse_onehot(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
+    {
+        size_t depth = static_cast<size_t>(args[1]->eval().at<int32_t>());
+
+        int64_t axis    = -1;
+        float on_value  = args[2]->eval().at<float>();
+        float off_value = args[3]->eval().at<float>();
+
+        std::vector<float> depth_input(depth * depth, off_value);
+        for(int i = 0; i < depth; i++)
+        {
+            depth_input[depth * i + i] = on_value;
+        }
+
+        if(contains(attributes, "axis"))
+            axis = attributes.at("axis").i();
+
+        if(axis == -1)
+        {
+            shape s{shape::float_type, {depth, depth}};
+            auto l0 = prog.add_literal({s, depth_input});
+            return prog.add_instruction(op::gather{0}, {l0, args[0]});
+        }
+
+        MIGRAPHX_THROW("MIGraphX does not support axis != -1");
+    }
+
     instruction_ref
     parse_pack(const std::string&, const attribute_map& attributes, std::vector<instruction_ref> args)
...
...
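parse_onehot builds a depth x depth table holding on_value on the diagonal and off_value elsewhere, then gathers rows of it by index, so OneHot reduces to a literal plus a gather. The same trick in NumPy:

import numpy as np

depth, on_value, off_value = 4, 1.0, 0.0
table = np.full((depth, depth), off_value)
np.fill_diagonal(table, on_value)     # table[i, i] = on_value

indices = np.array([2, 0, 3])
one_hot = table[indices]              # gather along axis 0
assert one_hot.shape == (3, depth) and one_hot[0, 2] == on_value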
@@ -799,21 +842,50 @@ struct tf_parser
                    std::vector<instruction_ref> args)
     {
         op::slice op;
-        auto starts     = args[1]->eval().get<int32_t>().to_vector();
-        auto ends       = args[2]->eval().get<int32_t>().to_vector();
-        size_t num_axes = args[0]->get_shape().lens().size();
+        auto starts = args[1]->eval().get<int32_t>().to_vector();
+        auto ends   = args[2]->eval().get<int32_t>().to_vector();
+        auto l0     = args[0];
+        size_t num_axes          = l0->get_shape().lens().size();
+        std::vector<size_t> axes = l0->get_shape().lens();
 
         op.starts = std::vector<int64_t>(starts.begin(), starts.end());
         op.ends   = std::vector<int64_t>(ends.begin(), ends.end());
         op.axes   = std::vector<int64_t>(num_axes);
         std::iota(op.axes.begin(), op.axes.end(), 0);
 
+        uint32_t begin_mask       = 0;
+        uint32_t end_mask         = 0;
+        uint32_t shrink_axis_mask = 0;
+        uint32_t bitwise_compare  = 1;
+        std::vector<int64_t> squeeze_axes;
+
+        if(contains(attributes, "begin_mask"))
+            begin_mask = static_cast<uint32_t>(attributes.at("begin_mask").i());
+
+        if(contains(attributes, "end_mask"))
+            end_mask = static_cast<uint32_t>(attributes.at("end_mask").i());
+
+        if(contains(attributes, "shrink_axis_mask"))
+            shrink_axis_mask = static_cast<uint32_t>(attributes.at("shrink_axis_mask").i());
+
+        std::vector<int64_t> begin_axes = get_axes_from_mask(num_axes, begin_mask);
+        std::vector<int64_t> end_axes   = get_axes_from_mask(num_axes, end_mask);
+
+        for(size_t i = 0; i < num_axes; i++)
+        {
+            if(begin_axes.at(i) == 1)
+            {
+                op.starts.at(i) = 0;
+            }
+            if(end_axes.at(i) == 1)
+            {
+                op.ends.at(i) = axes.at(i);
+            }
+        }
+
+        auto l1 = prog.add_instruction(op, l0);
+        if(shrink_axis_mask == 0)
+            return l1;
+
         for(size_t i = 0; i < num_axes; i++)
         {
             // the LSB corresponds to axis 0 when determining which axes to squeeze
...
...
@@ -821,8 +893,7 @@ struct tf_parser
                 squeeze_axes.push_back(i);
         }
 
-        auto l0 = prog.add_instruction(op, make_contiguous(args[0]));
-        return to_nhwc(prog.add_instruction(op::squeeze{squeeze_axes}, l0));
+        return prog.add_instruction(op::squeeze{squeeze_axes}, l1);
     }
 
     instruction_ref
...
...
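In parse_stridedslice, a set bit in begin_mask resets that axis's start to 0 and a set bit in end_mask extends that axis's end to the full dimension size, matching TensorFlow's defaulted t[:, 1:3]-style slices. A NumPy-flavored sketch of the mask application (illustrative only):

dims = (3, 4)
starts, ends = [1, 1], [3, 3]
begin_mask, end_mask = 0b01, 0b10     # bit i refers to axis i
starts = [0 if (begin_mask >> i) & 1 else s for i, s in enumerate(starts)]
ends   = [dims[i] if (end_mask >> i) & 1 else e for i, e in enumerate(ends)]
assert (starts, ends) == ([0, 1], [3, 4])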
@@ -862,10 +933,16 @@ struct tf_parser
         if(instructions.count(name) == 0)
         {
             auto&& node = nodes.at(name);
+            // assert ops ignored
+            if(node.op() == "Assert" or contains(name, "Assert"))
+                return;
             std::vector<instruction_ref> args;
 
             for(auto&& input : node.input())
             {
+                // control dependencies (signified by ^ before the name) are ignored
+                if(contains(input, "^"))
+                    continue;
                 if(nodes.count(input) > 0)
                 {
                     auto&& iname = get_name(nodes.at(input));
...
...
test/onnx/gen_onnx.py
...
...
@@ -4,6 +4,13 @@ from onnx import helper
 from onnx import numpy_helper
 from onnx import AttributeProto, TensorProto, GraphProto
 
 
+def onnx_test(op_test):
+    def run_test():
+        model_def = helper.make_model(op_test(), producer_name=op_test.__name__)
+        onnx.save(model_def, '{}.onnx'.format(op_test.__name__))
+
+    return run_test
+
+
+@onnx_test
 def acos_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
...
...
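The new onnx_test decorator factors out the make_model/onnx.save boilerplate: a decorated test just returns its graph, and run_test derives both the producer name and the output file name from the function's __name__. One visible consequence in the hunks below is that file names are now uniform (acos_test, for example, writes acos_test.onnx instead of the old hand-picked onnx_acos.onnx). A converted test then reads roughly as follows (the node construction is reconstructed, since the diff elides it):

@onnx_test
def acos_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
    node = onnx.helper.make_node('Acos', inputs=['x'], outputs=['y'])
    # no make_model/onnx.save here; the decorator handles serialization
    return helper.make_graph([node], 'test_acos', [x], [y])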
@@ -14,16 +21,14 @@ def acos_test():
         outputs=['y'],
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_acos',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='acos-example')
-
-    onnx.save(model_def, 'onnx_acos.onnx')
-
 
+@onnx_test
 def add_bcast_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 4])
...
...
@@ -37,16 +42,14 @@ def add_bcast_test():
         outputs=['2'])
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-add_bcast',
         [x, y],
         [z])
 
-    model_def = helper.make_model(graph_def, producer_name='add_bcast-example')
-
-    onnx.save(model_def, 'add_bcast_test.onnx')
-
 
+@onnx_test
 def add_fp16_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [1])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT16, [1])
...
...
@@ -58,7 +61,7 @@ def add_fp16_test():
         outputs=['2'],
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-add-fp16',
         [x, y],
...
...
@@ -71,6 +74,7 @@ def add_fp16_test():
-    model_def = helper.make_model(graph_def, producer_name=('add-fp16-example'))
-
-    onnx.save(model_def, 'add_fp16_test.onnx')
 
 
+@onnx_test
 def add_scalar_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [])
...
...
@@ -82,7 +86,7 @@ def add_scalar_test():
         outputs=['2'])
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-add-scalar',
         [x, y],
...
...
@@ -90,9 +94,7 @@ def add_scalar_test():
         initializer=[helper.make_tensor('1', TensorProto.FLOAT, [], [1])])
 
-    model_def = helper.make_model(graph_def, producer_name='add_scalar-example')
-
-    onnx.save(model_def, 'add_scalar_test.onnx')
-
 
+@onnx_test
 def argmax_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])
...
...
@@ -106,16 +108,14 @@ def argmax_test():
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_argmax',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='argmax-example')
-
-    onnx.save(model_def, 'argmax_test.onnx')
-
 
+@onnx_test
 def argmin_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 5])
...
...
@@ -128,16 +128,14 @@ def argmin_test():
         keepdims=0)
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_argmin',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='argmin-example')
-
-    onnx.save(model_def, 'argmin_test.onnx')
-
 
+@onnx_test
 def asin_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
...
...
@@ -148,16 +146,14 @@ def asin_test():
         outputs=['y'],
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_asin',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='asin-example')
-
-    onnx.save(model_def, 'asin_test.onnx')
-
 
+@onnx_test
 def atan_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
...
...
@@ -168,16 +164,14 @@ def atan_test():
         outputs=['y'],
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_atan',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='atan-example')
-
-    onnx.save(model_def, 'atan_test.onnx')
-
 
+@onnx_test
 def cast_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [10])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
...
...
@@ -189,16 +183,14 @@ def cast_test():
         to=1)
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_cast',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='cast-example')
-
-    onnx.save(model_def, 'cast_test.onnx')
-
 
+@onnx_test
 def clip_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
...
...
@@ -212,16 +204,14 @@ def clip_test():
         min=0.0)
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-model',
         [x],
         [y])
 
-    model_def = helper.make_model(graph_def, producer_name='clip-example')
-
-    onnx.save(model_def, 'clip_test.onnx')
-
 
+@onnx_test
 def concat_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 4, 3])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [7, 4, 3])
...
...
@@ -234,16 +224,14 @@ def concat_test():
         outputs=['2'],
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-concat',
         [x, y],
         [z])
 
-    model_def = helper.make_model(graph_def, producer_name='concat-example')
-
-    onnx.save(model_def, 'concat_test.onnx')
-
 
+@onnx_test
 def constant_test():
     x = np.array([0, 1, 2])
     y = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
...
...
@@ -260,16 +248,14 @@ def constant_test():
         ),
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-constant',
         [],
         [y])
 
-    model_def = helper.make_model(graph_def, producer_name=('constant-example'))
-
-    onnx.save(model_def, 'constant_test.onnx')
-
 
+@onnx_test
 def constant_fill_test():
     value = helper.make_tensor_value_info('value', TensorProto.FLOAT, [2, 3])
...
...
@@ -283,16 +269,14 @@ def constant_fill_test():
         input_as_shape=0,
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'constant_fill',
         [],
         [value],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='constant-fill-example')
-
-    onnx.save(model_def, 'constant_fill_test.onnx')
-
 
+@onnx_test
 def constant_fill_input_as_shape_test():
     np_shape = np.array([2, 3])
     shape = helper.make_tensor_value_info('shape', TensorProto.INT32, [2])
...
...
@@ -321,16 +305,14 @@ def constant_fill_input_as_shape_test():
         input_as_shape=1,
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [const_shape_node, node],
         'constant_fill',
         [],
         [value],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='constant-fill-example')
-
-    onnx.save(model_def, 'constant_fill_input_as_shape_test.onnx')
-
 
+@onnx_test
 def constant_scalar_test():
     x = np.array([1])
     y = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1])
...
...
@@ -347,16 +329,14 @@ def constant_scalar_test():
         ),
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-constant',
         [],
         [y])
 
-    model_def = helper.make_model(graph_def, producer_name=('constant-scalar-example'))
-
-    onnx.save(model_def, 'constant_scalar_test.onnx')
-
 
+@onnx_test
 def const_of_shape_empty_input_test():
     tensor_val = onnx.helper.make_tensor('value',
...
...
@@ -385,16 +365,14 @@ def const_of_shape_empty_input_test():
         value=tensor_val,
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [shape_const, node],
         'constant_of_shape',
         [],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='constant-of-shape')
-
-    onnx.save(model_def, 'const_of_shape_empty_input_test.onnx')
-
 
+@onnx_test
 def const_of_shape_float_test():
     tensor_val = onnx.helper.make_tensor('value',
...
...
@@ -423,16 +401,14 @@ def const_of_shape_float_test():
         value=tensor_val)
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [shape_const, node],
         'constant_of_shape',
         [],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='constant-of-shape')
-
-    onnx.save(model_def, 'const_of_shape_float_test.onnx')
-
 
+@onnx_test
 def const_of_shape_int64_test():
     tensor_val = onnx.helper.make_tensor('value',
...
...
@@ -460,16 +436,14 @@ def const_of_shape_int64_test():
         value=tensor_val)
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [shape_const, node],
         'constant_of_shape',
         [],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='constant-of-shape')
-
-    onnx.save(model_def, 'const_of_shape_int64_test.onnx')
-
 
+@onnx_test
 def const_of_shape_no_value_attr_test():
     shape_val = np.array([2, 3, 4]).astype(np.int64)
     shape_ts = helper.make_tensor(
...
...
@@ -492,16 +466,14 @@ def const_of_shape_no_value_attr_test():
         outputs=['y'],
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [shape_const, node],
         'constant_of_shape',
         [],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='constant-of-shape')
-
-    onnx.save(model_def, 'const_of_shape_no_value_attr_test.onnx')
-
 
+@onnx_test
 def conv_autopad_fail_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 1, 1])
...
...
@@ -517,16 +489,14 @@ def conv_autopad_fail_test():
         pads=[0, 0, 1, 1, 0, 0, 1, 1])
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_conv',
         [x, y],
         [out],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='conv-example')
-
-    onnx.save(model_def, 'conv_autopad_fail_test.onnx')
-
 
+@onnx_test
 def conv_bias_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 5, 5])
...
...
@@ -541,16 +511,14 @@ def conv_bias_test():
         strides=[1, 1])
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_conv',
         [x, y, z],
         [out],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='conv-example')
-
-    onnx.save(model_def, 'conv_bias_test.onnx')
-
 
+@onnx_test
 def conv_bn_relu_maxpool_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 5, 5])
...
...
@@ -592,16 +560,14 @@ def conv_bn_relu_maxpool_test():
         kernel_shape=[2, 2])
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node0, node1, node2, node3],
         'test_conv_bn_relu',
         [x, y, z, m, n, k, l],
         [out],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='conv_relu-example')
-
-    onnx.save(model_def, 'conv_bn_relu_maxpool_test.onnx')
-
 
+@onnx_test
 def conv_relu_maxpool_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 5, 5])
...
...
@@ -632,16 +598,14 @@ def conv_relu_maxpool_test():
         kernel_shape=[2, 2])
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node1, node2, node3],
         'test_conv_relu',
         [x, y, z],
         [out],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='conv_relu-example')
-
-    onnx.save(model_def, 'conv_relu_maxpool_test.onnx')
-
 
+@onnx_test
 def conv_relu_maxpool_x2_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [5, 3, 5, 5])
...
...
@@ -698,16 +662,14 @@ def conv_relu_maxpool_x2_test():
         kernel_shape=[2, 2])
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node1, node2, node3, node4, node5, node6],
         'test_conv_relu2',
         [x, y, z, m, n],
         [out],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='conv_relu-example')
-
-    onnx.save(model_def, 'conv_relu_maxpool_x2_test.onnx')
-
 
+@onnx_test
 def cos_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
...
...
@@ -718,16 +680,14 @@ def cos_test():
         outputs=['y'],
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_cos',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='cos-example')
-
-    onnx.save(model_def, 'cos_test.onnx')
-
 
+@onnx_test
 def cosh_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1])
...
...
@@ -738,16 +698,14 @@ def cosh_test():
         outputs=['y'],
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_cosh',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='cosh-example')
-
-    onnx.save(model_def, 'cosh_test.onnx')
-
 
+@onnx_test
 def dropout_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 2, 2])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 2, 2])
...
...
@@ -758,16 +716,14 @@ def dropout_test():
         outputs=['1'],
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-dropout',
         [x],
         [y])
 
-    model_def = helper.make_model(graph_def, producer_name='dropout-example')
-
-    onnx.save(model_def, 'dropout_test.onnx')
-
 
+@onnx_test
 def elu_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
...
...
@@ -779,16 +735,14 @@ def elu_test():
         alpha=0.01)
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-model',
         [x],
         [y])
 
-    model_def = helper.make_model(graph_def, producer_name='elu-example')
-
-    onnx.save(model_def, 'elu_test.onnx')
-
 
+@onnx_test
 def erf_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10, 15])
...
...
@@ -799,16 +753,14 @@ def erf_test():
         outputs=['y'],
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_erf',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='erf-example')
-
-    onnx.save(model_def, 'erf_test.onnx')
-
 
+@onnx_test
 def exp_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
...
...
@@ -819,16 +771,14 @@ def exp_test():
         outputs=['y'],
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_exp',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='exp-example')
-
-    onnx.save(model_def, 'exp_test.onnx')
-
 
+@onnx_test
 def expand_test():
     shape_val = np.array([2, 3, 4, 5]).astype(np.int64)
     shape_ts = helper.make_tensor(
...
...
@@ -852,16 +802,14 @@ def expand_test():
         outputs=['y'])
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [shape_const, node],
         'expand',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='expand')
-
-    onnx.save(model_def, 'expand_test.onnx')
-
 
+@onnx_test
 def flatten_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
     y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [6, 20])
...
...
@@ -880,16 +828,14 @@ def flatten_test():
         outputs=['3'])
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node, node2],
         'test-flatten',
         [x],
         [y, y2])
 
-    model_def = helper.make_model(graph_def, producer_name=('flatten-example'))
-
-    onnx.save(model_def, 'flatten_test.onnx')
-
 
+@onnx_test
 def gather_test():
     x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 4, 5, 6])
     i = helper.make_tensor_value_info('indices', TensorProto.INT32, [2, 3, 4, 5])
...
...
@@ -902,16 +848,14 @@ def gather_test():
         axis=1,
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_gather',
         [x, i],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='gather-example')
-
-    onnx.save(model_def, 'gather_test.onnx')
-
 
+@onnx_test
 def gemm_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5, 7])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [11, 5])
...
...
@@ -928,16 +872,14 @@ def gemm_test():
         transB=1)
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-gemm',
         [x, y, z],
         [a])
 
-    model_def = helper.make_model(graph_def, producer_name=('gemm-example'))
-
-    onnx.save(model_def, 'gemm_test.onnx')
-
 
+@onnx_test
 def gemm_ex_test():
     m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 1, 5, 6])
     m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 5, 7])
...
...
@@ -953,16 +895,14 @@ def gemm_ex_test():
         transA=1)
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_gemm_ex',
         [m1, m2, m3],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='gemm-example')
-
-    onnx.save(model_def, 'gemm_ex_test.onnx')
-
 
+@onnx_test
 def gemm_ex_brcst_test():
     m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 1, 5, 6])
     m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 5, 7])
...
...
@@ -978,16 +918,14 @@ def gemm_ex_brcst_test():
         transA=1)
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_gemm_ex',
         [m1, m2, m3],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='gemm-example')
-
-    onnx.save(model_def, 'gemm_ex_brcst_test.onnx')
-
 
+@onnx_test
 def globalavgpool_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 16, 16])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 1, 1])
...
...
@@ -998,16 +936,14 @@ def globalavgpool_test():
         outputs=['1'],
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-globalavgpool',
         [x],
         [y])
 
-    model_def = helper.make_model(graph_def, producer_name='globalavgpool-example')
-
-    onnx.save(model_def, 'globalavgpool_test.onnx')
-
 
+@onnx_test
 def globalmaxpool_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 16, 16])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 1, 1])
...
...
@@ -1018,16 +954,14 @@ def globalmaxpool_test():
         outputs=['1'],
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-globalmaxpool',
         [x],
         [y])
 
-    model_def = helper.make_model(graph_def, producer_name='globalmaxpool-example')
-
-    onnx.save(model_def, 'globalmaxpool_test.onnx')
-
 
+@onnx_test
 def group_conv_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 4, 16, 16])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 1, 3, 3])
...
...
@@ -1040,16 +974,14 @@ def group_conv_test():
         outputs=['2'],
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-group_conv',
         [x, y],
         [z])
 
-    model_def = helper.make_model(graph_def, producer_name='group_conv-example')
-
-    onnx.save(model_def, 'group_conv_test.onnx')
-
 
+@onnx_test
 def imagescaler_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 16, 16])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 16, 16])
...
...
@@ -1062,16 +994,14 @@ def imagescaler_test():
         scale=0.5)
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-imagescaler',
         [x],
         [y])
 
-    model_def = helper.make_model(graph_def, producer_name='imagescaler-example')
-
-    onnx.save(model_def, 'imagescaler_test.onnx')
-
 
+@onnx_test
 def implicit_add_bcast_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 4, 1])
...
...
@@ -1083,16 +1013,14 @@ def implicit_add_bcast_test():
         outputs=['2'],
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-multi_bcast',
         [x, y],
         [z])
 
-    model_def = helper.make_model(graph_def, producer_name='implicit_bcast-example')
-
-    onnx.save(model_def, 'implicit_add_bcast_test.onnx')
-
 
+@onnx_test
 def implicit_pow_bcast_test():
     arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
     arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 4, 1])
...
...
@@ -1104,16 +1032,14 @@ def implicit_pow_bcast_test():
         outputs=['out'],
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'pow_test',
         [arg0, arg1],
         [arg_out],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='pow2')
-
-    onnx.save(model_def, 'implicit_pow_bcast_test.onnx')
-
 
+@onnx_test
 def implicit_sub_bcast_test():
     arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
     arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 5])
...
...
@@ -1125,16 +1051,14 @@ def implicit_sub_bcast_test():
         outputs=['out'],
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'subtraction2',
         [arg0, arg1],
         [arg_out],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='add2')
-
-    onnx.save(model_def, 'implicit_sub_bcast_test.onnx')
-
 
+@onnx_test
 def leaky_relu_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
...
...
@@ -1146,16 +1070,14 @@ def leaky_relu_test():
         alpha=0.01)
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-model',
         [x],
         [y])
 
-    model_def = helper.make_model(graph_def, producer_name='leaky_relu-example')
-
-    onnx.save(model_def, 'leaky_relu_test.onnx')
-
 
+@onnx_test
 def log_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
...
...
@@ -1166,16 +1088,14 @@ def log_test():
         outputs=['y'],
     )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_log',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='log-example')
-
-    onnx.save(model_def, 'log_test.onnx')
-
 
+@onnx_test
 def logsoftmax_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 5, 6])
...
...
@@ -1187,16 +1107,14 @@ def logsoftmax_test():
         axis=1)
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_logsoftmax',
         [x],
         [y])
 
-    model_def = helper.make_model(graph_def, producer_name='logsoftmax-example')
-
-    onnx.save(model_def, 'logsoftmax_test.onnx')
-
 
+@onnx_test
 def lrn_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 28, 24, 24])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 28, 24, 24])
...
...
@@ -1211,16 +1129,14 @@ def lrn_test():
         outputs=['1'])
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-lrn',
         [x],
         [y])
 
-    model_def = helper.make_model(graph_def, producer_name=('lrn-example'))
-
-    onnx.save(model_def, 'lrn_test.onnx')
-
 
+@onnx_test
 def matmul_bmbm_test():
     m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 6, 7])
     m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [5, 2, 1, 7, 8])
...
...
@@ -1232,16 +1148,14 @@ def matmul_bmbm_test():
         outputs=['y'],
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_matmul',
         [m1, m2],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='matmul-example')
-
-    onnx.save(model_def, 'matmul_bmbm_test.onnx')
-
 
+@onnx_test
 def matmul_bmv_test():
     m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 6, 7])
     m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7])
...
...
@@ -1253,16 +1167,14 @@ def matmul_bmv_test():
         outputs=['y'],
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_matmul',
         [m1, m2],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='matmul-example')
-
-    onnx.save(model_def, 'matmul_bmv_test.onnx')
-
 
+@onnx_test
 def matmul_mv_test():
     m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [6, 7])
     m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7])
...
...
@@ -1274,16 +1186,14 @@ def matmul_mv_test():
         outputs=['y'],
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_matmul',
         [m1, m2],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='matmul-example')
-
-    onnx.save(model_def, 'matmul_mv_test.onnx')
-
 
+@onnx_test
 def matmul_vbm_test():
     m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [7])
     m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [5, 7, 8])
...
...
@@ -1295,16 +1205,14 @@ def matmul_vbm_test():
         outputs=['y'],
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_matmul',
         [m1, m2],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='matmul-example')
-
-    onnx.save(model_def, 'matmul_vbm_test.onnx')
-
 
+@onnx_test
 def matmul_vm_test():
     m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [7])
     m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7, 8])
...
...
@@ -1316,16 +1224,14 @@ def matmul_vm_test():
         outputs=['y'],
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_matmul',
         [m1, m2],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='matmul-example')
-
-    onnx.save(model_def, 'matmul_vm_test.onnx')
-
 
+@onnx_test
 def matmul_vv_test():
     m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [7])
     m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7])
...
...
@@ -1337,16 +1243,14 @@ def matmul_vv_test():
         outputs=['y'],
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_matmul',
         [m1, m2],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='matmul-example')
-
-    onnx.save(model_def, 'matmul_vv_test.onnx')
-
 
+@onnx_test
 def max_test():
     a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
     b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
...
...
@@ -1359,16 +1263,14 @@ def max_test():
         outputs=['3'],
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-dropout',
         [a, b, c],
         [y])
 
-    model_def = helper.make_model(graph_def, producer_name='max-example')
-
-    onnx.save(model_def, 'max_test.onnx')
-
 
+@onnx_test
 def min_test():
     a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
     b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
...
...
@@ -1381,16 +1283,14 @@ def min_test():
         outputs=['3'],
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-dropout',
         [a, b, c],
         [y])
 
-    model_def = helper.make_model(graph_def, producer_name='min-example')
-
-    onnx.save(model_def, 'min_test.onnx')
-
 
+@onnx_test
 def no_pad_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 2])
...
...
@@ -1403,16 +1303,14 @@ def no_pad_test():
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-no-pad',
         [x],
         [y])
 
-    model_def = helper.make_model(graph_def, producer_name='no-pad-example')
-
-    onnx.save(model_def, 'no_pad_test.onnx')
-
 
+@onnx_test
 def pad_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 4])
...
...
@@ -1425,16 +1323,14 @@ def pad_test():
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-pad',
         [x],
         [y])
 
-    model_def = helper.make_model(graph_def, producer_name='pad-example')
-
-    onnx.save(model_def, 'pad_test.onnx')
-
 
+@onnx_test
 def pow_test():
     arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
     arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 3, 4, 5])
...
...
@@ -1447,16 +1343,14 @@ def pow_test():
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'pow_test',
         [arg0, arg1],
         [arg_out],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='pow2')
-
-    onnx.save(model_def, 'pow_test.onnx')
-
 
+@onnx_test
 def reducemean_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
...
...
@@ -1470,16 +1364,14 @@ def reducemean_test():
         keepdims=0)
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_reducemean',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='reducemean-example')
-
-    onnx.save(model_def, 'reducemean_test.onnx')
-
 
+@onnx_test
 def reducemean_keepdims_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 6])
...
...
@@ -1493,16 +1385,14 @@ def reducemean_keepdims_test():
         keepdims=1)
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_reducemean',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='reducemean-example')
-
-    onnx.save(model_def, 'reducemean_keepdims_test.onnx')
-
 
+@onnx_test
 def reducesum_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 1])
...
...
@@ -1516,16 +1406,14 @@ def reducesum_test():
         keepdims=0)
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_reducesum',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='reducesum-example')
-
-    onnx.save(model_def, 'reducesum_test.onnx')
-
 
+@onnx_test
 def reducesum_multiaxis_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 1])
...
...
@@ -1539,16 +1427,14 @@ def reducesum_multiaxis_test():
         keepdims=0)
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_reducesum',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='reducesum-example')
-
-    onnx.save(model_def, 'reducesum_multiaxis_test.onnx')
-
 
+@onnx_test
 def reducesum_keepdims_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 1])
...
...
@@ -1562,16 +1448,14 @@ def reducesum_keepdims_test():
         keepdims=1)
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_reducesum',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='reducesum-example')
-
-    onnx.save(model_def, 'reducesum_keepdims_test.onnx')
-
 
+@onnx_test
 def reshape_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [4, 2, 3])
     x_shape = helper.make_tensor_value_info('1', TensorProto.INT64, [2])
...
...
@@ -1592,7 +1476,7 @@ def reshape_test():
         outputs=['3'])
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node, node2],
         'test-reshape',
         [x, x_shape],
...
...
@@ -1600,9 +1484,7 @@ def reshape_test():
         initializer=[helper.make_tensor('1', TensorProto.INT64, [2], [3, 8])])
 
-    model_def = helper.make_model(graph_def, producer_name=('reshape-example'))
-
-    onnx.save(model_def, 'reshape_test.onnx')
-
 
+@onnx_test
 def reshape_non_standard_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3, 4])
     trans_x = helper.make_tensor_value_info('trans_x', TensorProto.FLOAT, [2, 4, 3])
...
...
@@ -1622,16 +1504,14 @@ def reshape_non_standard_test():
         shape=[4, 3, 2])
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [trans, res],
         'reshape-ns',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='reshape')
-
-    onnx.save(model_def, 'reshape_non_standard_test.onnx')
-
 
+@onnx_test
 def shape_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
     y = helper.make_tensor_value_info('y', TensorProto.INT64, [4])
...
...
@@ -1642,16 +1522,14 @@ def shape_test():
         outputs=['y'],
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_shape',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='shape-example')
-
-    onnx.save(model_def, 'shape_test.onnx')
-
 
+@onnx_test
 def shape_gather_test():
     values = np.array([1])
     value = helper.make_tensor_value_info('value', TensorProto.INT32, [1])
...
...
@@ -1685,16 +1563,14 @@ def shape_gather_test():
         axis=0,
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node_const, node_shape, node_gather],
         'shape_gather',
         [x],
         [z],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='shape-gather-example')
-
-    onnx.save(model_def, 'shape_gather_test.onnx')
-
 
+@onnx_test
 def sign_test():
     x = helper.make_tensor_value_info('x', TensorProto.DOUBLE, [10, 5])
     y = helper.make_tensor_value_info('y', TensorProto.DOUBLE, [10, 5])
...
...
@@ -1705,16 +1581,14 @@ def sign_test():
         outputs=['y'],
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_sign',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='sign-example')
-
-    onnx.save(model_def, 'sign_test.onnx')
-
 
+@onnx_test
 def sin_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
...
...
@@ -1725,16 +1599,14 @@ def sin_test():
         outputs=['y'],
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_sin',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='sin-example')
-
-    onnx.save(model_def, 'sin_test.onnx')
-
 
+@onnx_test
 def sinh_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
...
...
@@ -1745,16 +1617,14 @@ def sinh_test():
         outputs=['y'],
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_sinh',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='sinh-example')
-
-    onnx.save(model_def, 'sinh_test.onnx')
-
 
+@onnx_test
 def slice_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3, 2])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 2])
...
...
@@ -1768,16 +1638,14 @@ def slice_test():
         outputs=['1'])
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-slice',
         [x],
         [y])
 
-    model_def = helper.make_model(graph_def, producer_name=('slice-example'))
-
-    onnx.save(model_def, 'slice_test.onnx')
-
 
+@onnx_test
 def softmax_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3])
...
...
@@ -1788,16 +1656,14 @@ def softmax_test():
         outputs=['1'])
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-softmax',
         [x],
         [y])
 
-    model_def = helper.make_model(graph_def, producer_name=('softmax-example'))
-
-    onnx.save(model_def, 'softmax_test.onnx')
-
 
+@onnx_test
 def sqrt_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10, 15])
...
...
@@ -1808,16 +1674,14 @@ def sqrt_test():
         outputs=['y'],
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_sqrt',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='sqrt-example')
-
-    onnx.save(model_def, 'sqrt_test.onnx')
-
 
+@onnx_test
 def squeeze_unsqueeze_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 1, 1, 2, 1])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 2])
...
...
@@ -1837,16 +1701,14 @@ def squeeze_unsqueeze_test():
         outputs=['2'])
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node, node2],
         'test-squeeze-unsqueeze',
         [x],
         [z])
 
-    model_def = helper.make_model(graph_def, producer_name=('squeeze-unsqueeze-example'))
-
-    onnx.save(model_def, 'squeeze_unsqueeze_test.onnx')
-
 
+@onnx_test
 def sub_bcast_test():
     arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
     arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 4])
...
...
@@ -1861,16 +1723,14 @@ def sub_bcast_test():
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'subtraction2',
         [arg0, arg1],
         [arg_out],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='subtraction2')
-
-    onnx.save(model_def, 'sub_bcast_test.onnx')
-
 
+@onnx_test
 def sub_scalar_test():
     values = np.array([1])
     arg_node = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
...
...
@@ -1897,16 +1757,14 @@ def sub_scalar_test():
         outputs=['out'],
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [arg_const, node],
         'subtraction1',
         [arg_node],
         [arg_out],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='subtraction1')
-
-    onnx.save(model_def, 'sub_scalar_test.onnx')
-
 
+@onnx_test
 def sum_test():
     a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
     b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
...
...
@@ -1920,16 +1778,14 @@ def sum_test():
         outputs=['3'],
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-sum',
         [a, b, c],
         [y])
 
-    model_def = helper.make_model(graph_def, producer_name='sum-example')
-
-    onnx.save(model_def, 'sum_test.onnx')
-
 
+@onnx_test
 def sum_test():
     a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
     b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
...
...
@@ -1942,16 +1798,14 @@ def sum_test():
         outputs=['3'],
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-sum',
         [a, b, c],
         [y])
 
-    model_def = helper.make_model(graph_def, producer_name='sum-example')
-
-    onnx.save(model_def, 'sum_test.onnx')
-
 
+@onnx_test
 def tan_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
...
...
@@ -1962,16 +1816,14 @@ def tan_test():
         outputs=['y'],
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_tan',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='tan-example')
-
-    onnx.save(model_def, 'tan_test.onnx')
-
 
+@onnx_test
 def tanh_test():
     x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1])
     y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1])
...
...
@@ -1982,16 +1834,14 @@ def tanh_test():
         outputs=['y'],
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test_tanh',
         [x],
         [y],
     )
 
-    model_def = helper.make_model(graph_def, producer_name='tanh-example')
-
-    onnx.save(model_def, 'tahn_test.onnx')
-
 
+@onnx_test
 def transpose_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 2, 2, 3])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 2, 2])
...
...
@@ -2003,16 +1853,49 @@ def transpose_test():
         outputs=['1'],
    )
 
-    graph_def = helper.make_graph(
+    return helper.make_graph(
         [node],
         'test-transpose',
         [x],
         [y])
 
-    model_def = helper.make_model(graph_def, producer_name='transpose-example')
-
-    onnx.save(model_def, 'transpose_test.onnx')
-
 
+@onnx_test
+def transpose_gather_test():
+    x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 5, 4, 6])
+    i = helper.make_tensor_value_info('indices', TensorProto.INT32, [2, 4, 3, 5])
+    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 2, 3, 4, 5, 4, 5, 6])
+
+    td = onnx.helper.make_node(
+        'Transpose',
+        inputs=['data'],
+        outputs=['tdata'],
+        perm=[0, 2, 1, 3],
+    )
+
+    ti = onnx.helper.make_node(
+        'Transpose',
+        inputs=['indices'],
+        outputs=['tindices'],
+        perm=[0, 2, 1, 3])
+
+    node = onnx.helper.make_node(
+        'Gather',
+        inputs=['tdata', 'tindices'],
+        outputs=['y'],
+        axis=1,
+    )
+
+    return helper.make_graph(
+        [td, ti, node],
+        'test_gather',
+        [x, i],
+        [y],
+    )
+
 
+@onnx_test
 def unknown_test():
     x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
     y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 4])
...
...
@@ -2031,7 +1914,7 @@ def unknown_test():
outputs
=
[
'3'
]
)
graph_def
=
helper
.
make_graph
(
return
helper
.
make_graph
(
[
node
,
node2
],
'test-unknown'
,
[
x
,
y
],
...
...
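Every hunk above follows the same pattern: the per-test `model_def`/`onnx.save` boilerplate is dropped, each function now returns its graph, and an `@onnx_test` decorator takes over serialization. The decorator definition sits outside these hunks; a minimal sketch of what it plausibly looks like, assuming it mirrors the `tf_test` decorator added to gen_tf_pb.py below (the exact producer_name handling is an assumption):

    import onnx
    from onnx import helper

    def onnx_test(op_test):
        # Sketch only: wrap the returned graph in a model and save it under
        # the test function's own name, as the removed per-test code used to.
        def run_test():
            graph_def = op_test()
            model_def = helper.make_model(graph_def,
                                          producer_name=op_test.__name__)
            onnx.save(model_def, '{}.onnx'.format(op_test.__name__))

        return run_test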
test/onnx/onnx_test.cpp View file @ 34150e61
...
@@ -4,6 +4,7 @@
 #include <migraphx/operators.hpp>
 #include <migraphx/program.hpp>
 #include <migraphx/instruction.hpp>
+#include <migraphx/instruction_ref.hpp>
 #include <migraphx/onnx.hpp>
 #include "test.hpp"
...
@@ -1015,6 +1016,32 @@ TEST_CASE(transpose_test)
     EXPECT(p == prog);
 }

+TEST_CASE(transpose_gather_test)
+{
+    migraphx::program p;
+    auto make_contiguous = [&p](migraphx::instruction_ref ins) {
+        if(ins->get_shape().standard())
+        {
+            return ins;
+        }
+
+        return p.add_instruction(migraphx::op::contiguous{}, ins);
+    };
+
+    auto data = p.add_parameter("data", migraphx::shape{migraphx::shape::float_type, {3, 5, 4, 6}});
+    auto ind  = p.add_parameter("indices", migraphx::shape{migraphx::shape::int32_type, {2, 4, 3, 5}});
+
+    auto tr_data = p.add_instruction(migraphx::op::transpose{{0, 2, 1, 3}}, data);
+    auto tr_ind  = p.add_instruction(migraphx::op::transpose{{0, 2, 1, 3}}, ind);
+
+    int axis = 1;
+    p.add_instruction(migraphx::op::gather{axis}, make_contiguous(tr_data), make_contiguous(tr_ind));
+
+    auto prog = migraphx::parse_onnx("transpose_gather_test.onnx");
+
+    EXPECT(p == prog);
+}
+
 TEST_CASE(unknown_test)
 {
     migraphx::program p;
...
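The `make_contiguous` lambda mirrors the parser change in src/onnx/onnx.cpp: a transposed tensor is only a strided view, and `op::gather` expects a standard layout, so non-standard inputs are materialized first. A NumPy analogue of the behaviour (illustrative only):

    import numpy as np

    data = np.arange(3 * 5 * 4 * 6, dtype=np.float32).reshape(3, 5, 4, 6)
    tdata = data.transpose(0, 2, 1, 3)     # a strided view, not contiguous
    assert not tdata.flags['C_CONTIGUOUS']
    tdata = np.ascontiguousarray(tdata)    # what op::contiguous does, morally
    out = np.take(tdata, [1, 2], axis=1)   # gather along axis 1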
test/onnx/transpose_gather_test.onnx 0 → 100644 View file @ 34150e61
File added
test/tf/assert_less_equal_test.pb 0 → 100644 View file @ 34150e61
File added
test/tf/gen_tf_pb.py View file @ 34150e61
 import numpy as np
 import tensorflow as tf


+def tf_test(op_test):
+    def run_test():
+        g1 = tf.Graph()
+        op_test(g1)
+        tf.io.write_graph(g1,
+                          '.',
+                          '{}.pb'.format(op_test.__name__),
+                          as_text=False)
+
+    return run_test
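With this decorator each generator below becomes a zero-argument callable: the wrapper hands it a fresh graph, the function builds its ops, and the wrapper serializes the result as `<function_name>.pb`. A hypothetical driver that regenerates the protobufs would just call the decorated names:

    # Hypothetical usage; the decorated functions take no arguments anymore.
    add_test()       # writes ./add_test.pb
    gather_test()    # writes ./gather_test.pb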
-def add_test(g1=tf.Graph()):
+@tf_test
+def add_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='0')
         g2_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='1')
         tf.add(g1_input, g2_input, name='add1')
-        tf.train.write_graph(g1, '.', 'add_test.pb', as_text=False)


-def add_bcast_test(g1=tf.Graph()):
+@tf_test
+def add_bcast_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(2, 3), name='0')
         g2_input = tf.placeholder(tf.float32, shape=(2, 1), name='1')
         tf.math.add(g1_input, g2_input, name='add_bcast1')
-        tf.train.write_graph(g1, '.', 'add_bcast_test.pb', as_text=False)


-def assert_less_equal_test(g1=tf.Graph()):
+@tf_test
+def assert_less_equal_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(2, 3), name='0')
         g2_input = tf.placeholder(tf.float32, shape=(2, 3), name='1')
         with tf.control_dependencies([tf.assert_less_equal(g1_input, g2_input)]):
             tf.add(g1_input, g2_input, name='add1')
-        tf.train.write_graph(g1, '.', 'assert_less_equal_test.pb', as_text=False)


-def batchmatmul_test(g1=tf.Graph()):
+@tf_test
+def batchmatmul_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 2, 8, 4), name='0')
         g2_input = tf.placeholder(tf.float32, shape=(1, 2, 4, 8), name='1')
         tf.matmul(g1_input,
                   g2_input,
                   transpose_a=True,
                   transpose_b=True,
                   name='batchmatmul1')
-        tf.train.write_graph(g1, '.', 'batchmatmul_test.pb', as_text=False)


-def batchnorm_test(g1=tf.Graph()):
+@tf_test
+def batchnorm_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 16, 16, 32), name='0')
         g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='1')
...

@@ -43,271 +46,241 @@ def batchnorm_test(g1=tf.Graph()):
         g1_mean = tf.placeholder(tf.float32, shape=(32), name='3')
         g1_variance = tf.placeholder(tf.float32, shape=(32), name='4')
         tf.nn.fused_batch_norm(g1_input,
                                g1_scale,
                                g1_offset,
                                g1_mean,
                                g1_variance,
                                epsilon=0.00001,
                                is_training=False,
                                name='batchnorm1')
-        tf.train.write_graph(g1, '.', 'batchnorm_test.pb', as_text=False)
-def biasadd_test(g1=tf.Graph()):
+@tf_test
+def biasadd_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 1, 1, 500), name='0')
         g2_input = tf.placeholder(tf.float32, shape=(500), name='1')
         tf.nn.bias_add(g1_input, g2_input, name='bias_add1')
-        tf.train.write_graph(g1, '.', 'biasadd_test.pb', as_text=False)


-def cast_test(g1=tf.Graph()):
+@tf_test
+def cast_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
         tf.cast(g1_input, dtype=tf.int32, name='cast1')
-        tf.train.write_graph(g1, '.', 'cast_test.pb', as_text=False)


-def concat_test(g1=tf.Graph()):
+@tf_test
+def concat_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(4, 7, 3), name='0')
         g2_input = tf.placeholder(tf.float32, shape=(4, 2, 3), name='1')
         tf.concat([g1_input, g2_input], axis=1, name='concat1')
-        tf.train.write_graph(g1, '.', 'concat_test.pb', as_text=False)


-def const_test(g1=tf.Graph()):
+@tf_test
+def const_test(g1):
     with g1.as_default():
         tf.constant(1.0, dtype=tf.float32, name='constant1')
-        tf.train.write_graph(g1, '.', 'constant_test.pb', as_text=False)


-def conv_test(g1=tf.Graph()):
+@tf_test
+def conv_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 16, 16, 3), name='0')
         g1_weights = tf.constant(value=1.0,
                                  dtype=tf.float32,
                                  shape=(3, 3, 3, 32),
                                  name='1')
         tf.nn.conv2d(g1_input, g1_weights, [1, 1, 1, 1], "SAME", name='conv1')
-        tf.train.write_graph(g1, '.', 'conv_test.pb', as_text=False)


-def depthwiseconv_test(g1=tf.Graph()):
+@tf_test
+def depthwiseconv_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 16, 16, 3), name='0')
         g1_weights = tf.constant(value=1.0,
                                  dtype=tf.float32,
                                  shape=(3, 3, 3, 1),
                                  name='1')
         tf.nn.depthwise_conv2d_native(g1_input,
                                       g1_weights, [1, 1, 1, 1],
                                       "SAME",
                                       name='depthwiseconv1')
-        tf.train.write_graph(g1, '.', 'depthwise_conv_test.pb', as_text=False)


-def expanddims_test(g1=tf.Graph()):
+@tf_test
+def expanddims_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(2, 3, 4), name='0')
         tf.expand_dims(g1_input, axis=-1, name='expanddims_neg')
-        tf.train.write_graph(g1, '.', 'expanddims_neg_test.pb', as_text=False)


-def gather_test(g1=tf.Graph()):
+@tf_test
+def gather_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(2, 4), name='0')
         tf.gather(g1_input, [1, 1], axis=1, name='gather1')
-        tf.train.write_graph(g1, '.', 'gather_test.pb', as_text=False)


-def identity_test(g1=tf.Graph()):
+@tf_test
+def identity_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
         tf.identity(g1_input, 'identity')
-        tf.train.write_graph(g1, '.', 'identity_test.pb', as_text=False)


-def matmul_test(g1=tf.Graph()):
+@tf_test
+def matmul_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(8, 4), name='0')
         g2_input = tf.placeholder(tf.float32, shape=(4, 8), name='1')
         tf.matmul(g1_input,
                   g2_input,
                   transpose_a=True,
                   transpose_b=True,
                   name='matmul1')
-        tf.train.write_graph(g1, '.', 'matmul_test.pb', as_text=False)
-def mean_test(g1=tf.Graph()):
+@tf_test
+def mean_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
         tf.math.reduce_mean(g1_input,
                             axis=(2, 3),
                             keepdims=True,
                             name='mean1')
         tf.math.reduce_mean(g1_input,
                             axis=(2, 3),
                             keepdims=False,
                             name='mean2')
-        tf.train.write_graph(g1, '.', 'mean_test.pb', as_text=False)


-def mean_test_nhwc(g1=tf.Graph()):
+@tf_test
+def mean_test_nhwc(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 16, 16, 3), name='0')
         tf.math.reduce_mean(g1_input,
                             axis=(1, 2),
                             keepdims=True,
                             name='mean1')
         tf.math.reduce_mean(g1_input,
                             axis=(1, 2),
                             keepdims=False,
                             name='mean2')
-        tf.train.write_graph(g1, '.', 'mean_test_nhwc.pb', as_text=False)
-def mul_test(g1=tf.Graph()):
+@tf_test
+def mul_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 1, 1, 16), name='0')
         g2_input = tf.placeholder(tf.float32, shape=(1, 1, 1, 16), name='1')
         tf.multiply(g1_input, g2_input, name='mul1')
-        tf.train.write_graph(g1, '.', 'mul_test.pb', as_text=False)


-def pack_test(g1=tf.Graph()):
+@tf_test
+def pack_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(2), name='0')
         g2_input = tf.placeholder(tf.float32, shape=(2), name='1')
         g3_input = tf.placeholder(tf.float32, shape=(2), name='2')
         tf.stack([g1_input, g2_input, g3_input], axis=1, name='pack1')
-        tf.train.write_graph(g1, '.', 'pack_test.pb', as_text=False)


-def pack_test_nhwc(g1=tf.Graph()):
+@tf_test
+def pack_test_nhwc(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 1, 1, 2), name='0')
         g2_input = tf.placeholder(tf.float32, shape=(1, 1, 1, 2), name='1')
         g3_input = tf.placeholder(tf.float32, shape=(1, 1, 1, 2), name='2')
         tf.stack([g1_input, g2_input, g3_input], axis=3, name='pack1')
-        tf.train.write_graph(g1, '.', 'pack_test_nhwc.pb', as_text=False)


-def pooling_test(g1=tf.Graph()):
+@tf_test
+def pooling_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 16, 16, 3), name='0')
         tf.nn.avg_pool(value=g1_input,
                        ksize=(1, 2, 2, 1),
                        strides=(1, 2, 2, 1),
                        padding='VALID',
                        data_format='NHWC',
                        name='avg_pooling')
         tf.nn.max_pool(value=g1_input,
                        ksize=(1, 2, 2, 1),
                        strides=(1, 2, 2, 1),
                        padding='VALID',
                        data_format='NHWC',
                        name='max_pooling')
-        tf.train.write_graph(g1, '.', 'pooling_test.pb', as_text=False)
-def pow_test(g1=tf.Graph()):
+@tf_test
+def pow_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='0')
         g2_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='1')
         tf.pow(g1_input, g2_input, name='pow1')
-        tf.train.write_graph(g1, '.', 'pow_test.pb', as_text=False)


-def relu_test(g1=tf.Graph()):
+@tf_test
+def relu_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
         tf.nn.relu(g1_input, 'relu')
-        tf.train.write_graph(g1, '.', 'relu_test.pb', as_text=False)


-def relu6_test(g1=tf.Graph()):
+@tf_test
+def relu6_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
         tf.nn.relu6(g1_input, 'relu6')
-        tf.train.write_graph(g1, '.', 'relu6_test.pb', as_text=False)


-def reshape_test(g1=tf.Graph()):
+@tf_test
+def reshape_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(16), name='0')
         tf.reshape(g1_input, (1, 1, 1, 16), 'reshape')
-        tf.train.write_graph(g1, '.', 'reshape_test.pb', as_text=False)


-def rsqrt_test(g1=tf.Graph()):
+@tf_test
+def rsqrt_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
         tf.math.rsqrt(g1_input, 'rsqrt')
-        tf.train.write_graph(g1, '.', 'rsqrt_test.pb', as_text=False)


-def slice_test(g1=tf.Graph()):
+@tf_test
+def slice_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(5, 10), name='0')
         tf.slice(g1_input, [1, 0], [2, -1], name='slice1')
-        tf.train.write_graph(g1, '.', 'slice_test.pb', as_text=False)


-def softmax_test(g1=tf.Graph()):
+@tf_test
+def softmax_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 3), name='0')
         tf.nn.softmax(g1_input, name='softmax')
-        tf.train.write_graph(g1, '.', 'softmax_test.pb', as_text=False)


-def sqdiff_test(g1=tf.Graph()):
+@tf_test
+def sqdiff_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='0')
         g2_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='1')
         tf.squared_difference(g1_input, g2_input, name='sqdiff')
-        tf.train.write_graph(g1, '.', 'sqdiff_test.pb', as_text=False)


-def squeeze_test(g1=tf.Graph()):
+@tf_test
+def squeeze_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 2, 3, 1), name='0')
         tf.squeeze(g1_input, name='squeeze')
-        tf.train.write_graph(g1, '.', 'squeeze_test.pb', as_text=False)


-def stopgradient_test(g1=tf.Graph()):
+@tf_test
+def stopgradient_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
         tf.stop_gradient(g1_input, 'stopgradient')
-        tf.train.write_graph(g1, '.', 'stopgradient_test.pb', as_text=False)


-def stridedslice_test(g1=tf.Graph()):
+@tf_test
+def stridedslice_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 1, 1, 10), name='0')
         tf.strided_slice(g1_input, [0, 0, 0, 0], [1, 1, 1, 5], [1, 1, 1, 1],
                          shrink_axis_mask=2,
                          name='stridedslice1')
-        tf.train.write_graph(g1, '.', 'stridedslice_test.pb', as_text=False)
-def stridedslice_masks_test(g1=tf.Graph()):
+@tf_test
+def stridedslice_masks_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 3, 3, 10), name='0')
         tf.strided_slice(g1_input, [0, 1, 1, 0], [0, 0, 0, 0], [1, 1, 1, 1],
                          begin_mask=9,
                          end_mask=15,
                          name='stridedslice1')
-        tf.train.write_graph(g1, '.', 'stridedslice_masks_test.pb', as_text=False)
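The begin/end masks here are bit fields over the four axes: begin_mask=9 (0b1001) tells TensorFlow to ignore the supplied begin value on axes 0 and 3, and end_mask=15 (0b1111) to ignore every end value and slice to each dimension's extent. A small sketch of the effective bounds for the NHWC input above, matching the slice op expected in tf_test.cpp:

    shape = (1, 3, 3, 10)            # NHWC input above
    begin, end = [0, 1, 1, 0], [0, 0, 0, 0]
    begin_mask, end_mask = 9, 15     # bit i set => ignore begin/end on axis i

    starts = [0 if begin_mask & (1 << i) else b for i, b in enumerate(begin)]
    ends = [shape[i] if end_mask & (1 << i) else e for i, e in enumerate(end)]
    print(starts, ends)              # [0, 1, 1, 0] [1, 3, 3, 10]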
-def sub_test(g1=tf.Graph()):
+@tf_test
+def sub_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='0')
         g2_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='1')
         tf.subtract(g1_input, g2_input, name='sub1')
-        tf.train.write_graph(g1, '.', 'sub_test.pb', as_text=False)


-def tanh_test(g1=tf.Graph()):
+@tf_test
+def tanh_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
         tf.tanh(g1_input, 'tanh')
-        tf.train.write_graph(g1, '.', 'tanh_test.pb', as_text=False)


-def transpose_test(g1=tf.Graph()):
+@tf_test
+def transpose_test(g1):
     with g1.as_default():
         g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
         tf.transpose(g1_input, perm=[0, 2, 3, 1], name='transpose')
-        tf.train.write_graph(g1, '.', 'transpose_test.pb', as_text=False)
test/tf/onehot_test.pb 0 → 100644 View file @ 34150e61
File added
test/tf/stridedslice_masks_test.pb 0 → 100644 View file @ 34150e61
File added
test/tf/tf_test.cpp View file @ 34150e61
...
@@ -48,6 +48,22 @@ TEST_CASE(add_bcast_test)
     EXPECT(p == prog);
 }

+TEST_CASE(assert_less_equal_test)
+{
+    migraphx::program p;
+    migraphx::shape s0{migraphx::shape::float_type, {2, 3}};
+    auto l0 = p.add_parameter("0", s0);
+    auto l1 = p.add_parameter("1", s0);
+    migraphx::literal l{migraphx::shape{migraphx::shape::int32_type, {2}}, {0, 1}};
+    auto l2 = p.add_literal(l);
+    p.add_instruction(migraphx::op::add{}, l0, l1);
+    auto l3 = p.add_instruction(migraphx::op::identity{}, l0, l1);
+    p.add_instruction(migraphx::op::identity{}, l3, l2);
+    auto prog = optimize_tf("assert_less_equal_test.pb", false);
+
+    EXPECT(p == prog);
+}
+
 TEST_CASE(batchmatmul_test)
 {
     migraphx::program p;
...
@@ -100,6 +116,16 @@ TEST_CASE(biasadd_test)
     EXPECT(p == prog);
 }

+TEST_CASE(cast_test)
+{
+    migraphx::program p;
+    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
+    p.add_instruction(migraphx::op::convert{migraphx::shape::int32_type}, l0);
+    auto prog = optimize_tf("cast_test.pb", false);
+
+    EXPECT(p == prog);
+}
+
 TEST_CASE(concat_test)
 {
     migraphx::program p;
...
@@ -118,16 +144,6 @@ TEST_CASE(concat_test)
     EXPECT(p == prog);
 }

-TEST_CASE(cast_test)
-{
-    migraphx::program p;
-    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
-    p.add_instruction(migraphx::op::convert{migraphx::shape::int32_type}, l0);
-    auto prog = optimize_tf("cast_test.pb", false);
-
-    EXPECT(p == prog);
-}
-
 TEST_CASE(const_test)
 {
     migraphx::program p;
...
@@ -271,9 +287,10 @@ TEST_CASE(mean_test_nhwc)
     migraphx::program p;
     migraphx::literal l{migraphx::shape{migraphx::shape::int32_type, {2}}, {1, 2}};
     auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
-    migraphx::op::reduce_mean op{{2, 3}};
-    auto l3 = p.add_instruction(op, l0);
-    p.add_instruction(migraphx::op::squeeze{{2, 3}}, l3);
+    auto l1 = p.add_instruction(migraphx::op::transpose{{0, 2, 3, 1}}, l0);
+    migraphx::op::reduce_mean op{{1, 2}};
+    auto l2 = p.add_instruction(op, l1);
+    p.add_instruction(migraphx::op::squeeze{{1, 2}}, l2);
     auto prog = optimize_tf("mean_test_nhwc.pb", true);

     EXPECT(p == prog);
...
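The updated expectation reflects NHWC handling: with the nhwc flag set, the reduction stays in NHWC, so the NCHW parameter is transposed first and the mean and squeeze act on axes (1, 2) rather than (2, 3). A NumPy analogue (illustrative only):

    import numpy as np

    x = np.random.rand(1, 3, 16, 16).astype(np.float32)  # NCHW parameter
    x_nhwc = x.transpose(0, 2, 3, 1)                     # transpose{{0, 2, 3, 1}}
    m = x_nhwc.mean(axis=(1, 2), keepdims=True)          # reduce_mean{{1, 2}}
    m = np.squeeze(m, axis=(1, 2))                       # squeeze{{1, 2}}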
@@ -291,6 +308,23 @@ TEST_CASE(mul_test)
     EXPECT(p == prog);
 }

+TEST_CASE(onehot_test)
+{
+    migraphx::program p;
+    auto l0 = p.add_literal(migraphx::literal{migraphx::shape{migraphx::shape::int32_type, {5}}, {1, 1, 1, 1, 1}});
+    p.add_literal(2);
+    p.add_literal(1.0f);
+    p.add_literal(0.0f);
+    auto l1 = p.add_literal(migraphx::literal{migraphx::shape{migraphx::shape::float_type, {2, 2}}, {1, 0, 0, 1}});
+    int axis = 0;
+    p.add_instruction(migraphx::op::gather{axis}, l1, l0);
+    auto prog = optimize_tf("onehot_test.pb", false);
+
+    EXPECT(p == prog);
+}
+
 TEST_CASE(pack_test)
 {
     migraphx::program p;
...
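Judging from the expected program, the parser lowers TensorFlow's one_hot to a gather: the depth, on-value, and off-value literals feed a small on/off matrix (the {1, 0, 0, 1} literal above), and the indices select its rows. A NumPy analogue (illustrative only):

    import numpy as np

    indices = np.array([1, 1, 1, 1, 1], dtype=np.int32)  # the int32 {5} literal
    eye = np.array([[1, 0], [0, 1]], dtype=np.float32)   # the float {2, 2} literal
    onehot = np.take(eye, indices, axis=0)               # gather{axis = 0}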
@@ -475,20 +509,44 @@ TEST_CASE(stridedslice_test)
 {
     migraphx::program p;
     auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 10, 1, 1}});
+    auto l1 = p.add_instruction(migraphx::op::transpose{{0, 2, 3, 1}}, l0);
     std::size_t num_axes = 4;
     migraphx::op::slice op;
     op.starts = {0, 0, 0, 0};
     op.ends   = {1, 1, 1, 5};
     op.axes   = std::vector<int64_t>(num_axes);
     std::iota(op.axes.begin(), op.axes.end(), 0);
-    auto l1 = p.add_instruction(op, l0);
+    auto l2 = p.add_instruction(op, l1);
     auto shrink_axis = 1;
-    p.add_instruction(migraphx::op::squeeze{{shrink_axis}}, l1);
+    p.add_instruction(migraphx::op::squeeze{{shrink_axis}}, l2);
     auto prog = optimize_tf("stridedslice_test.pb", true);

     EXPECT(p == prog);
 }
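shrink_axis_mask works the same bitwise way as the begin/end masks: shrink_axis_mask=2 (0b10) marks axis 1 to be collapsed after slicing, which is why the expected program follows the slice with squeeze{{1}}. A sketch of the decoding:

    shrink_axis_mask = 2                 # 0b10 => collapse axis 1
    shrink_axes = [i for i in range(4) if shrink_axis_mask & (1 << i)]
    print(shrink_axes)                   # [1]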
+TEST_CASE(stridedslice_masks_test)
+{
+    migraphx::program p;
+    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 10, 3, 3}});
+    std::size_t num_axes = 4;
+    migraphx::op::slice op;
+    op.starts = {0, 1, 1, 0};
+    op.ends   = {1, 3, 3, 10};
+    op.axes   = std::vector<int64_t>(num_axes);
+    std::iota(op.axes.begin(), op.axes.end(), 0);
+
+    // add literals for starts, ends, and strides in tf (NHWC format)
+    p.add_literal(migraphx::shape{migraphx::shape::int32_type, {4}}, std::vector<int>{0, 1, 1, 0});
+    p.add_literal(migraphx::shape{migraphx::shape::int32_type, {4}}, std::vector<int>{0, 0, 0, 0});
+    p.add_literal(migraphx::shape{migraphx::shape::int32_type, {4}}, std::vector<int>{1, 1, 1, 1});
+
+    auto l1 = p.add_instruction(migraphx::op::transpose{{0, 2, 3, 1}}, l0);
+    auto l2 = p.add_instruction(op, l1);
+    p.add_instruction(migraphx::op::transpose{{0, 3, 1, 2}}, l2);
+    auto prog = migraphx::parse_tf("stridedslice_masks_test.pb", true);
+
+    EXPECT(p == prog);
+}
+
 TEST_CASE(sub_test)
 {
     migraphx::program p;
...