Commit e2bbfca1 authored by turneram

Formatting

parent 6e67ccad
@@ -87,8 +87,7 @@ struct layernorm
            mean_square = sqrt(mean_square / norm_size - mean * mean + epsilon);
            for(std::size_t i = 0; i < norm_size; ++i)
            {
                output[offset + i] = (data[offset + i] - mean) / mean_square;
                /* if(args.size() == 3)
                    output[offset + i] =
                        (data[offset + i] - mean) / mean_square * weights[i] + bias[i];
......
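Note: the loop above is the reference layernorm kernel. It normalizes each slice of norm_size elements to zero mean and unit variance (mean_square ends up holding sqrt(E[x^2] - mean^2 + epsilon)), and the commented-out branch would additionally apply the optional scale and bias inputs. A minimal NumPy sketch of the same computation, with illustrative names that are not from the repository:

import numpy as np

def layernorm_ref(data, weights=None, bias=None, epsilon=1e-3):
    # Normalize over the last axis, mirroring the per-offset loop in the kernel.
    mean = data.mean(axis=-1, keepdims=True)
    var = (data * data).mean(axis=-1, keepdims=True) - mean * mean
    out = (data - mean) / np.sqrt(var + epsilon)
    if weights is not None and bias is not None:
        out = out * weights + bias  # corresponds to the commented-out 3-argument branch
    return out

x = np.random.randn(2, 4, 8).astype(np.float32)
print(layernorm_ref(x).shape)  # (2, 4, 8)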
@@ -15,25 +15,25 @@ struct parse_attention : op_parser<parse_attention>
                          const onnx_parser& parser,
                          onnx_parser::node_info info,
                          const std::vector<instruction_ref>& args) const
    {
        auto input      = args[0];
        auto weights    = args[1];
        auto bias       = args[2];
        auto mask_index = args[3];
        instruction_ref past;
        instruction_ref extra_add_qk;
        bool is_past         = false;
        bool is_extra_add_qk = false;
        if(args.size() > 4)
        {
            past    = args[4];
            is_past = true;
        }
        if(args.size() == 6)
        {
            is_extra_add_qk = true;
            extra_add_qk    = args[5];
        }
        // ORT default is 12
@@ -42,112 +42,123 @@ struct parse_attention : op_parser<parse_attention>
            num_heads = info.attributes.at("num_heads").i();
        // input shape: (batch_size, sequence_length, input_hidden_size)
        auto input_lens        = input->get_shape().lens();
        auto batch_size        = input_lens.at(0);
        auto sequence_length   = input_lens.at(1);
        auto input_hidden_size = input_lens.at(2);
        // bias shape: (3 * hidden_size)
        auto bias_lens   = bias->get_shape().lens();
        auto hidden_size = bias_lens.at(0) / 3;
        auto head_size   = hidden_size / num_heads;
        int past_sequence_length = 0;
        // GetPresent
        // Input and output shapes:
        //   past    : (2, batch_size, num_heads, past_sequence_length, head_size)
        //   present : (2, batch_size, num_heads, past_sequence_length + sequence_length,
        //   head_size)
        std::vector<std::size_t> present_lens{2, batch_size, num_heads, sequence_length, head_size};
        if(is_past)
        {
            auto past_lens       = past->get_shape().lens();
            past_sequence_length = past_lens.at(3);
            present_lens[3] += past_lens[3];
        }
        // Use GEMM for fully connection.
        auto m = batch_size * sequence_length;
        auto n = bias_lens.front();
        auto k = input_hidden_size;
        // Bias shape is (N), broadcast using B(N, M) = 1 * bias(N, 1) x ones(1, M) + 0 * B.
        auto bias_type = bias->get_shape().type();
        std::vector<float> ones_vec(m, 1);
        std::vector<std::size_t> ones_lens{1, m};
        auto ones =
            info.add_literal(migraphx::literal{migraphx::shape{bias_type, ones_lens}, ones_vec});
        bias        = info.add_instruction(migraphx::make_op("reshape", {{"dims", {n, 1}}}), bias);
        auto gemm_1 = info.add_instruction(
            migraphx::make_op("dot"),
            bias,
            ones /* info.make_contiguous(mb_bias), info.make_contiguous(ones) */);
        gemm_1 =
            info.add_instruction(migraphx::make_op("transpose", {{"permutation", {1, 0}}}), gemm_1);

        /// ORT: Gemm, note that ROCM assumes col-major, so result(N, M) = 1 * weights x input + 1 x
        /// B. Assume row-major => results(N, M) = 1 * input x weights + 1 x B ?
        auto input_sq = info.add_instruction(
            migraphx::make_op("reshape", {{"dims", {batch_size * sequence_length, hidden_size}}}),
            input);
        auto gemm_2    = info.add_instruction(migraphx::make_op("dot"), input_sq, weights);
        auto add_gemms = info.add_instruction(migraphx::make_op("add"), gemm_1, gemm_2);
        // LaunchAttentionKernel:
        //   LaunchTransQkv
        //   input should be BxSx3xNxH => scratch3: 3xBxNxSxH
        add_gemms = info.add_instruction(
            migraphx::make_op("reshape",
                              {{"dims", {batch_size, sequence_length, 3, num_heads, head_size}}}),
            add_gemms);
        std::vector<std::size_t> qkv_perm{2, 0, 3, 1, 4};
        auto transqkv = info.add_instruction(
            migraphx::make_op("transpose", {{"permutation", qkv_perm}}), add_gemms);
        // now scratch3 has Q, K, V: each has size BxNxSxH
        // => transqkv has shape 3xBxNxSxH
        auto batches        = batch_size * num_heads;
        auto size_per_batch = sequence_length * head_size;
        auto total_size     = batches * size_per_batch;
        auto q_t = info.add_instruction(
            migraphx::make_op("slice", {{"axes", {0}}, {"starts", {0}}, {"ends", {1}}}), transqkv);
        auto k_t = info.add_instruction(
            migraphx::make_op("slice", {{"axes", {0}}, {"starts", {1}}, {"ends", {2}}}), transqkv);
        auto v_t = info.add_instruction(
            migraphx::make_op("slice", {{"axes", {0}}, {"starts", {2}}, {"ends", {3}}}), transqkv);
        q_t = info.add_instruction(make_op("squeeze", {{"axes", {0}}}), q_t);
        k_t = info.add_instruction(make_op("squeeze", {{"axes", {0}}}), k_t);
        v_t = info.add_instruction(make_op("squeeze", {{"axes", {0}}}), v_t);
        if(is_past)
        {
            k_t = info.add_instruction(migraphx::make_op("concat", {{"axis", 3}}), past, k_t);
            v_t = info.add_instruction(
                migraphx::make_op("slice", {{"axes", {0}}, {"starts", {1}}, {"ends", {3}}}), k_t);
        }
        // Raw attention mask could be 2D (BxS) or 3D (BxSxS*) or 4D(Bx1xMxM), where M is the max
        // sequence length.
        auto mask_index_lens        = mask_index->get_shape().lens();
        bool use_raw_attention_mask = mask_index_lens.size() >= 2;
        // compute Q*K' (as K'*Q), scaled by 1/sqrt(H) and store in scratch1: BxNxSxS*
        // Q: BxNxSxH, K (present_k): BxNxS*xH, Q*K': BxNxSxS*
        const float rsqrt_head_size   = 1.f / sqrt(static_cast<float>(head_size));
        const int all_sequence_length = past_sequence_length + sequence_length;
        const int temp_matrix_size    = sequence_length * all_sequence_length;
        // For raw attention mask, the scalar if 1/sqrt(H) is moved to softmax computation.
        const float alpha = use_raw_attention_mask ? 1.0 : rsqrt_head_size;
        // K{B,N,S,H} -> K'{B,N,H,S}
        k_t        = info.add_instruction(make_op("transpose", {{"permutation", {0, 1, 3, 2}}}), k_t);
        auto gemm3 = info.add_instruction(migraphx::make_op("dot"), q_t, k_t);
        if(is_extra_add_qk)
            gemm3 = info.add_instruction(make_op("add"), gemm3, extra_add_qk);
        auto alpha_lit = info.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", gemm3->get_shape().lens()}}),
            info.add_literal(
                migraphx::literal{migraphx::shape{gemm3->get_shape().type()}, {alpha}}));
        gemm3 =
            info.add_instruction(migraphx::make_op("mul"), gemm3, info.make_contiguous(alpha_lit));
        // apply softmax and store result P to scratch2: BxNxSxS*
        std::vector<float> mask(batch_size * num_heads * sequence_length * all_sequence_length, 0);
        if(false and mask_index_lens.size() >= 2) {}
        else if(false and mask_index_lens.size() == 1)
        {
        }
        // else => no mask
        auto softmax = info.add_instruction(migraphx::make_op("softmax", {{"axis", 3}}), gemm3);
@@ -156,8 +167,11 @@ struct parse_attention : op_parser<parse_attention>
        auto gemm4 = info.add_instruction(migraphx::make_op("dot"), softmax, v_t);
        // scratch3 is BxNxSxH, transpose to output BxSxNxH
        gemm4 = info.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), gemm4);
        gemm4 = info.add_instruction(
            make_op("reshape", {{"dims", {batch_size, sequence_length, num_heads * head_size}}}),
            info.make_contiguous(gemm4));
        return gemm4;
    }
};
......
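The parser above lowers the ORT Attention node into plain MIGraphX ops: one GEMM produces the packed QKV projection (with the bias broadcast through the ones GEMM), a reshape/transpose splits it into per-head Q, K, V, and the output is softmax(Q*K'/sqrt(H))*V transposed back to BxSxNxH. A rough NumPy sketch of that decomposition, assuming no past state, no mask and no extra_add_qk; the function and variable names are illustrative only:

import numpy as np

def attention_ref(x, w, b, num_heads):
    # x: (B, S, hidden), w: (hidden, 3*hidden), b: (3*hidden,)
    B, S, hidden = x.shape
    H = hidden // num_heads
    qkv = x.reshape(B * S, hidden) @ w + b        # the two GEMMs plus add in the parser
    qkv = qkv.reshape(B, S, 3, num_heads, H)
    q, k, v = qkv.transpose(2, 0, 3, 1, 4)        # 3 x B x N x S x H, then slice/squeeze
    scores = (q @ k.transpose(0, 1, 3, 2)) / np.sqrt(H)   # B x N x S x S
    probs = np.exp(scores - scores.max(-1, keepdims=True))
    probs /= probs.sum(-1, keepdims=True)         # softmax over the last axis
    out = probs @ v                               # B x N x S x H
    return out.transpose(0, 2, 1, 3).reshape(B, S, hidden)

x = np.random.randn(1, 4, 8).astype(np.float32)
w = np.random.randn(8, 24).astype(np.float32)
b = np.random.randn(24).astype(np.float32)
print(attention_ref(x, w, b, num_heads=2).shape)  # (1, 4, 8)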
@@ -17,7 +17,7 @@ struct parse_layernorm : op_parser<parse_layernorm>
                          const std::vector<instruction_ref>& args) const
    {
        float epsilon = 1e-3f;
        int64_t axis  = -1;
        if(contains(info.attributes, "epsilon"))
        {
            epsilon = parser.parse_value(info.attributes.at("epsilon")).at<float>();
@@ -27,9 +27,10 @@ struct parse_layernorm : op_parser<parse_layernorm>
            epsilon = parser.parse_value(info.attributes.at("axis")).at<int64_t>();
        }
        auto layernorm = info.add_instruction(
            make_op("layernorm", {{"epsilon", epsilon}, {"axis", axis}}), args.front());

        if(args.size() == 3)
        {
            layernorm = info.add_instruction(make_op("mul"), layernorm, args.at(1));
            layernorm = info.add_instruction(make_op("add"), layernorm, args.at(2));
......
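When the node carries scale and bias inputs (three arguments), the parser composes the base layernorm with an elementwise mul and add, as in the two instructions above. A short NumPy check of that composition, with illustrative names only:

import numpy as np

def apply_scale_bias(normalized, scale, bias):
    # Mirrors the two instructions the parser appends for a 3-input node:
    # layernorm = mul(layernorm, scale); layernorm = add(layernorm, bias)
    return normalized * scale + bias

normalized = np.random.randn(2, 5).astype(np.float32)
scale = np.full(5, 2.0, dtype=np.float32)
bias = np.full(5, 0.5, dtype=np.float32)
print(apply_scale_bias(normalized, scale, bias).shape)  # (2, 5)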
@@ -12,7 +12,8 @@ namespace device {
void layernorm(hipStream_t stream, const argument& result, const argument& arg1);
// void layernorm(hipStream_t stream, const argument& result, const argument& arg1, const argument&
// arg2, const argument& arg3, const int64_t axis);
void triadd_layernorm(hipStream_t stream,
                      const argument& result,
......
@@ -19,12 +19,13 @@ argument hip_layernorm::compute(context& ctx, const shape&, const std::vector<ar
    {
        auto n_dim      = args.front().get_shape().lens().size();
        auto tuned_axis = tune_axis(n_dim, op.axis, op.name());
        device::layernorm(ctx.get_stream().get(), args.back(), args[0], args[1], args[2],
                          tuned_axis);
    }
    else */
    std::cout << "calling device::ln" << std::endl;
    {
        device::layernorm(ctx.get_stream().get(), args.back(), args[0]);
        std::cout << "called device::ln" << std::endl;
    }
......
@@ -394,7 +394,7 @@ struct miopen_apply
        apply_map.emplace(op_name, [=](instruction_ref ins) {
            auto output = insert_allocation(ins, ins->get_shape());
            std::vector<instruction_ref> refs = ins->inputs();
            if(op_name == "layernorm")
            {
                std::cout << "layernorm op" << std::endl;
            }
......
@@ -11,17 +11,16 @@ def onnx_test(op_test):
    def run_test():
        op_info = op_test()
        if len(op_info) > 3:
            graph_def = helper.make_graph(op_info[0],
                                          op_test.__name__,
                                          op_info[1],
                                          op_info[2],
                                          initializer=op_info[3])
        else:
            graph_def = helper.make_graph(op_info[0], op_test.__name__,
                                          op_info[1], op_info[2])
        model_def = helper.make_model(graph_def,
                                      producer_name=op_test.__name__)
        onnx.save(model_def, '{}.onnx'.format(op_test.__name__))

    return run_test
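Each decorated generator below returns a (nodes, inputs, outputs[, initializers]) tuple, and the decorator turns it into a saved .onnx file named after the function. A minimal sketch of how such a generator is defined and invoked, reusing the onnx_test decorator from the hunk above; relu_example_test is an illustrative name, not a test from this file:

import onnx
from onnx import helper, TensorProto

@onnx_test
def relu_example_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
    node = onnx.helper.make_node('Relu', inputs=['x'], outputs=['y'])
    return ([node], [x], [y])

# Calling the wrapped function writes relu_example_test.onnx to the working directory.
relu_example_test()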
@@ -61,8 +60,11 @@ def add_bcast_test():
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 4])
    z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [2, 3, 4, 5])

    node = onnx.helper.make_node('Add',
                                 inputs=['0', '1'],
                                 broadcast=1,
                                 axis=1,
                                 outputs=['2'])

    return ([node], [x, y], [z])
@@ -106,8 +108,11 @@ def argmax_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])

    node = onnx.helper.make_node('ArgMax',
                                 inputs=['x'],
                                 outputs=['y'],
                                 axis=2,
                                 keepdims=0)

    return ([node], [x], [y])
@@ -117,8 +122,11 @@ def argmin_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 5])

    node = onnx.helper.make_node('ArgMin',
                                 inputs=['x'],
                                 outputs=['y'],
                                 axis=3,
                                 keepdims=0)

    return ([node], [x], [y])
@@ -184,8 +192,10 @@ def averagepool_1d_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5])
    out = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3])

    node = onnx.helper.make_node('AveragePool',
                                 inputs=['0'],
                                 outputs=['1'],
                                 kernel_shape=[3])

    return ([node], [x], [out])
@@ -196,8 +206,10 @@ def averagepool_3d_test():
    out = helper.make_tensor_value_info('1', TensorProto.FLOAT,
                                        [1, 3, 3, 3, 3])

    node = onnx.helper.make_node('AveragePool',
                                 inputs=['0'],
                                 outputs=['1'],
                                 kernel_shape=[3, 3, 3])

    return ([node], [x], [out])
@@ -207,14 +219,13 @@ def averagepool_notset_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 1, 1])

    node = onnx.helper.make_node('AveragePool',
                                 inputs=['x'],
                                 outputs=['y'],
                                 kernel_shape=[6, 6],
                                 strides=[2, 2],
                                 pads=[0, 0, 1, 1],
                                 auto_pad='NOTSET')

    return ([node], [x], [y])
@@ -224,15 +235,14 @@ def averagepool_nt_cip_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 1, 1])

    node = onnx.helper.make_node('AveragePool',
                                 inputs=['x'],
                                 outputs=['y'],
                                 kernel_shape=[6, 6],
                                 strides=[2, 2],
                                 pads=[0, 0, 1, 1],
                                 auto_pad='NOTSET',
                                 count_include_pad=1)

    return ([node], [x], [y])
@@ -242,12 +252,11 @@ def averagepool_same_lower_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 5, 5])

    node = onnx.helper.make_node('AveragePool',
                                 inputs=['x'],
                                 outputs=['y'],
                                 kernel_shape=[2, 2],
                                 auto_pad='SAME_LOWER')

    return ([node], [x], [y])
@@ -257,13 +266,12 @@ def averagepool_sl_cip_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 5, 5])

    node = onnx.helper.make_node('AveragePool',
                                 inputs=['x'],
                                 outputs=['y'],
                                 kernel_shape=[2, 2],
                                 auto_pad='SAME_LOWER',
                                 count_include_pad=1)

    return ([node], [x], [y])
@@ -273,12 +281,11 @@ def averagepool_same_upper_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 5, 5])

    node = onnx.helper.make_node('AveragePool',
                                 inputs=['x'],
                                 outputs=['y'],
                                 kernel_shape=[2, 2],
                                 auto_pad='SAME_UPPER')

    return ([node], [x], [y])
@@ -292,12 +299,11 @@ def batchnorm_1d_test():
    var = helper.make_tensor_value_info('4', TensorProto.FLOAT, [3])
    out = helper.make_tensor_value_info('5', TensorProto.FLOAT, [1, 3, 5])

    node = onnx.helper.make_node('BatchNormalization',
                                 inputs=['0', '1', '2', '3', '4'],
                                 outputs=['5'],
                                 epsilon=1e-6,
                                 momentum=0.9)

    return ([node], [x, scale, bias, mean, var], [out])
@@ -312,12 +318,11 @@ def batchnorm_3d_test():
    out = helper.make_tensor_value_info('5', TensorProto.FLOAT,
                                        [1, 3, 5, 5, 5])

    node = onnx.helper.make_node('BatchNormalization',
                                 inputs=['0', '1', '2', '3', '4'],
                                 outputs=['5'],
                                 epsilon=1e-6,
                                 momentum=0.9)

    return ([node], [x, scale, bias, mean, var], [out])
@@ -351,8 +356,10 @@ def celu_alpha_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3])

    node = onnx.helper.make_node('Celu',
                                 inputs=['x'],
                                 outputs=['y'],
                                 alpha=0.8)

    return ([node], [x], [y])
@@ -372,8 +379,10 @@ def celu_verify_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])

    node = onnx.helper.make_node('Celu',
                                 inputs=['x'],
                                 outputs=['y'],
                                 alpha=0.5)

    return ([node], [x], [y])
@@ -393,8 +402,10 @@ def celu_zero_alpha_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])

    node = onnx.helper.make_node('Celu',
                                 inputs=['x'],
                                 outputs=['y'],
                                 alpha=0.0)

    return ([node], [x], [y])
@@ -404,8 +415,11 @@ def clip_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])

    node = onnx.helper.make_node('Clip',
                                 inputs=['0'],
                                 outputs=['1'],
                                 max=6.0,
                                 min=0.0)

    return ([node], [x], [y])
@@ -418,8 +432,9 @@ def clip_test_op11():
    min_val = helper.make_tensor('min', TensorProto.FLOAT, [], [0.0])
    max_val = helper.make_tensor('max', TensorProto.FLOAT, [], [6.0])

    node = onnx.helper.make_node('Clip',
                                 inputs=['0', 'min', 'max'],
                                 outputs=['1'])

    return ([node], [x], [y], [min_val, max_val])
@@ -431,8 +446,9 @@ def clip_test_op11_max_only():
    max_val = helper.make_tensor('max', TensorProto.FLOAT, [], [0.0])

    node = onnx.helper.make_node('Clip',
                                 inputs=['0', '', 'max'],
                                 outputs=['1'])

    return ([node], [x], [y], [max_val])
@@ -478,8 +494,9 @@ def clip_test_args_type_mismatch():
                                 [1.5, 2.5, 3.5])
    max_val = helper.make_tensor('max', TensorProto.INT64, [3, 1], [2, 3, 4])

    node = onnx.helper.make_node('Clip',
                                 inputs=['0', 'min', 'max'],
                                 outputs=['1'])

    return ([node], [x], [y], [min_val, max_val])
@@ -542,11 +559,10 @@ def constant_fill_input_as_shape_test():
    np_shape = np.array([2, 3])
    value = helper.make_tensor_value_info('value', TensorProto.FLOAT, [2, 3])

    ts_shape = helper.make_tensor(name='shape_tensor',
                                  data_type=TensorProto.INT32,
                                  dims=np_shape.shape,
                                  vals=np_shape.flatten().astype(int))

    const_shape_node = onnx.helper.make_node(
        'Constant',
@@ -592,11 +608,10 @@ def const_of_shape_empty_input_test():
    tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.INT64, [1],
                                         [10])
    empty_val = np.array([]).astype(np.int64)
    empty_ts = helper.make_tensor(name='empty_tensor',
                                  data_type=TensorProto.INT32,
                                  dims=empty_val.shape,
                                  vals=empty_val.flatten().astype(int))
    shape_const = helper.make_node(
        'Constant',
        inputs=[],
@@ -621,11 +636,10 @@ def const_of_shape_float_test():
                                         [10])
    shape_val = np.array([2, 3, 4]).astype(np.int64)
    shape_ts = helper.make_tensor(name='shape_tensor',
                                  data_type=TensorProto.INT32,
                                  dims=shape_val.shape,
                                  vals=shape_val.flatten().astype(int))
    shape_const = helper.make_node(
        'Constant',
@@ -635,8 +649,10 @@ def const_of_shape_float_test():
    )
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4])

    node = onnx.helper.make_node('ConstantOfShape',
                                 inputs=['shape'],
                                 outputs=['y'],
                                 value=tensor_val)

    return ([shape_const, node], [], [y])
@@ -646,11 +662,10 @@ def const_of_shape_int64_test():
    tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.INT64, [1],
                                         [10])
    shape_val = np.array([2, 3, 4]).astype(np.int64)
    shape_ts = helper.make_tensor(name='shape_tensor',
                                  data_type=TensorProto.INT32,
                                  dims=shape_val.shape,
                                  vals=shape_val.flatten().astype(int))
    shape_const = helper.make_node(
        'Constant',
        inputs=[],
@@ -659,8 +674,10 @@ def const_of_shape_int64_test():
    )
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4])

    node = onnx.helper.make_node('ConstantOfShape',
                                 inputs=['shape'],
                                 outputs=['y'],
                                 value=tensor_val)

    return ([shape_const, node], [], [y])
@@ -668,11 +685,10 @@ def const_of_shape_int64_test():
@onnx_test
def const_of_shape_no_value_attr_test():
    shape_val = np.array([2, 3, 4]).astype(np.int64)
    shape_ts = helper.make_tensor(name='shape_tensor',
                                  data_type=TensorProto.INT32,
                                  dims=shape_val.shape,
                                  vals=shape_val.flatten().astype(int))
    shape_const = helper.make_node(
        'Constant',
        inputs=[],
@@ -719,8 +735,10 @@ def conv_attr_fail_test():
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 3])

    node = onnx.helper.make_node('Conv',
                                 inputs=['0', '1'],
                                 strides=[1, 1],
                                 outputs=['2'])

    return ([node], [x, y], [out])
@@ -731,14 +749,13 @@ def conv_autopad_fail_test():
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 1, 1])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 34, 34])

    node = onnx.helper.make_node('Conv',
                                 inputs=['0', '1'],
                                 outputs=['2'],
                                 dilations=[1, 1],
                                 strides=[1, 1],
                                 auto_pad='SAME',
                                 pads=[0, 0, 1, 1, 0, 0, 1, 1])

    return ([node], [x, y], [out])
@@ -749,13 +766,12 @@ def conv_autopad_same_test():
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
    out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 32, 32])

    node = onnx.helper.make_node('Conv',
                                 inputs=['0', '1'],
                                 outputs=['2'],
                                 dilations=[1, 1],
                                 strides=[1, 1],
                                 auto_pad='SAME')

    return ([node], [x, y], [out])
@@ -767,12 +783,11 @@ def conv_bias_test():
    z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1])
    out = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 2, 28, 28])

    node = onnx.helper.make_node('Conv',
                                 inputs=['0', '1', '2'],
                                 outputs=['3'],
                                 dilations=[1, 1],
                                 strides=[1, 1])

    return ([node], [x, y, z], [out])
@@ -789,29 +804,26 @@ def conv_bn_relu_maxpool_test():
    out = helper.make_tensor_value_info('10', TensorProto.FLOAT,
                                        [1, 1, 14, 14])

    node0 = onnx.helper.make_node('Conv',
                                  inputs=['0', '1', '2'],
                                  outputs=['7'],
                                  dilations=[1, 1],
                                  strides=[1, 1],
                                  pads=[0, 0, 0, 0])

    node1 = onnx.helper.make_node('BatchNormalization',
                                  inputs=['7', '3', '4', '5', '6'],
                                  outputs=['8'],
                                  epsilon=9.99999974737875e-06,
                                  momentum=0.899999976158142)

    node2 = onnx.helper.make_node('Relu', inputs=['8'], outputs=['9'])

    node3 = onnx.helper.make_node('MaxPool',
                                  inputs=['9'],
                                  outputs=['10'],
                                  pads=[0, 0, 0, 0],
                                  strides=[2, 2],
                                  kernel_shape=[2, 2])

    return ([node0, node1, node2, node3], [x, y, z, m, n, k, l], [out])
@@ -823,23 +835,21 @@ def conv_relu_maxpool_test():
    z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1])
    out = helper.make_tensor_value_info('5', TensorProto.FLOAT, [1, 1, 14, 14])

    node1 = onnx.helper.make_node('Conv',
                                  inputs=['0', '1', '2'],
                                  outputs=['3'],
                                  dilations=[1, 1],
                                  strides=[1, 1],
                                  pads=[0, 0, 0, 0])

    node2 = onnx.helper.make_node('Relu', inputs=['3'], outputs=['4'])

    node3 = onnx.helper.make_node('MaxPool',
                                  inputs=['4'],
                                  outputs=['5'],
                                  pads=[0, 0, 0, 0],
                                  strides=[2, 2],
                                  kernel_shape=[2, 2])

    return ([node1, node2, node3], [x, y, z], [out])
@@ -853,41 +863,37 @@ def conv_relu_maxpool_x2_test():
    n = helper.make_tensor_value_info('4', TensorProto.FLOAT, [1])
    out = helper.make_tensor_value_info('10', TensorProto.FLOAT, [1, 1, 5, 5])

    node1 = onnx.helper.make_node('Conv',
                                  inputs=['0', '1', '2'],
                                  outputs=['5'],
                                  dilations=[1, 1],
                                  strides=[1, 1],
                                  pads=[0, 0, 0, 0])

    node2 = onnx.helper.make_node('Relu', inputs=['5'], outputs=['6'])

    node3 = onnx.helper.make_node('MaxPool',
                                  inputs=['6'],
                                  outputs=['7'],
                                  pads=[0, 0, 0, 0],
                                  strides=[2, 2],
                                  kernel_shape=[2, 2])

    node4 = onnx.helper.make_node('Conv',
                                  inputs=['7', '3', '4'],
                                  outputs=['8'],
                                  dilations=[1, 1],
                                  strides=[1, 1],
                                  pads=[0, 0, 0, 0])

    node5 = onnx.helper.make_node('Relu', inputs=['8'], outputs=['9'])

    node6 = onnx.helper.make_node('MaxPool',
                                  inputs=['9'],
                                  outputs=['10'],
                                  pads=[0, 0, 0, 0],
                                  strides=[2, 2],
                                  kernel_shape=[2, 2])

    return ([node1, node2, node3, node4, node5, node6], [x, y, z, m, n], [out])
@@ -899,12 +905,11 @@ def convinteger_bias_test():
    z = helper.make_tensor_value_info('2', TensorProto.INT32, [1])
    out = helper.make_tensor_value_info('3', TensorProto.INT32, [1, 2, 28, 28])

    node = onnx.helper.make_node('ConvInteger',
                                 inputs=['0', '1', '2'],
                                 outputs=['3'],
                                 dilations=[1, 1],
                                 strides=[1, 1])

    return ([node], [x, y, z], [out])
@@ -943,8 +948,10 @@ def deconv_test():
    w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 1, 3, 3])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 5, 5])

    node = onnx.helper.make_node('ConvTranspose',
                                 name='conv1',
                                 inputs=['x', 'w'],
                                 outputs=['y'])

    return ([node], [x, w], [y])
@@ -956,8 +963,10 @@ def deconv_bias_test():
    b = helper.make_tensor_value_info('b', TensorProto.FLOAT, [1])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 5, 5])

    node = onnx.helper.make_node('ConvTranspose',
                                 name='conv1',
                                 inputs=['x', 'w', 'b'],
                                 outputs=['y'])

    return ([node], [x, w, b], [y])
@@ -968,12 +977,11 @@ def deconv_input_pads_strides_test():
    w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 7, 5])

    node = onnx.helper.make_node('ConvTranspose',
                                 inputs=['x', 'w'],
                                 outputs=['y'],
                                 strides=[3, 2],
                                 pads=[1, 1, 1, 1])

    return ([node], [x, w], [y])
@@ -984,12 +992,11 @@ def deconv_input_pads_asymm_test():
    w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 8, 6])

    node = onnx.helper.make_node('ConvTranspose',
                                 inputs=['x', 'w'],
                                 outputs=['y'],
                                 strides=[3, 2],
                                 pads=[0, 0, 1, 1])

    return ([node], [x, w], [y])
@@ -1000,13 +1007,12 @@ def deconv_input_pads_asymm_1d_test():
    w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 6])

    node = onnx.helper.make_node('ConvTranspose',
                                 inputs=['x', 'w'],
                                 outputs=['y'],
                                 strides=[2],
                                 pads=[0, 1],
                                 dilations=[1])

    return ([node], [x, w], [y])
@@ -1017,12 +1023,11 @@ def deconv_output_padding_test():
    w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 10, 8])

    node = onnx.helper.make_node('ConvTranspose',
                                 inputs=['x', 'w'],
                                 outputs=['y'],
                                 strides=[3, 2],
                                 output_padding=[1, 1])

    return ([node], [x, w], [y])
@@ -1033,12 +1038,11 @@ def deconv_output_padding_3d_test():
    w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3, 3])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 10, 8, 8])

    node = onnx.helper.make_node('ConvTranspose',
                                 inputs=['x', 'w'],
                                 outputs=['y'],
                                 strides=[3, 2, 2],
                                 output_padding=[1, 1, 1])

    return ([node], [x, w], [y])
@@ -1049,12 +1053,11 @@ def deconv_output_shape_test():
    w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 10, 8])

    node = onnx.helper.make_node('ConvTranspose',
                                 inputs=['x', 'w'],
                                 outputs=['y'],
                                 strides=[3, 2],
                                 output_shape=[10, 8])

    return ([node], [x, w], [y])
@@ -1065,12 +1068,11 @@ def deconv_output_shape_3d_test():
    w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3, 3])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 10, 8, 8])

    node = onnx.helper.make_node('ConvTranspose',
                                 inputs=['x', 'w'],
                                 outputs=['y'],
                                 strides=[3, 2, 2],
                                 output_shape=[10, 8, 8])

    return ([node], [x, w], [y])
@@ -1081,8 +1083,10 @@ def deconv_stride_test():
    w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 7, 3])

    node = onnx.helper.make_node('ConvTranspose',
                                 inputs=['x', 'w'],
                                 outputs=['y'],
                                 strides=[3, 2])

    return ([node], [x, w], [y])
@@ -1093,8 +1097,11 @@ def depthtospace_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 8, 5, 5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 10, 10])

    node = onnx.helper.make_node('DepthToSpace',
                                 inputs=['x'],
                                 outputs=['y'],
                                 blocksize=2,
                                 mode='DCR')

    return ([node], [x], [y])
@@ -1105,8 +1112,11 @@ def depthtospace_simple_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 8, 2, 3])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 4, 6])

    node = onnx.helper.make_node('DepthToSpace',
                                 inputs=['x'],
                                 outputs=['y'],
                                 blocksize=2,
                                 mode='DCR')

    return ([node], [x], [y])
@@ -1117,8 +1127,11 @@ def depthtospace_crd_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 8, 5, 5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 10, 10])

    node = onnx.helper.make_node('DepthToSpace',
                                 inputs=['x'],
                                 outputs=['y'],
                                 blocksize=2,
                                 mode='CRD')

    return ([node], [x], [y])
@@ -1129,8 +1142,10 @@ def spacetodepth_test():
    x = helper.make_tensor_value_info('x', TensorProto.float, [2, 2, 10, 10])
    y = helper.make_tensor_value_info('y', TensorProto.float, [2, 8, 5, 5])

    node = onnx.helper.make_node('spacetodepth',
                                 inputs=['x'],
                                 outputs=['y'],
                                 blocksize=2)

    return ([node], [x], [y])
@@ -1141,8 +1156,10 @@ def spacetodepth_simple_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 2, 4, 6])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 8, 2, 3])

    node = onnx.helper.make_node('SpaceToDepth',
                                 inputs=['x'],
                                 outputs=['y'],
                                 blocksize=2)

    return ([node], [x], [y])
@@ -1153,8 +1170,10 @@ def spacetodepth_invalid_blocksize_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 2, 4, 6])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 8, 2, 3])

    node = onnx.helper.make_node('SpaceToDepth',
                                 inputs=['x'],
                                 outputs=['y'],
                                 blocksize=0.3)

    return ([node], [x], [y])
@@ -1165,8 +1184,10 @@ def spacetodepth_nondivisibility_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 2, 5, 5])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 8, 2, 2])

    node = onnx.helper.make_node('SpaceToDepth',
                                 inputs=['x'],
                                 outputs=['y'],
                                 blocksize=2)

    return ([node], [x], [y])
@@ -1209,8 +1230,10 @@ def make_dequantizelinear_axis_graph(axis):
    arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
                                            [1, 1, 5, 1])

    node = onnx.helper.make_node('DequantizeLinear',
                                 inputs=['0', '1', '2'],
                                 outputs=['out'],
                                 axis=axis)

    return ([node], [arg0, arg1, arg2], [arg_out])
@@ -1244,8 +1267,10 @@ def elu_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])

    node = onnx.helper.make_node('Elu',
                                 inputs=['0'],
                                 outputs=['1'],
                                 alpha=0.01)

    return ([node], [x], [y])
@@ -1256,23 +1281,25 @@ def embedding_bag_test():
    index_val = np.array([1, 0, 2])
    offset_val = np.array([0])

    index_tensor = helper.make_tensor(name='index_val',
                                      data_type=TensorProto.INT32,
                                      dims=index_val.shape,
                                      vals=index_val.astype(np.int32))

    index = onnx.helper.make_node('Constant',
                                  inputs=[],
                                  outputs=['index'],
                                  value=index_tensor)

    offset_tensor = helper.make_tensor(name='offset_val',
                                       data_type=TensorProto.INT32,
                                       dims=offset_val.reshape(()).shape,
                                       vals=offset_val.astype(np.int32))

    offset = onnx.helper.make_node('Constant',
                                   inputs=[],
                                   outputs=['offset'],
                                   value=offset_tensor)

    weight = helper.make_tensor_value_info('weight', TensorProto.FLOAT, [4, 2])
...@@ -1280,26 +1307,23 @@ def embedding_bag_test(): ...@@ -1280,26 +1307,23 @@ def embedding_bag_test():
y2 = helper.make_tensor_value_info('y2', TensorProto.FLOAT, [1, 2]) y2 = helper.make_tensor_value_info('y2', TensorProto.FLOAT, [1, 2])
y3 = helper.make_tensor_value_info('y3', TensorProto.FLOAT, [1, 2]) y3 = helper.make_tensor_value_info('y3', TensorProto.FLOAT, [1, 2])
node1 = onnx.helper.make_node( node1 = onnx.helper.make_node('ATen',
'ATen', inputs=['weight', 'index', 'offset'],
inputs=['weight', 'index', 'offset'], outputs=['y1'],
outputs=['y1'], mode=0,
mode=0, operator='embedding_bag')
operator='embedding_bag')
node2 = onnx.helper.make_node( node2 = onnx.helper.make_node('ATen',
'ATen', inputs=['weight', 'index', 'offset'],
inputs=['weight', 'index', 'offset'], outputs=['y2'],
outputs=['y2'], mode=1,
mode=1, operator='embedding_bag')
operator='embedding_bag')
node3 = onnx.helper.make_node('ATen',
node3 = onnx.helper.make_node( inputs=['weight', 'index', 'offset'],
'ATen', outputs=['y3'],
inputs=['weight', 'index', 'offset'], mode=2,
outputs=['y3'], operator='embedding_bag')
mode=2,
operator='embedding_bag')
return ([index, offset, node1, node2, node3], [weight], [y1, y2, y3]) return ([index, offset, node1, node2, node3], [weight], [y1, y2, y3])
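The three ATen nodes above assume PyTorch's embedding_bag convention, where mode 0/1/2 reduce each bag by sum/mean/max; under that assumption, a rough NumPy sketch of the expected outputs (an illustration, not the parser's implementation):

    import numpy as np

    def embedding_bag_ref(weight, index, offset, mode=0):
        # Split the flat index list into bags using `offset`, then reduce each bag.
        bounds = list(offset) + [len(index)]
        bags = []
        for start, end in zip(bounds[:-1], bounds[1:]):
            rows = weight[index[start:end]]
            if mode == 0:
                bags.append(rows.sum(axis=0))
            elif mode == 1:
                bags.append(rows.mean(axis=0))
            else:
                bags.append(rows.max(axis=0))
        return np.stack(bags)

    weight = np.arange(8, dtype=np.float32).reshape(4, 2)
    print(embedding_bag_ref(weight, np.array([1, 0, 2]), np.array([0]), mode=0))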
...@@ -1310,34 +1334,35 @@ def embedding_bag_offset_test():
index_val = np.array([1, 0]) index_val = np.array([1, 0])
offset_val = np.array([0, 1]) offset_val = np.array([0, 1])
index_tensor = helper.make_tensor( index_tensor = helper.make_tensor(name='index_val',
name='index_val', data_type=TensorProto.INT32,
data_type=TensorProto.INT32, dims=index_val.shape,
dims=index_val.shape, vals=index_val.astype(np.int32))
vals=index_val.astype(np.int32))
index = onnx.helper.make_node( index = onnx.helper.make_node('Constant',
'Constant', inputs=[], outputs=['index'], value=index_tensor) inputs=[],
outputs=['index'],
value=index_tensor)
offset_tensor = helper.make_tensor( offset_tensor = helper.make_tensor(name='offset_val',
name='offset_val', data_type=TensorProto.INT32,
data_type=TensorProto.INT32, dims=offset_val.shape,
dims=offset_val.shape, vals=offset_val.astype(np.int32))
vals=offset_val.astype(np.int32))
offset = onnx.helper.make_node( offset = onnx.helper.make_node('Constant',
'Constant', inputs=[], outputs=['offset'], value=offset_tensor) inputs=[],
outputs=['offset'],
value=offset_tensor)
weight = helper.make_tensor_value_info('weight', TensorProto.FLOAT, [2, 3]) weight = helper.make_tensor_value_info('weight', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3]) y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node( node = onnx.helper.make_node('ATen',
'ATen', inputs=['weight', 'index', 'offset'],
inputs=['weight', 'index', 'offset'], outputs=['y'],
outputs=['y'], mode=0,
mode=0, operator='embedding_bag')
operator='embedding_bag')
return ([index, offset, node], [weight], [y]) return ([index, offset, node], [weight], [y])
...@@ -1345,11 +1370,10 @@ def embedding_bag_offset_test():
@onnx_test @onnx_test
def equal_test(): def equal_test():
ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]) ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
x1 = helper.make_tensor( x1 = helper.make_tensor("x1",
"x1", data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=(2, 3),
dims=(2, 3), vals=ax1.astype(np.float32))
vals=ax1.astype(np.float32))
x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3]) x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3]) y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
...@@ -1412,11 +1436,10 @@ def exp_test():
@onnx_test @onnx_test
def expand_test(): def expand_test():
shape_val = np.array([2, 3, 4, 5]).astype(np.int64) shape_val = np.array([2, 3, 4, 5]).astype(np.int64)
shape_ts = helper.make_tensor( shape_ts = helper.make_tensor(name='shape_tensor',
name='shape_tensor', data_type=TensorProto.INT32,
data_type=TensorProto.INT32, dims=shape_val.shape,
dims=shape_val.shape, vals=shape_val.flatten().astype(int))
vals=shape_val.flatten().astype(int))
shape_const = helper.make_node( shape_const = helper.make_node(
'Constant', 'Constant',
inputs=[], inputs=[],
...@@ -1426,8 +1449,9 @@ def expand_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 1, 1]) x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 1, 1])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4, 5]) y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4, 5])
node = onnx.helper.make_node( node = onnx.helper.make_node('Expand',
'Expand', inputs=['x', 'shape'], outputs=['y']) inputs=['x', 'shape'],
outputs=['y'])
return ([shape_const, node], [x], [y]) return ([shape_const, node], [x], [y])
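Expand follows NumPy-style broadcasting of the input up to the requested shape, so the expected result of the graph above can be reproduced directly (illustrative):

    import numpy as np

    x = np.random.randn(3, 1, 1).astype(np.float32)
    y = np.broadcast_to(x, (2, 3, 4, 5))   # same semantics as ONNX Expand
    print(y.shape)                         # (2, 3, 4, 5)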
...@@ -1483,8 +1507,10 @@ def eyelike_k_test():
def eyelike_k_outofbounds_neg_test(): def eyelike_k_outofbounds_neg_test():
T1 = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [2, 4]) T1 = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [2, 4])
T2 = helper.make_tensor_value_info('T2', TensorProto.FLOAT, [2, 4]) T2 = helper.make_tensor_value_info('T2', TensorProto.FLOAT, [2, 4])
node = onnx.helper.make_node( node = onnx.helper.make_node('EyeLike',
'EyeLike', inputs=['T1'], outputs=['T2'], k=-2) inputs=['T1'],
outputs=['T2'],
k=-2)
return ([node], [T1], [T2]) return ([node], [T1], [T2])
...@@ -1520,8 +1546,10 @@ def eyelike_verify_test():
def eyelike_verify_negk_test(): def eyelike_verify_negk_test():
T1 = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [3, 4]) T1 = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [3, 4])
T2 = helper.make_tensor_value_info('T2', TensorProto.FLOAT, [3, 4]) T2 = helper.make_tensor_value_info('T2', TensorProto.FLOAT, [3, 4])
node = onnx.helper.make_node( node = onnx.helper.make_node('EyeLike',
'EyeLike', inputs=['T1'], outputs=['T2'], k=-2) inputs=['T1'],
outputs=['T2'],
k=-2)
return ([node], [T1], [T2]) return ([node], [T1], [T2])
...@@ -1529,8 +1557,10 @@ def eyelike_verify_negk_test():
def eyelike_set_dtype_test(): def eyelike_set_dtype_test():
T1 = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [3, 4]) T1 = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [3, 4])
T2 = helper.make_tensor_value_info('T2', TensorProto.DOUBLE, [3, 4]) T2 = helper.make_tensor_value_info('T2', TensorProto.DOUBLE, [3, 4])
node = onnx.helper.make_node( node = onnx.helper.make_node('EyeLike',
'EyeLike', inputs=['T1'], outputs=['T2'], dtype=TensorProto.DOUBLE) inputs=['T1'],
outputs=['T2'],
dtype=TensorProto.DOUBLE)
return ([node], [T1], [T2]) return ([node], [T1], [T2])
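EyeLike only reads the input's shape (and, when dtype is set, the requested element type) and writes a k-shifted identity, which np.eye reproduces; illustration only (the earlier 2x4, k=-2 "out of bounds" case simply comes out all zeros):

    import numpy as np

    print(np.eye(3, 4, k=-2, dtype=np.float64))   # matches the 3x4, k=-2 tests
    print(np.eye(2, 4, k=-2, dtype=np.float32))   # all zeros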
...@@ -1540,8 +1570,10 @@ def flatten_test():
y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [6, 20]) y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [6, 20])
y2 = helper.make_tensor_value_info('3', TensorProto.FLOAT, [2, 60]) y2 = helper.make_tensor_value_info('3', TensorProto.FLOAT, [2, 60])
node = onnx.helper.make_node( node = onnx.helper.make_node('Flatten',
'Flatten', inputs=['0'], axis=2, outputs=['2']) inputs=['0'],
axis=2,
outputs=['2'])
node2 = onnx.helper.make_node('Flatten', inputs=['0'], outputs=['3']) node2 = onnx.helper.make_node('Flatten', inputs=['0'], outputs=['3'])
...@@ -1561,8 +1593,10 @@ def flatten_nonstd_test():
perm=[0, 1, 3, 2], perm=[0, 1, 3, 2],
) )
node = onnx.helper.make_node( node = onnx.helper.make_node('Flatten',
'Flatten', inputs=['tx'], axis=2, outputs=['2']) inputs=['tx'],
axis=2,
outputs=['2'])
node2 = onnx.helper.make_node('Flatten', inputs=['tx'], outputs=['3']) node2 = onnx.helper.make_node('Flatten', inputs=['tx'], outputs=['3'])
...@@ -1639,14 +1673,13 @@ def gemm_test():
z = helper.make_tensor_value_info('2', TensorProto.FLOAT, []) z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [])
a = helper.make_tensor_value_info('3', TensorProto.FLOAT, [7, 11]) a = helper.make_tensor_value_info('3', TensorProto.FLOAT, [7, 11])
node = onnx.helper.make_node( node = onnx.helper.make_node('Gemm',
'Gemm', inputs=['0', '1', '2'],
inputs=['0', '1', '2'], outputs=['3'],
outputs=['3'], alpha=2.0,
alpha=2.0, beta=2.0,
beta=2.0, transA=1,
transA=1, transB=1)
transB=1)
return ([node], [x, y, z], [a]) return ([node], [x, y, z], [a])
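Gemm computes Y = alpha * op(A) @ op(B) + beta * C, where op() applies the transA/transB flags and C follows unidirectional broadcasting; an illustrative NumPy check for the shapes used above:

    import numpy as np

    def gemm_ref(a, b, c, alpha=1.0, beta=1.0, trans_a=0, trans_b=0):
        a = a.T if trans_a else a
        b = b.T if trans_b else b
        return alpha * (a @ b) + beta * c

    a = np.random.randn(5, 7).astype(np.float32)   # transA=1 -> op(A) is 7x5
    b = np.random.randn(11, 5).astype(np.float32)  # transB=1 -> op(B) is 5x11
    c = np.float32(1.0)                            # scalar C broadcasts
    print(gemm_ref(a, b, c, 2.0, 2.0, 1, 1).shape)  # (7, 11)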
...@@ -1658,13 +1691,12 @@ def gemm_ex_test():
m3 = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 1, 6, 7]) m3 = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 1, 6, 7])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 6, 7]) y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 6, 7])
node = onnx.helper.make_node( node = onnx.helper.make_node('Gemm',
'Gemm', inputs=['1', '2', '3'],
inputs=['1', '2', '3'], outputs=['y'],
outputs=['y'], alpha=0.5,
alpha=0.5, beta=0.8,
beta=0.8, transA=1)
transA=1)
return ([node], [m1, m2, m3], [y]) return ([node], [m1, m2, m3], [y])
...@@ -1676,13 +1708,12 @@ def gemm_ex_brcst_test():
m3 = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 1, 6, 1]) m3 = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 1, 6, 1])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 6, 7]) y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 6, 7])
node = onnx.helper.make_node( node = onnx.helper.make_node('Gemm',
'Gemm', inputs=['1', '2', '3'],
inputs=['1', '2', '3'], outputs=['y'],
outputs=['y'], alpha=0.5,
alpha=0.5, beta=0.8,
beta=0.8, transA=1)
transA=1)
return ([node], [m1, m2, m3], [y]) return ([node], [m1, m2, m3], [y])
...@@ -1694,13 +1725,12 @@ def gemm_half_test():
m3 = helper.make_tensor_value_info('3', TensorProto.FLOAT16, [1, 1, 6, 1]) m3 = helper.make_tensor_value_info('3', TensorProto.FLOAT16, [1, 1, 6, 1])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [1, 1, 6, 7]) y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [1, 1, 6, 7])
node = onnx.helper.make_node( node = onnx.helper.make_node('Gemm',
'Gemm', inputs=['1', '2', '3'],
inputs=['1', '2', '3'], outputs=['y'],
outputs=['y'], alpha=0.5,
alpha=0.5, beta=0.8,
beta=0.8, transA=1)
transA=1)
return ([node], [m1, m2, m3], [y]) return ([node], [m1, m2, m3], [y])
...@@ -1736,11 +1766,10 @@ def globalmaxpool_test():
@onnx_test @onnx_test
def greater_test(): def greater_test():
ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]) ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
x1 = helper.make_tensor( x1 = helper.make_tensor("x1",
"x1", data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=(2, 3),
dims=(2, 3), vals=ax1.astype(np.float32))
vals=ax1.astype(np.float32))
x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3]) x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3]) y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
...@@ -1819,8 +1848,11 @@ def hardsigmoid_double_test():
x = helper.make_tensor_value_info('x', TensorProto.DOUBLE, [1, 3, 4, 5]) x = helper.make_tensor_value_info('x', TensorProto.DOUBLE, [1, 3, 4, 5])
y = helper.make_tensor_value_info('y', TensorProto.DOUBLE, [1, 3, 4, 5]) y = helper.make_tensor_value_info('y', TensorProto.DOUBLE, [1, 3, 4, 5])
node = onnx.helper.make_node( node = onnx.helper.make_node('HardSigmoid',
'HardSigmoid', inputs=['x'], outputs=['y'], alpha=0.3, beta=0.7) inputs=['x'],
outputs=['y'],
alpha=0.3,
beta=0.7)
return ([node], [x], [y]) return ([node], [x], [y])
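HardSigmoid is y = clip(alpha * x + beta, 0, 1); with the alpha=0.3, beta=0.7 used here (illustration only):

    import numpy as np

    def hardsigmoid_ref(x, alpha=0.3, beta=0.7):
        return np.clip(alpha * x + beta, 0.0, 1.0)

    print(hardsigmoid_ref(np.array([-5.0, 0.0, 5.0])))   # [0.  0.7 1. ]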
...@@ -1860,30 +1892,32 @@ def if_else_test():
x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [2, 3]) x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [2, 3])
y = onnx.helper.make_tensor_value_info('y', onnx.TensorProto.FLOAT, [2, 3]) y = onnx.helper.make_tensor_value_info('y', onnx.TensorProto.FLOAT, [2, 3])
then_out = onnx.helper.make_tensor_value_info( then_out = onnx.helper.make_tensor_value_info('then_out',
'then_out', onnx.TensorProto.FLOAT, [2, 3]) onnx.TensorProto.FLOAT,
else_out = onnx.helper.make_tensor_value_info( [2, 3])
'else_out', onnx.TensorProto.FLOAT, [2, 3]) else_out = onnx.helper.make_tensor_value_info('else_out',
onnx.TensorProto.FLOAT,
[2, 3])
xt = np.ones((2, 3)).astype(np.float) xt = np.ones((2, 3)).astype(np.float)
xt_tensor = helper.make_tensor( xt_tensor = helper.make_tensor(name='xt',
name='xt', data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=xt.shape,
dims=xt.shape, vals=xt.flatten().astype(np.float32))
vals=xt.flatten().astype(np.float32))
yt = np.random.randn(2, 3).astype(np.float) yt = np.random.randn(2, 3).astype(np.float)
yt_tensor = helper.make_tensor( yt_tensor = helper.make_tensor(name='yt',
name='yt', data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=yt.shape,
dims=yt.shape, vals=yt.flatten().astype(np.float32))
vals=yt.flatten().astype(np.float32))
then_add_node = onnx.helper.make_node( then_add_node = onnx.helper.make_node('Add',
'Add', inputs=['x', 'xt'], outputs=['then_out']) inputs=['x', 'xt'],
outputs=['then_out'])
else_mul_node = onnx.helper.make_node( else_mul_node = onnx.helper.make_node('Mul',
'Mul', inputs=['y', 'yt'], outputs=['else_out']) inputs=['y', 'yt'],
outputs=['else_out'])
then_body = onnx.helper.make_graph([then_add_node], 'then_body', [], then_body = onnx.helper.make_graph([then_add_node], 'then_body', [],
[then_out]) [then_out])
...@@ -1892,19 +1926,17 @@ def if_else_test():
[else_out]) [else_out])
cond = np.array([0]).astype(np.bool) cond = np.array([0]).astype(np.bool)
cond_tensor = helper.make_tensor( cond_tensor = helper.make_tensor(name="cond",
name="cond", data_type=TensorProto.BOOL,
data_type=TensorProto.BOOL, dims=cond.shape,
dims=cond.shape, vals=cond.astype(bool))
vals=cond.astype(bool))
res = onnx.helper.make_tensor_value_info('res', TensorProto.FLOAT, []) res = onnx.helper.make_tensor_value_info('res', TensorProto.FLOAT, [])
node = onnx.helper.make_node( node = onnx.helper.make_node('If',
'If', inputs=['cond'],
inputs=['cond'], outputs=['res'],
outputs=['res'], then_branch=then_body,
then_branch=then_body, else_branch=else_body)
else_branch=else_body)
return ([node], [x, y], [res], [cond_tensor, xt_tensor, yt_tensor]) return ([node], [x, y], [res], [cond_tensor, xt_tensor, yt_tensor])
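The tuple returned above is presumably what the @onnx_test decorator assembles into a ModelProto; a minimal hand-rolled sketch of that step, reusing the names defined in this function and assuming only the onnx Python package (not necessarily what the decorator actually does):

    import onnx
    from onnx import helper

    # nodes, graph inputs, graph outputs and initializers, in that order.
    graph = helper.make_graph([node], 'if_else_test', [x, y], [res],
                              initializer=[cond_tensor, xt_tensor, yt_tensor])
    model = helper.make_model(graph, producer_name='gen_onnx_sketch')
    onnx.checker.check_model(model)
    onnx.save(model, 'if_else_test.onnx')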
...@@ -1948,22 +1980,23 @@ def if_literal_test():
onnx.TensorProto.BOOL, []) onnx.TensorProto.BOOL, [])
ret = onnx.helper.make_tensor_value_info('ret', TensorProto.FLOAT, []) ret = onnx.helper.make_tensor_value_info('ret', TensorProto.FLOAT, [])
node = onnx.helper.make_node( node = onnx.helper.make_node('If',
'If', inputs=['cond'],
inputs=['cond'], outputs=['ret'],
outputs=['ret'], then_branch=then_body,
then_branch=then_body, else_branch=else_body)
else_branch=else_body)
return ([node], [cond_input], [ret]) return ([node], [cond_input], [ret])
@onnx_test @onnx_test
def if_param_excp_test(): def if_param_excp_test():
then_out = onnx.helper.make_tensor_value_info( then_out = onnx.helper.make_tensor_value_info('then_out',
'then_out', onnx.TensorProto.FLOAT, [2, 3]) onnx.TensorProto.FLOAT,
else_out = onnx.helper.make_tensor_value_info( [2, 3])
'else_out', onnx.TensorProto.FLOAT, [2, 3]) else_out = onnx.helper.make_tensor_value_info('else_out',
onnx.TensorProto.FLOAT,
[2, 3])
x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [2, 3]) x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [2, 3])
y = onnx.helper.make_tensor_value_info('y', onnx.TensorProto.FLOAT, [2, 4]) y = onnx.helper.make_tensor_value_info('y', onnx.TensorProto.FLOAT, [2, 4])
...@@ -1971,23 +2004,23 @@ def if_param_excp_test():
yt = np.random.randn(2, 4).astype(np.float) yt = np.random.randn(2, 4).astype(np.float)
xt = np.random.randn(2, 3).astype(np.float) xt = np.random.randn(2, 3).astype(np.float)
xt_tensor = helper.make_tensor( xt_tensor = helper.make_tensor(name='xt',
name='xt', data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=xt.shape,
dims=xt.shape, vals=xt.flatten().astype(np.float32))
vals=xt.flatten().astype(np.float32))
yt_tensor = helper.make_tensor( yt_tensor = helper.make_tensor(name='yt',
name='yt', data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=yt.shape,
dims=yt.shape, vals=yt.flatten().astype(np.float32))
vals=yt.flatten().astype(np.float32))
then_add_node = onnx.helper.make_node( then_add_node = onnx.helper.make_node('Add',
'Add', inputs=['x', 'xt'], outputs=['then_out']) inputs=['x', 'xt'],
outputs=['then_out'])
else_mul_node = onnx.helper.make_node( else_mul_node = onnx.helper.make_node('Mul',
'Mul', inputs=['y', 'yt'], outputs=['else_out']) inputs=['y', 'yt'],
outputs=['else_out'])
then_body = onnx.helper.make_graph([then_add_node], 'then_body', [], then_body = onnx.helper.make_graph([then_add_node], 'then_body', [],
[then_out], [xt_tensor]) [then_out], [xt_tensor])
...@@ -1999,33 +2032,33 @@ def if_param_excp_test():
onnx.TensorProto.BOOL, []) onnx.TensorProto.BOOL, [])
ret = onnx.helper.make_tensor_value_info('ret', TensorProto.FLOAT, []) ret = onnx.helper.make_tensor_value_info('ret', TensorProto.FLOAT, [])
node = onnx.helper.make_node( node = onnx.helper.make_node('If',
'If', inputs=['cond'],
inputs=['cond'], outputs=['ret'],
outputs=['ret'], then_branch=then_body,
then_branch=then_body, else_branch=else_body)
else_branch=else_body)
return ([node], [cond_input, x, y], [ret]) return ([node], [cond_input, x, y], [ret])
@onnx_test @onnx_test
def if_param_excp1_test(): def if_param_excp1_test():
then_out = onnx.helper.make_tensor_value_info( then_out = onnx.helper.make_tensor_value_info('sub_out',
'sub_out', onnx.TensorProto.FLOAT, [2, 3]) onnx.TensorProto.FLOAT,
[2, 3])
x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [2, 3]) x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [2, 3])
xt = np.random.randn(2, 3).astype(np.float) xt = np.random.randn(2, 3).astype(np.float)
xt_tensor = helper.make_tensor( xt_tensor = helper.make_tensor(name='xt',
name='xt', data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=xt.shape,
dims=xt.shape, vals=xt.flatten().astype(np.float32))
vals=xt.flatten().astype(np.float32))
then_add_node = onnx.helper.make_node( then_add_node = onnx.helper.make_node('Add',
'Add', inputs=['x', 'xt'], outputs=['sub_out']) inputs=['x', 'xt'],
outputs=['sub_out'])
sub_body = onnx.helper.make_graph([then_add_node], 'sub_body', [], sub_body = onnx.helper.make_graph([then_add_node], 'sub_body', [],
[then_out], [xt_tensor]) [then_out], [xt_tensor])
...@@ -2034,22 +2067,23 @@ def if_param_excp1_test():
onnx.TensorProto.BOOL, [2]) onnx.TensorProto.BOOL, [2])
ret = onnx.helper.make_tensor_value_info('ret', TensorProto.FLOAT, []) ret = onnx.helper.make_tensor_value_info('ret', TensorProto.FLOAT, [])
node = onnx.helper.make_node( node = onnx.helper.make_node('If',
'If', inputs=['cond'],
inputs=['cond'], outputs=['ret'],
outputs=['ret'], then_branch=sub_body,
then_branch=sub_body, else_branch=sub_body)
else_branch=sub_body)
return ([node], [cond_input, x], [ret]) return ([node], [cond_input, x], [ret])
@onnx_test @onnx_test
def if_param_test(): def if_param_test():
then_out = onnx.helper.make_tensor_value_info( then_out = onnx.helper.make_tensor_value_info('then_out',
'then_out', onnx.TensorProto.FLOAT, [2, 3]) onnx.TensorProto.FLOAT,
else_out = onnx.helper.make_tensor_value_info( [2, 3])
'else_out', onnx.TensorProto.FLOAT, [2, 3]) else_out = onnx.helper.make_tensor_value_info('else_out',
onnx.TensorProto.FLOAT,
[2, 3])
x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [2, 3]) x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [2, 3])
y = onnx.helper.make_tensor_value_info('y', onnx.TensorProto.FLOAT, [2, 3]) y = onnx.helper.make_tensor_value_info('y', onnx.TensorProto.FLOAT, [2, 3])
...@@ -2057,23 +2091,23 @@ def if_param_test():
yt = np.random.randn(2, 3).astype(np.float) yt = np.random.randn(2, 3).astype(np.float)
xt = np.random.randn(2, 3).astype(np.float) xt = np.random.randn(2, 3).astype(np.float)
xt_tensor = helper.make_tensor( xt_tensor = helper.make_tensor(name='xt',
name='xt', data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=xt.shape,
dims=xt.shape, vals=xt.flatten().astype(np.float32))
vals=xt.flatten().astype(np.float32))
yt_tensor = helper.make_tensor( yt_tensor = helper.make_tensor(name='yt',
name='yt', data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=yt.shape,
dims=yt.shape, vals=yt.flatten().astype(np.float32))
vals=yt.flatten().astype(np.float32))
then_add_node = onnx.helper.make_node( then_add_node = onnx.helper.make_node('Add',
'Add', inputs=['x', 'xt'], outputs=['then_out']) inputs=['x', 'xt'],
outputs=['then_out'])
else_mul_node = onnx.helper.make_node( else_mul_node = onnx.helper.make_node('Mul',
'Mul', inputs=['y', 'yt'], outputs=['else_out']) inputs=['y', 'yt'],
outputs=['else_out'])
then_body = onnx.helper.make_graph([then_add_node], 'then_body', [], then_body = onnx.helper.make_graph([then_add_node], 'then_body', [],
[then_out], [xt_tensor]) [then_out], [xt_tensor])
...@@ -2085,12 +2119,11 @@ def if_param_test():
onnx.TensorProto.BOOL, []) onnx.TensorProto.BOOL, [])
ret = onnx.helper.make_tensor_value_info('ret', TensorProto.FLOAT, []) ret = onnx.helper.make_tensor_value_info('ret', TensorProto.FLOAT, [])
node = onnx.helper.make_node( node = onnx.helper.make_node('If',
'If', inputs=['cond'],
inputs=['cond'], outputs=['ret'],
outputs=['ret'], then_branch=then_body,
then_branch=then_body, else_branch=else_body)
else_branch=else_body)
return ([node], [cond_input, x, y], [ret]) return ([node], [cond_input, x, y], [ret])
...@@ -2099,12 +2132,14 @@ def if_param_test():
def if_pl_test(): def if_pl_test():
out_x = onnx.helper.make_tensor_value_info('out_x', onnx.TensorProto.FLOAT, out_x = onnx.helper.make_tensor_value_info('out_x', onnx.TensorProto.FLOAT,
[2, 3]) [2, 3])
out_l_x = onnx.helper.make_tensor_value_info( out_l_x = onnx.helper.make_tensor_value_info('out_l_x',
'out_l_x', onnx.TensorProto.FLOAT, [2, 3]) onnx.TensorProto.FLOAT,
[2, 3])
out_y = onnx.helper.make_tensor_value_info('out_y', onnx.TensorProto.FLOAT, out_y = onnx.helper.make_tensor_value_info('out_y', onnx.TensorProto.FLOAT,
[3, 3]) [3, 3])
out_l_y = onnx.helper.make_tensor_value_info( out_l_y = onnx.helper.make_tensor_value_info('out_l_y',
'out_l_y', onnx.TensorProto.FLOAT, [3, 3]) onnx.TensorProto.FLOAT,
[3, 3])
x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [2, 3]) x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [2, 3])
y = onnx.helper.make_tensor_value_info('y', onnx.TensorProto.FLOAT, [3, 3]) y = onnx.helper.make_tensor_value_info('y', onnx.TensorProto.FLOAT, [3, 3])
...@@ -2112,23 +2147,23 @@ def if_pl_test():
xt = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) xt = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
yt = np.array([[8, 7, 6], [5, 4, 3], [2, 1, 0]]).astype(np.float32) yt = np.array([[8, 7, 6], [5, 4, 3], [2, 1, 0]]).astype(np.float32)
xt_tensor = helper.make_tensor( xt_tensor = helper.make_tensor(name='xt',
name='xt', data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=xt.shape,
dims=xt.shape, vals=xt.flatten().astype(np.float32))
vals=xt.flatten().astype(np.float32))
yt_tensor = helper.make_tensor( yt_tensor = helper.make_tensor(name='yt',
name='yt', data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=yt.shape,
dims=yt.shape, vals=yt.flatten().astype(np.float32))
vals=yt.flatten().astype(np.float32))
then_add_node = onnx.helper.make_node( then_add_node = onnx.helper.make_node('Add',
'Add', inputs=['x', 'xt'], outputs=['out_x']) inputs=['x', 'xt'],
outputs=['out_x'])
else_mul_node = onnx.helper.make_node( else_mul_node = onnx.helper.make_node('Mul',
'Mul', inputs=['y', 'yt'], outputs=['out_y']) inputs=['y', 'yt'],
outputs=['out_y'])
then_const_node = onnx.helper.make_node( then_const_node = onnx.helper.make_node(
'Constant', 'Constant',
...@@ -2152,12 +2187,11 @@ def if_pl_test():
onnx.TensorProto.BOOL, []) onnx.TensorProto.BOOL, [])
ret = onnx.helper.make_tensor_value_info('ret', TensorProto.FLOAT, []) ret = onnx.helper.make_tensor_value_info('ret', TensorProto.FLOAT, [])
node = onnx.helper.make_node( node = onnx.helper.make_node('If',
'If', inputs=['cond'],
inputs=['cond'], outputs=['ret'],
outputs=['ret'], then_branch=then_body,
then_branch=then_body, else_branch=else_body)
else_branch=else_body)
return ([node], [cond_input, x, y], [ret], [xt_tensor, yt_tensor]) return ([node], [cond_input, x, y], [ret], [xt_tensor, yt_tensor])
...@@ -2167,30 +2201,32 @@ def if_then_test():
x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [2, 3]) x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [2, 3])
y = onnx.helper.make_tensor_value_info('y', onnx.TensorProto.FLOAT, [2, 3]) y = onnx.helper.make_tensor_value_info('y', onnx.TensorProto.FLOAT, [2, 3])
then_out = onnx.helper.make_tensor_value_info( then_out = onnx.helper.make_tensor_value_info('then_out',
'then_out', onnx.TensorProto.FLOAT, [2, 3]) onnx.TensorProto.FLOAT,
else_out = onnx.helper.make_tensor_value_info( [2, 3])
'else_out', onnx.TensorProto.FLOAT, [2, 3]) else_out = onnx.helper.make_tensor_value_info('else_out',
onnx.TensorProto.FLOAT,
[2, 3])
xt = np.ones((2, 3)).astype(np.float) xt = np.ones((2, 3)).astype(np.float)
xt_tensor = helper.make_tensor( xt_tensor = helper.make_tensor(name='xt',
name='xt', data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=xt.shape,
dims=xt.shape, vals=xt.flatten().astype(np.float32))
vals=xt.flatten().astype(np.float32))
yt = np.random.randn(2, 3).astype(np.float) yt = np.random.randn(2, 3).astype(np.float)
yt_tensor = helper.make_tensor( yt_tensor = helper.make_tensor(name='yt',
name='yt', data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=yt.shape,
dims=yt.shape, vals=yt.flatten().astype(np.float32))
vals=yt.flatten().astype(np.float32))
then_add_node = onnx.helper.make_node( then_add_node = onnx.helper.make_node('Add',
'Add', inputs=['x', 'xt'], outputs=['then_out']) inputs=['x', 'xt'],
outputs=['then_out'])
else_mul_node = onnx.helper.make_node( else_mul_node = onnx.helper.make_node('Mul',
'Mul', inputs=['y', 'yt'], outputs=['else_out']) inputs=['y', 'yt'],
outputs=['else_out'])
then_body = onnx.helper.make_graph([then_add_node], 'then_body', [], then_body = onnx.helper.make_graph([then_add_node], 'then_body', [],
[then_out]) [then_out])
...@@ -2199,19 +2235,17 @@ def if_then_test():
[else_out]) [else_out])
cond = np.array([1]).astype(np.bool) cond = np.array([1]).astype(np.bool)
cond_tensor = helper.make_tensor( cond_tensor = helper.make_tensor(name="cond",
name="cond", data_type=TensorProto.BOOL,
data_type=TensorProto.BOOL, dims=cond.shape,
dims=cond.shape, vals=cond.astype(bool))
vals=cond.astype(bool))
res = onnx.helper.make_tensor_value_info('res', TensorProto.FLOAT, []) res = onnx.helper.make_tensor_value_info('res', TensorProto.FLOAT, [])
node = onnx.helper.make_node( node = onnx.helper.make_node('If',
'If', inputs=['cond'],
inputs=['cond'], outputs=['res'],
outputs=['res'], then_branch=then_body,
then_branch=then_body, else_branch=else_body)
else_branch=else_body)
return ([node], [x, y], [res], [cond_tensor, xt_tensor, yt_tensor]) return ([node], [x, y], [res], [cond_tensor, xt_tensor, yt_tensor])
...@@ -2223,45 +2257,50 @@ def if_tuple_test():
cond_input = onnx.helper.make_tensor_value_info('cond', cond_input = onnx.helper.make_tensor_value_info('cond',
onnx.TensorProto.BOOL, []) onnx.TensorProto.BOOL, [])
then_out0 = onnx.helper.make_tensor_value_info( then_out0 = onnx.helper.make_tensor_value_info('then_out0',
'then_out0', onnx.TensorProto.FLOAT, [1, 4]) onnx.TensorProto.FLOAT,
then_out1 = onnx.helper.make_tensor_value_info( [1, 4])
'then_out1', onnx.TensorProto.FLOAT, [3, 4]) then_out1 = onnx.helper.make_tensor_value_info('then_out1',
else_out0 = onnx.helper.make_tensor_value_info( onnx.TensorProto.FLOAT,
'else_out0', onnx.TensorProto.FLOAT, [1, 4]) [3, 4])
else_out1 = onnx.helper.make_tensor_value_info( else_out0 = onnx.helper.make_tensor_value_info('else_out0',
'else_out1', onnx.TensorProto.FLOAT, [3, 4]) onnx.TensorProto.FLOAT,
[1, 4])
else_out1 = onnx.helper.make_tensor_value_info('else_out1',
onnx.TensorProto.FLOAT,
[3, 4])
one = np.ones([1]).astype(np.float) one = np.ones([1]).astype(np.float)
one_tensor = helper.make_tensor( one_tensor = helper.make_tensor(name='one',
name='one', data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=one.shape,
dims=one.shape, vals=one.flatten().astype(np.float32))
vals=one.flatten().astype(np.float32))
two = np.array([2]).astype(np.float) two = np.array([2]).astype(np.float)
two_tensor = helper.make_tensor( two_tensor = helper.make_tensor(name='two',
name='two', data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=two.shape,
dims=two.shape, vals=two.flatten().astype(np.float32))
vals=two.flatten().astype(np.float32))
three = np.array([3]).astype(np.float) three = np.array([3]).astype(np.float)
three_tensor = helper.make_tensor( three_tensor = helper.make_tensor(name='three',
name='three', data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=three.shape,
dims=three.shape, vals=three.flatten().astype(np.float32))
vals=three.flatten().astype(np.float32))
then_add_node = onnx.helper.make_node('Add',
then_add_node = onnx.helper.make_node( inputs=['x', 'one'],
'Add', inputs=['x', 'one'], outputs=['then_out0']) outputs=['then_out0'])
then_mul_node = onnx.helper.make_node( then_mul_node = onnx.helper.make_node('Mul',
'Mul', inputs=['y', 'two'], outputs=['then_out1']) inputs=['y', 'two'],
outputs=['then_out1'])
else_mul_node = onnx.helper.make_node(
'Mul', inputs=['x', 'three'], outputs=['else_out0']) else_mul_node = onnx.helper.make_node('Mul',
else_add_node = onnx.helper.make_node( inputs=['x', 'three'],
'Add', inputs=['y', 'three'], outputs=['else_out1']) outputs=['else_out0'])
else_add_node = onnx.helper.make_node('Add',
inputs=['y', 'three'],
outputs=['else_out1'])
then_body = onnx.helper.make_graph([then_add_node, then_mul_node], then_body = onnx.helper.make_graph([then_add_node, then_mul_node],
'then_body', [], [then_out0, then_out1]) 'then_body', [], [then_out0, then_out1])
...@@ -2272,15 +2311,14 @@ def if_tuple_test():
res0 = onnx.helper.make_tensor_value_info('res0', TensorProto.FLOAT, []) res0 = onnx.helper.make_tensor_value_info('res0', TensorProto.FLOAT, [])
res1 = onnx.helper.make_tensor_value_info('res1', TensorProto.FLOAT, []) res1 = onnx.helper.make_tensor_value_info('res1', TensorProto.FLOAT, [])
node = onnx.helper.make_node( node = onnx.helper.make_node('If',
'If', inputs=['cond'],
inputs=['cond'], outputs=['res0', 'res1'],
outputs=['res0', 'res1'], then_branch=then_body,
then_branch=then_body, else_branch=else_body)
else_branch=else_body)
    return ([node], [cond_input, x, y], [res0, res1],
            [one_tensor, two_tensor, three_tensor])
@onnx_test @onnx_test
...@@ -2288,12 +2326,11 @@ def imagescaler_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 16, 16]) x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 16, 16])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 16, 16]) y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 16, 16])
node = onnx.helper.make_node( node = onnx.helper.make_node('ImageScaler',
'ImageScaler', inputs=['0'],
inputs=['0'], outputs=['1'],
outputs=['1'], bias=[0.01, 0.02, 0.03],
bias=[0.01, 0.02, 0.03], scale=0.5)
scale=0.5)
return ([node], [x], [y]) return ([node], [x], [y])
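ImageScaler scales the whole tensor and then adds one bias value per channel (axis 1); the expected output of this graph written out with NumPy (illustrative):

    import numpy as np

    x = np.random.randn(1, 3, 16, 16).astype(np.float32)
    bias = np.array([0.01, 0.02, 0.03], dtype=np.float32)
    y = 0.5 * x + bias.reshape(1, 3, 1, 1)   # scale=0.5, per-channel bias
    print(y.shape)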
...@@ -2303,12 +2340,11 @@ def imagescaler_half_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [1, 3, 16, 16]) x = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [1, 3, 16, 16])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT16, [1, 3, 16, 16]) y = helper.make_tensor_value_info('1', TensorProto.FLOAT16, [1, 3, 16, 16])
node = onnx.helper.make_node( node = onnx.helper.make_node('ImageScaler',
'ImageScaler', inputs=['0'],
inputs=['0'], outputs=['1'],
outputs=['1'], bias=[0.01, 0.02, 0.03],
bias=[0.01, 0.02, 0.03], scale=0.5)
scale=0.5)
return ([node], [x], [y]) return ([node], [x], [y])
...@@ -2363,11 +2399,10 @@ def implicit_sub_bcast_test():
@onnx_test @onnx_test
def initializer_not_an_input(): def initializer_not_an_input():
values = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) values = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
w = helper.make_tensor( w = helper.make_tensor(name='w',
name='w', data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=values.shape,
dims=values.shape, vals=values.flatten().astype(np.float))
vals=values.flatten().astype(np.float))
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [5, 2]) x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [5, 2])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [5, 4]) y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [5, 4])
...@@ -2388,34 +2423,32 @@ def instance_norm_test():
bias = helper.make_tensor_value_info('2', TensorProto.FLOAT, [2]) bias = helper.make_tensor_value_info('2', TensorProto.FLOAT, [2])
y = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 2, 3, 3]) y = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 2, 3, 3])
node = onnx.helper.make_node( node = onnx.helper.make_node('InstanceNormalization',
'InstanceNormalization', inputs=['0', '1', '2'], outputs=['3']) inputs=['0', '1', '2'],
outputs=['3'])
return ([node], [x, scale, bias], [y]) return ([node], [x, scale, bias], [y])
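InstanceNormalization normalizes each (batch, channel) slice over its spatial dimensions: y = scale * (x - mean) / sqrt(var + epsilon) + bias. An illustrative NumPy reference (the default epsilon of 1e-5 is assumed):

    import numpy as np

    def instance_norm_ref(x, scale, bias, eps=1e-5):
        axes = tuple(range(2, x.ndim))                 # spatial axes only
        mean = x.mean(axis=axes, keepdims=True)
        var = x.var(axis=axes, keepdims=True)
        s = scale.reshape(1, -1, *([1] * (x.ndim - 2)))
        b = bias.reshape(1, -1, *([1] * (x.ndim - 2)))
        return s * (x - mean) / np.sqrt(var + eps) + b

    x = np.arange(18, dtype=np.float32).reshape(1, 2, 3, 3)
    print(instance_norm_ref(x, np.array([1.0, 2.0]), np.array([0.0, 1.0])))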
@onnx_test @onnx_test
def instance_norm_val_test(): def instance_norm_val_test():
x = np.array([[[[0, 1, 2], [3, 4, 5], [6, 7, 8]], [[0, 1, 2], [3, 4, 5], x = np.array([[[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
[6, 7, 8]]]]) [[0, 1, 2], [3, 4, 5], [6, 7, 8]]]])
scale = np.array([1, 2]) scale = np.array([1, 2])
bias = np.array([0, 1]) bias = np.array([0, 1])
x_tensor = helper.make_tensor( x_tensor = helper.make_tensor(name='x_tensor',
name='x_tensor', data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=x.shape,
dims=x.shape, vals=x.flatten().astype(np.float))
vals=x.flatten().astype(np.float)) scale_tensor = helper.make_tensor(name='scale_tensor',
scale_tensor = helper.make_tensor( data_type=TensorProto.FLOAT,
name='scale_tensor', dims=scale.shape,
data_type=TensorProto.FLOAT, vals=scale.flatten().astype(np.float))
dims=scale.shape, bias_tensor = helper.make_tensor(name='bias_tensor',
vals=scale.flatten().astype(np.float)) data_type=TensorProto.FLOAT,
bias_tensor = helper.make_tensor( dims=bias.shape,
name='bias_tensor', vals=bias.flatten().astype(np.float))
data_type=TensorProto.FLOAT,
dims=bias.shape,
vals=bias.flatten().astype(np.float))
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 3, 3]) y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 3, 3])
...@@ -2429,26 +2462,23 @@ def instance_norm_val_test():
@onnx_test @onnx_test
def instance_norm_val_3d_test(): def instance_norm_val_3d_test():
x = np.array([[[[[0, 1], [2, 3]], [[4, 5], [6, 7]]], [[[0, 1], [2, 3]], x = np.array([[[[[0, 1], [2, 3]], [[4, 5], [6, 7]]],
[[4, 5], [6, 7]]]]]) [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]]])
scale = np.array([1, 2]) scale = np.array([1, 2])
bias = np.array([0, 1]) bias = np.array([0, 1])
x_tensor = helper.make_tensor( x_tensor = helper.make_tensor(name='x_tensor',
name='x_tensor', data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=x.shape,
dims=x.shape, vals=x.flatten().astype(np.float))
vals=x.flatten().astype(np.float)) scale_tensor = helper.make_tensor(name='scale_tensor',
scale_tensor = helper.make_tensor( data_type=TensorProto.FLOAT,
name='scale_tensor', dims=scale.shape,
data_type=TensorProto.FLOAT, vals=scale.flatten().astype(np.float))
dims=scale.shape, bias_tensor = helper.make_tensor(name='bias_tensor',
vals=scale.flatten().astype(np.float)) data_type=TensorProto.FLOAT,
bias_tensor = helper.make_tensor( dims=bias.shape,
name='bias_tensor', vals=bias.flatten().astype(np.float))
data_type=TensorProto.FLOAT,
dims=bias.shape,
vals=bias.flatten().astype(np.float))
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 2, 2, 2]) y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 2, 2, 2])
...@@ -2496,47 +2526,57 @@ def layernorm_test():
pow_2 = np.array([[[2, 2, 2, 2, 2]]]) pow_2 = np.array([[[2, 2, 2, 2, 2]]])
epsilon = np.array([1e-12]) epsilon = np.array([1e-12])
pow_tensor = helper.make_tensor( pow_tensor = helper.make_tensor(name='pow',
name='pow', data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=pow_2.shape,
dims=pow_2.shape, vals=pow_2.flatten().astype(np.float))
vals=pow_2.flatten().astype(np.float))
epsilon_tensor = helper.make_tensor( epsilon_tensor = helper.make_tensor(name='epsilon',
name='epsilon', data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=epsilon.shape,
dims=epsilon.shape, vals=epsilon.flatten().astype(
vals=epsilon.flatten().astype(np.float)) np.float))
mean = onnx.helper.make_node( mean = onnx.helper.make_node('ReduceMean',
'ReduceMean', inputs=['0'], outputs=['mean_out'], axes=axes) inputs=['0'],
outputs=['mean_out'],
axes=axes)
sub_mean = onnx.helper.make_node( sub_mean = onnx.helper.make_node('Sub',
'Sub', inputs=['0', 'mean_out'], outputs=['sub_out']) inputs=['0', 'mean_out'],
outputs=['sub_out'])
sub_pow = onnx.helper.make_node( sub_pow = onnx.helper.make_node('Pow',
'Pow', inputs=['sub_out', 'pow'], outputs=['pow_out']) inputs=['sub_out', 'pow'],
outputs=['pow_out'])
var = onnx.helper.make_node( var = onnx.helper.make_node('ReduceMean',
'ReduceMean', inputs=['pow_out'], outputs=['var_out'], axes=axes) inputs=['pow_out'],
outputs=['var_out'],
axes=axes)
add = onnx.helper.make_node( add = onnx.helper.make_node('Add',
'Add', inputs=['var_out', 'epsilon'], outputs=['add_out']) inputs=['var_out', 'epsilon'],
outputs=['add_out'])
sqrt = onnx.helper.make_node( sqrt = onnx.helper.make_node('Sqrt',
'Sqrt', inputs=['add_out'], outputs=['sqrt_out']) inputs=['add_out'],
outputs=['sqrt_out'])
div = onnx.helper.make_node( div = onnx.helper.make_node('Div',
'Div', inputs=['sub_out', 'sqrt_out'], outputs=['div_out']) inputs=['sub_out', 'sqrt_out'],
outputs=['div_out'])
mul = onnx.helper.make_node( mul = onnx.helper.make_node('Mul',
'Mul', inputs=['scale', 'div_out'], outputs=['mul_out']) inputs=['scale', 'div_out'],
outputs=['mul_out'])
bias_add = onnx.helper.make_node( bias_add = onnx.helper.make_node('Add',
'Add', inputs=['mul_out', 'bias'], outputs=['1']) inputs=['mul_out', 'bias'],
outputs=['1'])
    return ([mean, sub_mean, sub_pow, var, add, sqrt, div, mul, bias_add],
            [x, scale, bias], [y], [pow_tensor, epsilon_tensor])
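The nine-node chain above is the textbook layer-norm decomposition; collapsed into NumPy it reads as follows (an illustration that assumes normalization over the last axis, matching the graph's axes attribute, and reuses the 1e-12 epsilon; the example shapes are made up):

    import numpy as np

    def layernorm_ref(x, scale, bias, eps=1e-12, axis=-1):
        mean = x.mean(axis=axis, keepdims=True)
        var = ((x - mean) ** 2).mean(axis=axis, keepdims=True)   # ReduceMean(Pow(Sub))
        return scale * (x - mean) / np.sqrt(var + eps) + bias

    x = np.random.randn(1, 4, 5).astype(np.float32)
    print(layernorm_ref(x, np.ones(5, np.float32), np.zeros(5, np.float32)).shape)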
@onnx_test @onnx_test
...@@ -2554,14 +2594,16 @@ def layernorm_op_test():
return ([node], [x, w, b], [output]) return ([node], [x, w, b], [output])
@onnx_test @onnx_test
def leaky_relu_test(): def leaky_relu_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3]) x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3]) y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
node = onnx.helper.make_node( node = onnx.helper.make_node('LeakyRelu',
'LeakyRelu', inputs=['0'], outputs=['1'], alpha=0.01) inputs=['0'],
outputs=['1'],
alpha=0.01)
return ([node], [x], [y]) return ([node], [x], [y])
...@@ -2569,11 +2611,10 @@ def leaky_relu_test():
@onnx_test @onnx_test
def less_test(): def less_test():
ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]) ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
x1 = helper.make_tensor( x1 = helper.make_tensor("x1",
"x1", data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=(2, 3),
dims=(2, 3), vals=ax1.astype(np.float32))
vals=ax1.astype(np.float32))
x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3]) x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3]) y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
...@@ -2673,8 +2714,10 @@ def logsoftmax_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6]) x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 5, 6]) y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 5, 6])
node = onnx.helper.make_node( node = onnx.helper.make_node('LogSoftmax',
'LogSoftmax', inputs=['x'], outputs=['y'], axis=1) inputs=['x'],
outputs=['y'],
axis=1)
return ([node], [x], [y]) return ([node], [x], [y])
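Under the per-axis (opset-13 style) definition assumed here, LogSoftmax along axis=1 can be checked with a numerically stable NumPy reference (illustrative):

    import numpy as np

    def log_softmax_ref(x, axis=1):
        z = x - x.max(axis=axis, keepdims=True)          # stabilize the exponent
        return z - np.log(np.exp(z).sum(axis=axis, keepdims=True))

    x = np.random.randn(3, 4, 5, 6).astype(np.float32)
    print(np.allclose(np.exp(log_softmax_ref(x)).sum(axis=1), 1.0, atol=1e-5))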
...@@ -2684,16 +2727,17 @@ def logsoftmax_nonstd_input_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [6, 9]) x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [6, 9])
y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 4]) y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 4])
node0 = onnx.helper.make_node( node0 = onnx.helper.make_node('Slice',
'Slice', inputs=['0'],
inputs=['0'], axes=[0, 1],
axes=[0, 1], starts=[1, 0],
starts=[1, 0], ends=[4, 4],
ends=[4, 4], outputs=['1'])
outputs=['1'])
node1 = onnx.helper.make_node( node1 = onnx.helper.make_node('LogSoftmax',
'LogSoftmax', inputs=['1'], outputs=['2'], axis=-1) inputs=['1'],
outputs=['2'],
axis=-1)
return ([node0, node1], [x], [y]) return ([node0, node1], [x], [y])
...@@ -2781,8 +2825,10 @@ def lpnormalization_axis_error_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3]) x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3]) y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node( node = onnx.helper.make_node('LpNormalization',
'LpNormalization', inputs=['x'], outputs=['y'], axis=2) inputs=['x'],
outputs=['y'],
axis=2)
return ([node], [x], [y]) return ([node], [x], [y])
...@@ -2819,8 +2865,10 @@ def lpnormalization_l2_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4]) x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4]) y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
node = onnx.helper.make_node( node = onnx.helper.make_node('LpNormalization',
'LpNormalization', inputs=['x'], outputs=['y'], p=2) inputs=['x'],
outputs=['y'],
p=2)
return ([node], [x], [y]) return ([node], [x], [y])
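LpNormalization with p=2 divides each slice along the chosen axis (default -1) by its L2 norm; an illustrative reference (the epsilon guard below is an extra safeguard, not part of the ONNX definition):

    import numpy as np

    def l2_normalize_ref(x, axis=-1, eps=1e-12):
        norm = np.sqrt((x * x).sum(axis=axis, keepdims=True))
        return x / np.maximum(norm, eps)   # avoid dividing an all-zero row by zero

    x = np.random.randn(3, 4).astype(np.float32)
    print(np.allclose(np.linalg.norm(l2_normalize_ref(x), axis=-1), 1.0, atol=1e-5))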
...@@ -2829,8 +2877,10 @@ def lpnormalization_p_error_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3]) x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3]) y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node( node = onnx.helper.make_node('LpNormalization',
'LpNormalization', inputs=['x'], outputs=['y'], p=3) inputs=['x'],
outputs=['y'],
p=3)
return ([node], [x], [y]) return ([node], [x], [y])
...@@ -2839,14 +2889,13 @@ def lrn_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 28, 24, 24]) x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 28, 24, 24])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 28, 24, 24]) y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 28, 24, 24])
node = onnx.helper.make_node( node = onnx.helper.make_node('LRN',
'LRN', inputs=['0'],
inputs=['0'], size=5,
size=5, alpha=0.0001,
alpha=0.0001, beta=0.75,
beta=0.75, bias=1.0,
bias=1.0, outputs=['1'])
outputs=['1'])
return ([node], [x], [y]) return ([node], [x], [y])
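LRN squares the input, averages the squares over a window of `size` channels, and divides: y = x / (bias + alpha / size * window_sum)^beta. An illustrative NumPy reference for the odd window size used above:

    import numpy as np

    def lrn_ref(x, size=5, alpha=1e-4, beta=0.75, bias=1.0):
        sq = x * x
        out = np.empty_like(x)
        half = size // 2
        for i in range(x.shape[1]):
            lo, hi = max(0, i - half), min(x.shape[1], i + half + 1)  # clamp window
            denom = (bias + (alpha / size) * sq[:, lo:hi].sum(axis=1)) ** beta
            out[:, i] = x[:, i] / denom
        return out

    x = np.random.randn(1, 28, 24, 24).astype(np.float32)
    print(lrn_ref(x).shape)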
...@@ -2977,14 +3026,13 @@ def maxpool_notset_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5]) x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 1, 1]) y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 1, 1])
node = onnx.helper.make_node( node = onnx.helper.make_node('MaxPool',
'MaxPool', inputs=['x'],
inputs=['x'], outputs=['y'],
outputs=['y'], kernel_shape=[6, 6],
kernel_shape=[6, 6], strides=[2, 2],
strides=[2, 2], pads=[0, 0, 1, 1],
pads=[0, 0, 1, 1], auto_pad='NOTSET')
auto_pad='NOTSET')
return ([node], [x], [y]) return ([node], [x], [y])
...@@ -2994,12 +3042,11 @@ def maxpool_same_upper_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5]) x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 5, 5]) y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 5, 5])
node = onnx.helper.make_node( node = onnx.helper.make_node('MaxPool',
'MaxPool', inputs=['x'],
inputs=['x'], outputs=['y'],
outputs=['y'], kernel_shape=[2, 2],
kernel_shape=[2, 2], auto_pad='SAME_UPPER')
auto_pad='SAME_UPPER')
return ([node], [x], [y]) return ([node], [x], [y])
...@@ -3016,8 +3063,9 @@ def mean_broadcast_test():
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT,
[1, 2, 3, 4]) [1, 2, 3, 4])
node = onnx.helper.make_node( node = onnx.helper.make_node("Mean",
"Mean", inputs=["0", "1", "2", "3", "4"], outputs=["mean"]) inputs=["0", "1", "2", "3", "4"],
outputs=["mean"])
return ([node], [data_0, data_1, data_2, data_3, data_4], [mean]) return ([node], [data_0, data_1, data_2, data_3, data_4], [mean])
...@@ -3031,8 +3079,9 @@ def mean_fp16_test():
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT16, mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT16,
[1, 2, 3]) [1, 2, 3])
node = onnx.helper.make_node( node = onnx.helper.make_node("Mean",
"Mean", inputs=["0", "1", "2"], outputs=["mean"]) inputs=["0", "1", "2"],
outputs=["mean"])
return ([node], [data_0, data_1, data_2], [mean]) return ([node], [data_0, data_1, data_2], [mean])
...@@ -3045,8 +3094,9 @@ def mean_invalid_broadcast_test():
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [1, 2, 3]) mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [1, 2, 3])
node = onnx.helper.make_node( node = onnx.helper.make_node("Mean",
"Mean", inputs=["0", "1", "2"], outputs=["mean"]) inputs=["0", "1", "2"],
outputs=["mean"])
return ([node], [data_0, data_1, data_2], [mean]) return ([node], [data_0, data_1, data_2], [mean])
...@@ -3099,12 +3149,11 @@ def multinomial_test():
output = helper.make_tensor_value_info("output", TensorProto.INT32, output = helper.make_tensor_value_info("output", TensorProto.INT32,
[1, 10]) [1, 10])
node = onnx.helper.make_node( node = onnx.helper.make_node('Multinomial',
'Multinomial', inputs=['input'],
inputs=['input'], sample_size=sample_size,
sample_size=sample_size, seed=seed,
seed=seed, outputs=['output'])
outputs=['output'])
return ([node], [input], [output]) return ([node], [input], [output])
...@@ -3116,11 +3165,10 @@ def multinomial_generated_seed_test():
output = helper.make_tensor_value_info("output", TensorProto.INT32, output = helper.make_tensor_value_info("output", TensorProto.INT32,
[1, 10]) [1, 10])
node = onnx.helper.make_node( node = onnx.helper.make_node('Multinomial',
'Multinomial', inputs=['input'],
inputs=['input'], sample_size=sample_size,
sample_size=sample_size, outputs=['output'])
outputs=['output'])
return ([node], [input], [output]) return ([node], [input], [output])
...@@ -3133,12 +3181,11 @@ def multinomial_dtype_error_test():
output = helper.make_tensor_value_info("output", TensorProto.INT64, output = helper.make_tensor_value_info("output", TensorProto.INT64,
[1, 10]) [1, 10])
node = onnx.helper.make_node( node = onnx.helper.make_node('Multinomial',
'Multinomial', inputs=['input'],
inputs=['input'], sample_size=sample_size,
sample_size=sample_size, dtype=dtype,
dtype=dtype, outputs=['output'])
outputs=['output'])
return ([node], [input], [output]) return ([node], [input], [output])
...@@ -3152,13 +3199,12 @@ def multinomial_int64_test():
output = helper.make_tensor_value_info("output", TensorProto.INT64, output = helper.make_tensor_value_info("output", TensorProto.INT64,
[1, 10]) [1, 10])
node = onnx.helper.make_node( node = onnx.helper.make_node('Multinomial',
'Multinomial', inputs=['input'],
inputs=['input'], sample_size=sample_size,
sample_size=sample_size, dtype=dtype,
dtype=dtype, seed=seed,
seed=seed, outputs=['output'])
outputs=['output'])
return ([node], [input], [output]) return ([node], [input], [output])
...@@ -3186,14 +3232,14 @@ def nms_test():
out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64, out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
[6, 3]) [6, 3])
node = onnx.helper.make_node( node = onnx.helper.make_node('NonMaxSuppression',
'NonMaxSuppression', inputs=[
inputs=[ 'boxes', 'scores',
'boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'max_output_boxes_per_class',
'score_threshold' 'iou_threshold', 'score_threshold'
], ],
outputs=['selected_indices'], outputs=['selected_indices'],
center_point_box=1) center_point_box=1)
return ([node], [b, s, mo, iou, st], [out]) return ([node], [b, s, mo, iou, st], [out])
...@@ -3223,8 +3269,10 @@ def no_pad_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2]) x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 2]) y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 2])
node = onnx.helper.make_node( node = onnx.helper.make_node('Pad',
'Pad', inputs=['0'], pads=[0, 0, 0, 0], outputs=['1']) inputs=['0'],
pads=[0, 0, 0, 0],
outputs=['1'])
return ([node], [x], [y]) return ([node], [x], [y])
...@@ -3234,8 +3282,9 @@ def nonzero_dynamic_test():
x = helper.make_tensor_value_info('data', TensorProto.BOOL, [2, 2]) x = helper.make_tensor_value_info('data', TensorProto.BOOL, [2, 2])
y = helper.make_tensor_value_info('indices', TensorProto.INT64, [2, 3]) y = helper.make_tensor_value_info('indices', TensorProto.INT64, [2, 3])
node = onnx.helper.make_node( node = onnx.helper.make_node('NonZero',
'NonZero', inputs=['data'], outputs=['indices']) inputs=['data'],
outputs=['indices'])
return ([node], [x], [y]) return ([node], [x], [y])
...@@ -3243,15 +3292,15 @@ def nonzero_dynamic_test():
@onnx_test @onnx_test
def nonzero_test(): def nonzero_test():
data1 = np.array([[1., 0.], [1., 1.]]) data1 = np.array([[1., 0.], [1., 1.]])
data = helper.make_tensor( data = helper.make_tensor(name='data',
name='data', data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=data1.shape,
dims=data1.shape, vals=data1.flatten().astype(np.float))
vals=data1.flatten().astype(np.float))
y = helper.make_tensor_value_info('indices', TensorProto.INT64, [2, 3]) y = helper.make_tensor_value_info('indices', TensorProto.INT64, [2, 3])
node = onnx.helper.make_node( node = onnx.helper.make_node('NonZero',
'NonZero', inputs=['data'], outputs=['indices']) inputs=['data'],
outputs=['indices'])
return ([node], [], [y], [data]) return ([node], [], [y], [data])
...@@ -3259,15 +3308,15 @@ def nonzero_test():
@onnx_test @onnx_test
def nonzero_int_test(): def nonzero_int_test():
data1 = np.array([[1, 1, 0], [1, 0, 1]]) data1 = np.array([[1, 1, 0], [1, 0, 1]])
data = helper.make_tensor( data = helper.make_tensor(name='data',
name='data', data_type=TensorProto.INT16,
data_type=TensorProto.INT16, dims=data1.shape,
dims=data1.shape, vals=data1.flatten().astype(np.int16))
vals=data1.flatten().astype(np.int16))
y = helper.make_tensor_value_info('indices', TensorProto.INT64, [2, 4]) y = helper.make_tensor_value_info('indices', TensorProto.INT64, [2, 4])
node = onnx.helper.make_node( node = onnx.helper.make_node('NonZero',
'NonZero', inputs=['data'], outputs=['indices']) inputs=['data'],
outputs=['indices'])
return ([node], [], [y], [data]) return ([node], [], [y], [data])
...@@ -3281,17 +3330,15 @@ def onehot_test():
values = helper.make_tensor_value_info("values", TensorProto.FLOAT16, [2]) values = helper.make_tensor_value_info("values", TensorProto.FLOAT16, [2])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [3, 5, 2]) y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [3, 5, 2])
depth_tensor = helper.make_tensor( depth_tensor = helper.make_tensor(name="depth",
name="depth", data_type=TensorProto.INT32,
data_type=TensorProto.INT32, dims=None,
dims=None, vals=depth.astype(int))
vals=depth.astype(int))
node = onnx.helper.make_node( node = onnx.helper.make_node('OneHot',
'OneHot', inputs=['indices', 'depth', 'values'],
inputs=['indices', 'depth', 'values'], outputs=['y'],
outputs=['y'], axis=axis_value)
axis=axis_value)
return ([node], [indices, values], [y], [depth_tensor]) return ([node], [indices, values], [y], [depth_tensor])
...@@ -3301,8 +3348,10 @@ def pad_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2]) x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 4]) y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 4])
node = onnx.helper.make_node( node = onnx.helper.make_node('Pad',
'Pad', inputs=['0'], pads=[1, 1, 1, 1], outputs=['1']) inputs=['0'],
pads=[1, 1, 1, 1],
outputs=['1'])
return ([node], [x], [y]) return ([node], [x], [y])
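ONNX pads are laid out as [x1_begin, x2_begin, ..., x1_end, x2_end], so pads=[1, 1, 1, 1] on the 2x2 input adds one zero row and column on every side; np.pad reproduces it (illustrative):

    import numpy as np

    x = np.arange(4, dtype=np.float32).reshape(2, 2)
    pads = [1, 1, 1, 1]                         # begin_0, begin_1, end_0, end_1
    n = x.ndim
    np_pads = list(zip(pads[:n], pads[n:]))     # (before, after) per axis
    print(np.pad(x, np_pads, mode='constant'))  # 4x4 result with a zero border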
...@@ -3310,28 +3359,31 @@ def pad_test():
@onnx_test @onnx_test
def pad_3arg_test(): def pad_3arg_test():
values = np.array([1]) values = np.array([1])
val_tensor = helper.make_tensor( val_tensor = helper.make_tensor(name='val',
name='val', data_type=TensorProto.FLOAT,
data_type=TensorProto.FLOAT, dims=values.reshape(()).shape,
dims=values.reshape(()).shape, vals=values.astype(float))
vals=values.astype(float)) arg_val = onnx.helper.make_node('Constant',
arg_val = onnx.helper.make_node( inputs=[],
'Constant', inputs=[], outputs=['arg_val'], value=val_tensor) outputs=['arg_val'],
value=val_tensor)
sizes = np.array([1, 1, 2, 2]) sizes = np.array([1, 1, 2, 2])
pad_tensor = helper.make_tensor( pad_tensor = helper.make_tensor(name='pad_size',
name='pad_size', data_type=TensorProto.INT32,
data_type=TensorProto.INT32, dims=sizes.shape,
dims=sizes.shape, vals=sizes.astype(int))
vals=sizes.astype(int)) arg_pad = onnx.helper.make_node('Constant',
arg_pad = onnx.helper.make_node( inputs=[],
'Constant', inputs=[], outputs=['arg_pad'], value=pad_tensor) outputs=['arg_pad'],
value=pad_tensor)
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2]) x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [5, 5]) y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [5, 5])
node = onnx.helper.make_node( node = onnx.helper.make_node('Pad',
'Pad', inputs=['0', 'arg_pad', 'arg_val'], outputs=['1']) inputs=['0', 'arg_pad', 'arg_val'],
outputs=['1'])
return ([arg_val, arg_pad, node], [x], [y]) return ([arg_val, arg_pad, node], [x], [y])
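A sketch of the padding arithmetic this test relies on, assuming the ONNX pad ordering [x1_begin, x2_begin, x1_end, x2_end] and the constant value carried by 'val':

import numpy as np

x = np.zeros((2, 2), dtype=np.float32)
pads = [1, 1, 2, 2]
out = np.pad(x, ((pads[0], pads[2]), (pads[1], pads[3])),
             mode='constant', constant_values=1.0)
assert out.shape == (5, 5)  # matches the [5, 5] output declared above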
@@ -3342,16 +3394,19 @@ def pad_reflect_test():
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 5])
    sizes = np.array([0, 2, 0, 1])
    pad_tensor = helper.make_tensor(name='pad_size',
                                    data_type=TensorProto.INT32,
                                    dims=sizes.shape,
                                    vals=sizes.astype(int))
    arg_pad = onnx.helper.make_node('Constant',
                                    inputs=[],
                                    outputs=['arg_pad'],
                                    value=pad_tensor)

    node = onnx.helper.make_node('Pad',
                                 mode='reflect',
                                 inputs=['0', 'arg_pad'],
                                 outputs=['1'])

    return ([arg_pad, node], [x], [y])
@@ -3362,16 +3417,19 @@ def pad_reflect_multiaxis_test():
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 5])
    sizes = np.array([0, 2, 2, 0])
    pad_tensor = helper.make_tensor(name='pad_size',
                                    data_type=TensorProto.INT32,
                                    dims=sizes.shape,
                                    vals=sizes.astype(int))
    arg_pad = onnx.helper.make_node('Constant',
                                    inputs=[],
                                    outputs=['arg_pad'],
                                    value=pad_tensor)

    node = onnx.helper.make_node('Pad',
                                 mode='reflect',
                                 inputs=['0', 'arg_pad'],
                                 outputs=['1'])

    return ([arg_pad, node], [x], [y])
@@ -3429,13 +3487,15 @@ def prefix_scan_sum_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 2, 2])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 2])
    axis_val = np.array([0])
    axis_tensor = helper.make_tensor(name="axis",
                                     data_type=TensorProto.INT32,
                                     dims=axis_val.shape,
                                     vals=axis_val.astype(int))
    node = onnx.helper.make_node('CumSum',
                                 inputs=['x', 'axis'],
                                 outputs=['y'],
                                 exclusive=1,
                                 reverse=1)

    return ([node], [x], [y], [axis_tensor])
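A rough numpy sketch of what CumSum computes with exclusive=1 and reverse=1, assuming the standard ONNX definition (sum of the elements that come after each position along the axis):

import numpy as np

def cumsum_exclusive_reverse(x, axis):
    flipped = np.flip(x, axis=axis)
    shifted = np.roll(flipped, 1, axis=axis)
    idx = [slice(None)] * x.ndim
    idx[axis] = 0
    shifted[tuple(idx)] = 0          # drop the element wrapped in by the roll
    return np.flip(np.cumsum(shifted, axis=axis), axis=axis)

# cumsum_exclusive_reverse(np.array([1., 2., 3.]), axis=0) -> [5., 3., 0.]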
@@ -3508,8 +3568,10 @@ def make_quantizelinear_axis_graph(axis):
    arg_out = helper.make_tensor_value_info('out', TensorProto.INT8,
                                            [1, 1, 5, 1])
    node = onnx.helper.make_node('QuantizeLinear',
                                 inputs=['0', '1', '2'],
                                 outputs=['out'],
                                 axis=axis)

    return ([node], [arg0, arg1, arg2], [arg_out])
@@ -3534,15 +3596,14 @@ def randomnormal_test():
    output = helper.make_tensor_value_info('output', TensorProto.DOUBLE,
                                           [2, 3, 4])
    node = onnx.helper.make_node('RandomNormal',
                                 inputs=[],
                                 outputs=['output'],
                                 dtype=dtype,
                                 mean=mean,
                                 scale=scale,
                                 seed=seed,
                                 shape=shape)

    return ([node], [], [output])
@@ -3554,12 +3615,11 @@ def randomnormal_dtype_error_test():
    output = helper.make_tensor_value_info('output', TensorProto.INT32,
                                           [2, 3, 4])
    node = onnx.helper.make_node('RandomNormal',
                                 inputs=[],
                                 outputs=['output'],
                                 dtype=dtype,
                                 shape=shape)

    return ([node], [], [output])
@@ -3571,11 +3631,10 @@ def randomnormal_generated_seed_test():
    output = helper.make_tensor_value_info("output", TensorProto.INT32,
                                           [1, 10])
    node = onnx.helper.make_node('RandomNormal',
                                 inputs=['input'],
                                 sample_size=sample_size,
                                 outputs=['output'])

    return ([node], [input], [output])
@@ -3586,8 +3645,10 @@ def randomnormal_shape_error_test():
    output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
                                           [2, 3, 4])
    node = onnx.helper.make_node('RandomNormal',
                                 inputs=[],
                                 outputs=['output'],
                                 dtype=dtype)

    return ([node], [], [output])
@@ -3603,14 +3664,13 @@ def randomnormallike_test():
    output = helper.make_tensor_value_info('output', TensorProto.FLOAT16,
                                           [2, 3, 4])
    node = onnx.helper.make_node('RandomNormalLike',
                                 inputs=['input'],
                                 outputs=['output'],
                                 dtype=dtype,
                                 mean=mean,
                                 scale=scale,
                                 seed=seed)

    return ([node], [input], [output])
@@ -3623,8 +3683,10 @@ def randomnormallike_type_error_test():
    output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
                                           [2, 3, 4])
    node = onnx.helper.make_node('RandomNormalLike',
                                 inputs=['input'],
                                 outputs=['output'],
                                 seed=seed)

    return ([node], [input], [output])
@@ -3639,15 +3701,14 @@ def randomuniform_test():
    output = helper.make_tensor_value_info('output', TensorProto.DOUBLE,
                                           [2, 3, 4])
    node = onnx.helper.make_node('RandomUniform',
                                 inputs=[],
                                 outputs=['output'],
                                 dtype=dtype,
                                 high=high,
                                 low=low,
                                 seed=seed,
                                 shape=shape)

    return ([node], [], [output])
@@ -3659,12 +3720,11 @@ def randomuniform_dtype_error_test():
    output = helper.make_tensor_value_info('output', TensorProto.INT32,
                                           [2, 3, 4])
    node = onnx.helper.make_node('RandomUniform',
                                 inputs=[],
                                 outputs=['output'],
                                 dtype=dtype,
                                 shape=shape)

    return ([node], [], [output])
@@ -3676,11 +3736,10 @@ def randomuniform_generated_seed_test():
    output = helper.make_tensor_value_info("output", TensorProto.INT32,
                                           [1, 10])
    node = onnx.helper.make_node('RandomUniform',
                                 inputs=['input'],
                                 sample_size=sample_size,
                                 outputs=['output'])

    return ([node], [input], [output])
@@ -3691,8 +3750,10 @@ def randomuniform_shape_error_test():
    output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
                                           [2, 3, 4])
    node = onnx.helper.make_node('RandomUniform',
                                 inputs=[],
                                 outputs=['output'],
                                 dtype=dtype)

    return ([node], [], [output])
@@ -3708,14 +3769,13 @@ def randomuniformlike_test():
    output = helper.make_tensor_value_info('output', TensorProto.FLOAT16,
                                           [2, 3, 4])
    node = onnx.helper.make_node('RandomUniformLike',
                                 inputs=['input'],
                                 outputs=['output'],
                                 dtype=dtype,
                                 high=high,
                                 low=low,
                                 seed=seed)

    return ([node], [input], [output])
@@ -3728,8 +3788,10 @@ def randomuniformlike_type_error_test():
    output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
                                           [2, 3, 4])
    node = onnx.helper.make_node('RandomUniformLike',
                                 inputs=['input'],
                                 outputs=['output'],
                                 seed=seed)

    return ([node], [input], [output])
@@ -3741,32 +3803,36 @@ def range_test():
    limit_val = np.array([6])
    delta_val = np.array([-3])

    start_tensor = helper.make_tensor(name='start_val',
                                      data_type=TensorProto.INT64,
                                      dims=start_val.reshape(()).shape,
                                      vals=start_val.astype(np.int64))
    start = onnx.helper.make_node('Constant',
                                  inputs=[],
                                  outputs=['start'],
                                  value=start_tensor)

    limit_tensor = helper.make_tensor(name='limit_val',
                                      data_type=TensorProto.INT64,
                                      dims=limit_val.reshape(()).shape,
                                      vals=limit_val.astype(np.int64))
    limit = onnx.helper.make_node('Constant',
                                  inputs=[],
                                  outputs=['limit'],
                                  value=limit_tensor)

    delta_tensor = helper.make_tensor(name='delta_val',
                                      data_type=TensorProto.INT64,
                                      dims=delta_val.reshape(()).shape,
                                      vals=delta_val.astype(np.int64))
    delta = onnx.helper.make_node('Constant',
                                  inputs=[],
                                  outputs=['delta'],
                                  value=delta_tensor)

    node = onnx.helper.make_node('Range',
                                 inputs=['start', 'limit', 'delta'],
                                 outputs=['1'])

    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
@@ -3780,32 +3846,36 @@ def range_float_test():
    limit_val = np.array([11])
    delta_val = np.array([2])

    start_tensor = helper.make_tensor(name='start_val',
                                      data_type=TensorProto.FLOAT,
                                      dims=start_val.reshape(()).shape,
                                      vals=start_val.astype(np.float))
    start = onnx.helper.make_node('Constant',
                                  inputs=[],
                                  outputs=['start'],
                                  value=start_tensor)

    limit_tensor = helper.make_tensor(name='limit_val',
                                      data_type=TensorProto.FLOAT,
                                      dims=limit_val.reshape(()).shape,
                                      vals=limit_val.astype(np.float))
    limit = onnx.helper.make_node('Constant',
                                  inputs=[],
                                  outputs=['limit'],
                                  value=limit_tensor)

    delta_tensor = helper.make_tensor(name='delta_val',
                                      data_type=TensorProto.FLOAT,
                                      dims=delta_val.reshape(()).shape,
                                      vals=delta_val.astype(np.float))
    delta = onnx.helper.make_node('Constant',
                                  inputs=[],
                                  outputs=['delta'],
                                  value=delta_tensor)

    node = onnx.helper.make_node('Range',
                                 inputs=['start', 'limit', 'delta'],
                                 outputs=['1'])

    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
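Side note on the code above: np.float is only an alias for the builtin float and was removed in NumPy 1.24, so on newer NumPy the equivalent cast would look like the sketch below (np.float64 is the drop-in replacement).

import numpy as np

limit_val = np.array([11])
vals = limit_val.astype(np.float64)  # same values as limit_val.astype(np.float)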
@@ -3832,8 +3902,11 @@ def reducel1_test():
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])
    axes = [-2]
    node = onnx.helper.make_node('ReduceL1',
                                 inputs=['x'],
                                 outputs=['y'],
                                 axes=axes,
                                 keepdims=0)

    return ([node], [x], [y])
@@ -3844,8 +3917,11 @@ def reducel2_test():
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 5])
    axes = [-1]
    node = onnx.helper.make_node('ReduceL2',
                                 inputs=['x'],
                                 outputs=['y'],
                                 axes=axes,
                                 keepdims=0)

    return ([node], [x], [y])
@@ -3856,8 +3932,11 @@ def reduce_log_sum_test():
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 1, 5, 6])
    axes = [-3]
    node = onnx.helper.make_node('ReduceLogSum',
                                 inputs=['x'],
                                 outputs=['y'],
                                 axes=axes,
                                 keepdims=1)

    return ([node], [x], [y])
@@ -3868,8 +3947,11 @@ def reduce_log_sum_exp_test():
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [4, 5, 6])
    axes = [-4]
    node = onnx.helper.make_node('ReduceLogSumExp',
                                 inputs=['x'],
                                 outputs=['y'],
                                 axes=axes,
                                 keepdims=1)

    return ([node], [x], [y])
@@ -3880,8 +3962,11 @@ def reducemax_test():
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])
    axes = [2]
    node = onnx.helper.make_node('ReduceMax',
                                 inputs=['x'],
                                 outputs=['y'],
                                 axes=axes,
                                 keepdims=0)

    return ([node], [x], [y])
@@ -3892,8 +3977,11 @@ def reducemean_test():
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
    axes = [2, 3]
    node = onnx.helper.make_node('ReduceMean',
                                 inputs=['x'],
                                 outputs=['y'],
                                 axes=axes,
                                 keepdims=0)

    return ([node], [x], [y])
@@ -3904,8 +3992,11 @@ def reducemean_keepdims_test():
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 6])
    axes = [2]
    node = onnx.helper.make_node('ReduceMean',
                                 inputs=['x'],
                                 outputs=['y'],
                                 axes=axes,
                                 keepdims=1)

    return ([node], [x], [y])
@@ -3916,8 +4007,11 @@ def reducemin_test():
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 1, 5, 1])
    axes = [1, 3]
    node = onnx.helper.make_node('ReduceMin',
                                 inputs=['x'],
                                 outputs=['y'],
                                 axes=axes,
                                 keepdims=1)

    return ([node], [x], [y])
@@ -3928,8 +4022,11 @@ def reduceprod_test():
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 6])
    axes = [2]
    node = onnx.helper.make_node('ReduceProd',
                                 inputs=['x'],
                                 outputs=['y'],
                                 axes=axes,
                                 keepdims=1)

    return ([node], [x], [y])
@@ -3940,8 +4037,11 @@ def reducesum_test():
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 6])
    axes = [2]
    node = onnx.helper.make_node('ReduceSum',
                                 inputs=['x'],
                                 outputs=['y'],
                                 axes=axes,
                                 keepdims=0)

    return ([node], [x], [y])
@@ -3951,18 +4051,16 @@ def reducesum_empty_axes_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 6])
    axes = np.array([], dtype=np.int64)
    axes_tensor = helper.make_tensor(name="axes",
                                     data_type=TensorProto.INT64,
                                     dims=axes.shape,
                                     vals=axes.astype(np.int64))

    node = onnx.helper.make_node('ReduceSum',
                                 inputs=['x', 'axes'],
                                 outputs=['y'],
                                 keepdims=0,
                                 noop_with_empty_axes=False)

    return ([node], [x], [y], [axes_tensor])
@@ -3972,18 +4070,16 @@ def reducesum_noop_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 6])
    axes = np.array([], dtype=np.int64)
    axes_tensor = helper.make_tensor(name="axes",
                                     data_type=TensorProto.INT64,
                                     dims=axes.shape,
                                     vals=axes.astype(np.int64))

    node = onnx.helper.make_node('ReduceSum',
                                 inputs=['x', 'axes'],
                                 outputs=['y'],
                                 keepdims=0,
                                 noop_with_empty_axes=True)

    return ([node], [x], [y], [axes_tensor])
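A brief sketch of the two behaviours the noop_with_empty_axes flag selects between when the 'axes' input is empty, assuming the ReduceSum-13 definition:

import numpy as np

x = np.ones((3, 4, 5, 6), dtype=np.float32)
reduce_all = np.sum(x, keepdims=False)  # flag False: reduce over every axis
passthrough = x                          # flag True: the op is an identity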
@@ -3994,8 +4090,11 @@ def reducesum_keepdims_test():
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 1])
    axes = [2, 3]
    node = onnx.helper.make_node('ReduceSum',
                                 inputs=['x'],
                                 outputs=['y'],
                                 axes=axes,
                                 keepdims=1)

    return ([node], [x], [y])
@@ -4006,8 +4105,11 @@ def reducesum_multiaxis_test():
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 1])
    axes = [2, 3]
    node = onnx.helper.make_node('ReduceSum',
                                 inputs=['x'],
                                 outputs=['y'],
                                 axes=axes,
                                 keepdims=0)

    return ([node], [x], [y])
@@ -4018,8 +4120,11 @@ def reducesum_square_test():
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])
    axes = [-2]
    node = onnx.helper.make_node('ReduceSumSquare',
                                 inputs=['x'],
                                 outputs=['y'],
                                 axes=axes,
                                 keepdims=0)

    return ([node], [x], [y])
@@ -4034,8 +4139,10 @@ def reshape_test():
    node = onnx.helper.make_node('Reshape', inputs=['0', '1'], outputs=['2'])
    node2 = onnx.helper.make_node('Reshape',
                                  inputs=['0'],
                                  shape=x_shape_list,
                                  outputs=['3'])

    return ([node, node2], [x, x_shape], [y, y2],
            [helper.make_tensor('1', TensorProto.INT64, [2], [3, 8])])
@@ -4053,8 +4160,10 @@ def reshape_non_standard_test():
        perm=[0, 2, 1],
    )
    res = onnx.helper.make_node('Reshape',
                                inputs=['trans_x'],
                                outputs=['y'],
                                shape=[4, 3, 2])

    return ([trans, res], [x], [y])
@@ -4062,11 +4171,10 @@ def reshape_non_standard_test():
@onnx_test
def resize_downsample_f_test():
    scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
    scale_tensor = helper.make_tensor(name='scales',
                                      data_type=TensorProto.FLOAT,
                                      dims=scales.shape,
                                      vals=scales.flatten().astype(np.float32))

    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 4])
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [])
@@ -4085,22 +4193,20 @@ def resize_downsample_f_test():
@onnx_test
def resize_downsample_c_test():
    scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
    scale_tensor = helper.make_tensor(name='scales',
                                      data_type=TensorProto.FLOAT,
                                      dims=scales.shape,
                                      vals=scales.flatten().astype(np.float32))

    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 4])
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 1, 1, 2])

    node = onnx.helper.make_node('Resize',
                                 inputs=['X', '', 'scales'],
                                 outputs=['Y'],
                                 coordinate_transformation_mode='asymmetric',
                                 mode='nearest',
                                 nearest_mode='ceil')

    return ([node], [X], [Y], [scale_tensor])
@@ -4108,17 +4214,18 @@ def resize_downsample_c_test():
@onnx_test
def resize_downsample_linear_test():
    scales = np.array([1.0, 1.0, 0.6, 0.5], dtype=np.float32)
    scale_tensor = helper.make_tensor(name='scales',
                                      data_type=TensorProto.FLOAT,
                                      dims=scales.shape,
                                      vals=scales.flatten().astype(np.float32))

    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 4])
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [])

    node = onnx.helper.make_node('Resize',
                                 inputs=['X', '', 'scales'],
                                 outputs=['Y'],
                                 mode='linear')

    return ([node], [X], [Y], [scale_tensor])
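A small check of the scale arithmetic in this test, assuming the usual floor(input_dim * scale) rule for deriving Resize output sizes; the empty string in inputs=['X', '', 'scales'] marks the omitted optional roi input.

import numpy as np

in_dims = np.array([1, 1, 2, 4])
scales = np.array([1.0, 1.0, 0.6, 0.5], dtype=np.float32)
out_dims = np.floor(in_dims * scales).astype(np.int64)
assert out_dims.tolist() == [1, 1, 1, 2]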
@@ -4126,25 +4233,25 @@ def resize_downsample_linear_test():
@onnx_test
def resize_nonstd_input_test():
    scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
    scale_tensor = helper.make_tensor(name='scales',
                                      data_type=TensorProto.FLOAT,
                                      dims=scales.shape,
                                      vals=scales.flatten().astype(np.float32))

    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 4, 2])
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 1, 1, 2])

    trn = onnx.helper.make_node('Transpose',
                                inputs=['X'],
                                outputs=['TX'],
                                perm=[0, 1, 3, 2])

    node = onnx.helper.make_node('Resize',
                                 inputs=['TX', '', 'scales'],
                                 outputs=['Y'],
                                 coordinate_transformation_mode='asymmetric',
                                 mode='nearest',
                                 nearest_mode='ceil')

    return ([trn, node], [X], [Y], [scale_tensor])
@@ -4152,11 +4259,11 @@ def resize_nonstd_input_test():
@onnx_test
def resize_outsize_test():
    out_lens = np.array([1, 1, 4, 6], dtype=np.int64)
    out_lens_tensor = helper.make_tensor(name='out_lens',
                                         data_type=TensorProto.INT64,
                                         dims=out_lens.shape,
                                         vals=out_lens.flatten().astype(
                                             np.int64))

    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 2])
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 1, 4, 6])
@@ -4175,11 +4282,11 @@ def resize_outsize_test():
@onnx_test
def resize_upsample_linear_ac_test():
    scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
    scales_tensor = helper.make_tensor(name='scales',
                                       data_type=TensorProto.FLOAT,
                                       dims=scales.shape,
                                       vals=scales.flatten().astype(
                                           np.float32))

    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 2])
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [])
@@ -4196,16 +4303,18 @@ def resize_upsample_linear_ac_test():
@onnx_test
def resize_upsample_linear_test():
    scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
    scales_tensor = helper.make_tensor(name='scales',
                                       data_type=TensorProto.FLOAT,
                                       dims=scales.shape,
                                       vals=scales.flatten().astype(
                                           np.float32))

    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 2])
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [])

    node = onnx.helper.make_node('Resize',
                                 inputs=['X', '', 'scales'],
                                 outputs=['Y'],
                                 mode='linear')

    return ([node], [X], [Y], [scales_tensor])
@@ -4213,17 +4322,18 @@ def resize_upsample_linear_test():
@onnx_test
def resize_upsample_pf_test():
    scales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)
    scale_tensor = helper.make_tensor(name='scales',
                                      data_type=TensorProto.FLOAT,
                                      dims=scales.shape,
                                      vals=scales.flatten().astype(np.float32))

    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 2])
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 1, 4, 6])

    node = onnx.helper.make_node('Resize',
                                 inputs=['X', '', 'scales'],
                                 outputs=['Y'],
                                 mode='nearest')

    return ([node], [X], [Y], [scale_tensor])
@@ -4231,11 +4341,10 @@ def resize_upsample_pf_test():
@onnx_test
def resize_upsample_pc_test():
    scales = np.array([1.0, 1.0, 2.0, 1.5], dtype=np.float32)
    scale_tensor = helper.make_tensor(name='scales',
                                      data_type=TensorProto.FLOAT,
                                      dims=scales.shape,
                                      vals=scales.flatten().astype(np.float32))

    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 4])
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 1, 4, 6])
@@ -4259,8 +4368,9 @@ def roialign_default_test():
    bi = helper.make_tensor_value_info('batch_ind', TensorProto.INT64, [8])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [8, 4, 1, 1])
    node = onnx.helper.make_node('RoiAlign',
                                 inputs=['x', 'rois', 'batch_ind'],
                                 outputs=['y'])

    return ([node], [x, roi, bi], [y])
@@ -4356,11 +4466,10 @@ def scatternd_add_test():
    output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
                                           [2, 2, 2])
    node = onnx.helper.make_node('ScatterND',
                                 inputs=['data', 'indices', 'updates'],
                                 outputs=['output'],
                                 reduction="add")

    return ([node], [data, indices, updates], [output])
@@ -4375,11 +4484,10 @@ def scatternd_mul_test():
    output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
                                           [2, 2, 2])
    node = onnx.helper.make_node('ScatterND',
                                 inputs=['data', 'indices', 'updates'],
                                 outputs=['output'],
                                 reduction="mul")

    return ([node], [data, indices, updates], [output])
@@ -4394,8 +4502,9 @@ def scatternd_test():
    output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
                                           [2, 2, 2])
    node = onnx.helper.make_node('ScatterND',
                                 inputs=['data', 'indices', 'updates'],
                                 outputs=['output'])

    return ([node], [data, indices, updates], [output])
@@ -4405,8 +4514,11 @@ def selu_test():
    x = helper.make_tensor_value_info('x', TensorProto.DOUBLE, [2, 3])
    y = helper.make_tensor_value_info('y', TensorProto.DOUBLE, [2, 3])
    node = onnx.helper.make_node('Selu',
                                 inputs=['x'],
                                 outputs=['y'],
                                 alpha=0.3,
                                 gamma=0.5)

    return ([node], [x], [y])
@@ -4432,11 +4544,10 @@ def shape_gather_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [7, 3, 10])
    z = helper.make_tensor_value_info('z', TensorProto.FLOAT, [1])
    value_tensor = helper.make_tensor(name='const_tensor',
                                      data_type=TensorProto.INT32,
                                      dims=values.shape,
                                      vals=values.flatten().astype(int))

    node_const = onnx.helper.make_node(
        'Constant',
@@ -4556,13 +4667,12 @@ def slice_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3, 2])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 2])
    node = onnx.helper.make_node('Slice',
                                 inputs=['0'],
                                 axes=[0, 1],
                                 starts=[1, 0],
                                 ends=[2, 2],
                                 outputs=['1'])

    return ([node], [x], [y])
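A hedged numpy equivalent of the Slice above (axes=[0, 1], starts=[1, 0], ends=[2, 2]), just to make the expected [1, 2] output shape concrete:

import numpy as np

x = np.arange(6, dtype=np.float32).reshape(3, 2)
y = x[1:2, 0:2]
assert y.shape == (1, 2)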
@@ -4572,26 +4682,29 @@ def slice_3arg_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 5])
    start = np.array([0, 0])
    start_tensor = helper.make_tensor(name="start",
                                      data_type=TensorProto.INT32,
                                      dims=start.shape,
                                      vals=start.astype(int))
    arg_start = helper.make_node("Constant",
                                 inputs=[],
                                 outputs=['arg_start'],
                                 value=start_tensor)

    end = np.array([2, 5])
    end_tensor = helper.make_tensor(name="end",
                                    data_type=TensorProto.INT32,
                                    dims=end.shape,
                                    vals=end.astype(int))
    arg_end = helper.make_node("Constant",
                               inputs=[],
                               outputs=['arg_end'],
                               value=end_tensor)

    node = onnx.helper.make_node('Slice',
                                 inputs=['0', 'arg_start', 'arg_end'],
                                 outputs=['1'])

    return ([arg_start, arg_end, node], [x], [y])
@@ -4599,40 +4712,44 @@ def slice_3arg_test():
@onnx_test
def slice_5arg_test():
    step = np.array([1, 1])
    step_tensor = helper.make_tensor(name="step",
                                     data_type=TensorProto.INT32,
                                     dims=step.shape,
                                     vals=step.astype(int))
    arg_step = helper.make_node("Constant",
                                inputs=[],
                                outputs=['arg_step'],
                                value=step_tensor)

    axis = np.array([-1, -2])
    axis_tensor = helper.make_tensor(name="axis",
                                     data_type=TensorProto.INT32,
                                     dims=axis.shape,
                                     vals=axis.astype(int))
    arg_axis = helper.make_node("Constant",
                                inputs=[],
                                outputs=['arg_axis'],
                                value=axis_tensor)

    end = np.array([-1, -1])
    end_tensor = helper.make_tensor(name="end",
                                    data_type=TensorProto.INT32,
                                    dims=end.shape,
                                    vals=end.astype(int))
    arg_end = helper.make_node("Constant",
                               inputs=[],
                               outputs=['arg_end'],
                               value=end_tensor)

    start = np.array([-5, -3])
    start_tensor = helper.make_tensor(name="start",
                                      data_type=TensorProto.INT32,
                                      dims=start.shape,
                                      vals=start.astype(int))
    arg_start = helper.make_node("Constant",
                                 inputs=[],
                                 outputs=['arg_start'],
                                 value=start_tensor)

    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 2])
@@ -4648,40 +4765,44 @@ def slice_5arg_test():
@onnx_test
def slice_5arg_reverse_test():
    step = np.array([-1, 1])
    step_tensor = helper.make_tensor(name="step",
                                     data_type=TensorProto.INT32,
                                     dims=step.shape,
                                     vals=step.astype(int))
    arg_step = helper.make_node("Constant",
                                inputs=[],
                                outputs=['arg_step'],
                                value=step_tensor)

    axis = np.array([-1, -2])
    axis_tensor = helper.make_tensor(name="axis",
                                     data_type=TensorProto.INT32,
                                     dims=axis.shape,
                                     vals=axis.astype(int))
    arg_axis = helper.make_node("Constant",
                                inputs=[],
                                outputs=['arg_axis'],
                                value=axis_tensor)

    end = np.array([-5, -1])
    end_tensor = helper.make_tensor(name="end",
                                    data_type=TensorProto.INT32,
                                    dims=end.shape,
                                    vals=end.astype(int))
    arg_end = helper.make_node("Constant",
                               inputs=[],
                               outputs=['arg_end'],
                               value=end_tensor)

    start = np.array([-1, -3])
    start_tensor = helper.make_tensor(name="start",
                                      data_type=TensorProto.INT32,
                                      dims=start.shape,
                                      vals=start.astype(int))
    arg_start = helper.make_node("Constant",
                                 inputs=[],
                                 outputs=['arg_start'],
                                 value=start_tensor)

    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 2])
@@ -4697,40 +4818,44 @@ def slice_5arg_reverse_test():
@onnx_test
def slice_5arg_step_test():
    step = np.array([-2, 2])
    step_tensor = helper.make_tensor(name="step",
                                     data_type=TensorProto.INT32,
                                     dims=step.shape,
                                     vals=step.astype(int))
    arg_step = helper.make_node("Constant",
                                inputs=[],
                                outputs=['arg_step'],
                                value=step_tensor)

    axis = np.array([-1, -2])
    axis_tensor = helper.make_tensor(name="axis",
                                     data_type=TensorProto.INT32,
                                     dims=axis.shape,
                                     vals=axis.astype(int))
    arg_axis = helper.make_node("Constant",
                                inputs=[],
                                outputs=['arg_axis'],
                                value=axis_tensor)

    end = np.array([-5, -1])
    end_tensor = helper.make_tensor(name="end",
                                    data_type=TensorProto.INT32,
                                    dims=end.shape,
                                    vals=end.astype(int))
    arg_end = helper.make_node("Constant",
                               inputs=[],
                               outputs=['arg_end'],
                               value=end_tensor)

    start = np.array([-1, -3])
    start_tensor = helper.make_tensor(name="start",
                                      data_type=TensorProto.INT32,
                                      dims=start.shape,
                                      vals=start.astype(int))
    arg_start = helper.make_node("Constant",
                                 inputs=[],
                                 outputs=['arg_start'],
                                 value=start_tensor)

    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5, 5])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 2])
@@ -4748,13 +4873,12 @@ def slice_max_end_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [10, 20])
    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [9, 17])
    node = onnx.helper.make_node('Slice',
                                 inputs=['0'],
                                 axes=[0, 1],
                                 starts=[1, 2],
                                 ends=[3000000000, -1],
                                 outputs=['1'])

    return ([node], [x], [y])
@@ -4774,13 +4898,12 @@ def softmax_nonstd_input_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [6, 8])
    y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 4])
    node0 = onnx.helper.make_node('Slice',
                                  inputs=['0'],
                                  axes=[0, 1],
                                  starts=[1, 0],
                                  ends=[4, 4],
                                  outputs=['1'])
    node1 = onnx.helper.make_node('Softmax', inputs=['1'], outputs=['2'])
@@ -4849,12 +4972,11 @@ def split_test():
    y2 = helper.make_tensor_value_info('y2', TensorProto.FLOAT, [10, 4])
    y3 = helper.make_tensor_value_info('y3', TensorProto.FLOAT, [10, 4])
    node = onnx.helper.make_node('Split',
                                 inputs=['x'],
                                 outputs=['y1', 'y2', 'y3'],
                                 axis=1,
                                 split=[7, 4, 4])

    return ([node], [x], [y1, y2, y3])
@@ -4893,14 +5015,14 @@ def squeeze_axes_input_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 1, 5, 1])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 5])
    axes = np.array([1, 3], dtype=np.int64)
    axes_tensor = helper.make_tensor(name="axes",
                                     data_type=TensorProto.INT64,
                                     dims=axes.shape,
                                     vals=axes.astype(np.int64))

    node = onnx.helper.make_node('Squeeze',
                                 inputs=['x', 'axes'],
                                 outputs=['y'])

    return ([node], [x], [y], [axes_tensor])
@@ -4910,14 +5032,14 @@ def squeeze_empty_axes_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 1, 5, 1])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 5])
    axes = np.array([], dtype=np.int64)
    axes_tensor = helper.make_tensor(name="axes",
                                     data_type=TensorProto.INT64,
                                     dims=axes.shape,
                                     vals=axes.astype(np.int64))

    node = onnx.helper.make_node('Squeeze',
                                 inputs=['x', 'axes'],
                                 outputs=['y'])

    return ([node], [x], [y], [axes_tensor])
@@ -4929,11 +5051,15 @@ def squeeze_unsqueeze_test():
    y = helper.make_tensor_value_info('2', TensorProto.FLOAT,
                                      [1, 1, 3, 1, 2, 1])
    node = onnx.helper.make_node('Squeeze',
                                 inputs=['0'],
                                 axes=[0, 2, 3, 5],
                                 outputs=['1'])

    node2 = onnx.helper.make_node('Unsqueeze',
                                  inputs=['1'],
                                  axes=[0, 1, 3, 5],
                                  outputs=['2'])

    return ([node, node2], [x], [y])
@@ -4964,11 +5090,10 @@ def sub_scalar_test():
    arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
                                            [2, 3, 4, 5])
    values_tensor = helper.make_tensor(name='const',
                                       data_type=TensorProto.FLOAT,
                                       dims=values.reshape(()).shape,
                                       vals=values.flatten().astype(float))

    arg_const = onnx.helper.make_node(
        'Constant',
@@ -5025,74 +5150,78 @@ def sum_test():
@onnx_test
def sum_type_test():
    valb = np.array([1, 0])
    t_bool = helper.make_tensor(name="bool",
                                data_type=TensorProto.BOOL,
                                dims=valb.shape,
                                vals=valb.astype(np.bool))

    val = np.array([1, 1])
    t_int8 = helper.make_tensor(name="int8",
                                data_type=TensorProto.INT8,
                                dims=val.shape,
                                vals=val.astype(np.int8))

    t_uint8 = helper.make_tensor(name="uint8",
                                 data_type=TensorProto.UINT8,
                                 dims=val.shape,
                                 vals=val.astype(np.uint8))

    t_uint16 = helper.make_tensor(name="uint16",
                                  data_type=TensorProto.UINT16,
                                  dims=val.shape,
                                  vals=val.astype(np.uint16))

    t_uint32 = helper.make_tensor(name="uint32",
                                  data_type=TensorProto.UINT32,
                                  dims=val.shape,
                                  vals=val.astype(np.uint32))

    t_uint64 = helper.make_tensor(name="uint64",
                                  data_type=TensorProto.UINT64,
                                  dims=val.shape,
                                  vals=val.astype(np.uint64))

    t_double = helper.make_tensor(name="double",
                                  data_type=TensorProto.DOUBLE,
                                  dims=val.shape,
                                  vals=val.astype(np.float64))

    valr = np.array([1.5, 2.0])
    t_raw = helper.make_tensor(name="raw",
                               data_type=TensorProto.DOUBLE,
                               dims=valr.shape,
                               vals=valr.tobytes(),
                               raw=True)

    n_bool = onnx.helper.make_node('Cast',
                                   inputs=['bool'],
                                   outputs=['o_bool'],
                                   to=11)

    n_int8 = onnx.helper.make_node('Cast',
                                   inputs=['int8'],
                                   outputs=['o_int8'],
                                   to=11)

    n_uint8 = onnx.helper.make_node('Cast',
                                    inputs=['uint8'],
                                    outputs=['o_uint8'],
                                    to=11)

    n_uint16 = onnx.helper.make_node('Cast',
                                     inputs=['uint16'],
                                     outputs=['o_uint16'],
                                     to=11)

    n_uint32 = onnx.helper.make_node('Cast',
                                     inputs=['uint32'],
                                     outputs=['o_uint32'],
                                     to=11)

    n_uint64 = onnx.helper.make_node('Cast',
                                     inputs=['uint64'],
                                     outputs=['o_uint64'],
                                     to=11)

    node = onnx.helper.make_node(
        'Sum',
@@ -5105,11 +5234,11 @@ def sum_type_test():
    y = helper.make_tensor_value_info('out', TensorProto.DOUBLE, [2])

    return ([n_bool, n_int8, n_uint8, n_uint16, n_uint32, n_uint64,
             node], [], [y], [
                 t_bool, t_int8, t_uint8, t_uint16, t_uint32, t_uint64,
                 t_double, t_raw
             ])
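A small sanity note on the magic number used by the Cast nodes in this test; the hard-coded to=11 is the DOUBLE entry of the TensorProto data-type enum:

from onnx import TensorProto

assert TensorProto.DOUBLE == 11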
@onnx_test
...@@ -5145,8 +5274,9 @@ def thresholdedrelu_default_test(): ...@@ -5145,8 +5274,9 @@ def thresholdedrelu_default_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 2, 3]) x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 3]) y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 3])
node = onnx.helper.make_node( node = onnx.helper.make_node('ThresholdedRelu',
'ThresholdedRelu', inputs=['x'], outputs=['y']) inputs=['x'],
outputs=['y'])
return ([node], [x], [y]) return ([node], [x], [y])
@@ -5157,8 +5287,10 @@ def thresholdedrelu_test():
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 3])
    alpha = 3.0

    node = onnx.helper.make_node('ThresholdedRelu',
                                 inputs=['x'],
                                 outputs=['y'],
                                 alpha=alpha)

    return ([node], [x], [y])
@@ -5169,8 +5301,10 @@ def thresholdedrelu_int_test():
    y = helper.make_tensor_value_info('y', TensorProto.INT32, [2, 2, 3])
    alpha = 3.0

    node = onnx.helper.make_node('ThresholdedRelu',
                                 inputs=['x'],
                                 outputs=['y'],
                                 alpha=alpha)

    return ([node], [x], [y])
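For reference, ThresholdedRelu passes a value through only when it exceeds alpha (default 1.0) and outputs zero otherwise; a minimal NumPy sketch of that behaviour, independent of this test harness:

import numpy as np

def thresholded_relu_ref(x, alpha=1.0):
    # y = x where x > alpha, else 0 (ONNX ThresholdedRelu semantics).
    return np.where(x > alpha, x, np.zeros_like(x))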
@@ -5206,8 +5340,10 @@ def topk_attrk_test():
    ind = helper.make_tensor_value_info('indices', TensorProto.INT64,
                                        [2, 2, 3, 2])

    node = onnx.helper.make_node('TopK',
                                 inputs=['data'],
                                 outputs=['val', 'indices'],
                                 k=2)

    return ([node], [x], [val, ind])
@@ -5219,18 +5355,16 @@ def topk_neg_axis_test():
    ind = helper.make_tensor_value_info('indices', TensorProto.INT64,
                                        [3, 3, 5, 6])

    k_tensor = helper.make_tensor(name='k',
                                  data_type=TensorProto.INT64,
                                  dims=k.shape,
                                  vals=k.astype(np.int64))

    node = onnx.helper.make_node('TopK',
                                 inputs=['data', 'k'],
                                 outputs=['val', 'indices'],
                                 axis=-2,
                                 sorted=0)

    return ([node], [x], [val, ind], [k_tensor])
@@ -5242,18 +5376,16 @@ def topk_test():
    ind = helper.make_tensor_value_info('indices', TensorProto.INT64,
                                        [2, 4, 3, 2])

    k_tensor = helper.make_tensor(name='k',
                                  data_type=TensorProto.INT64,
                                  dims=k.shape,
                                  vals=k.astype(np.int64))

    node = onnx.helper.make_node('TopK',
                                 inputs=['data', 'k'],
                                 outputs=['val', 'indices'],
                                 largest=0,
                                 axis=1)

    return ([node], [x], [val, ind], [k_tensor])
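The two TopK tests above supply k either as an attribute or as an int64 initializer input, and between them exercise a negative axis, sorted=0 and largest=0. As a rough reference, top-k selection along an arbitrary axis can be reproduced in NumPy as below (topk_ref is a hypothetical helper, not part of this file):

import numpy as np

def topk_ref(x, k, axis=-1, largest=True):
    # Return (values, indices) of the k largest (or smallest) entries
    # along `axis`, analogous to ONNX TopK with sorted output.
    order = np.argsort(-x if largest else x, axis=axis, kind='stable')
    idx = np.take(order, np.arange(k), axis=axis)
    vals = np.take_along_axis(x, idx, axis=axis)
    return vals, idx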
@@ -5315,11 +5447,10 @@ def transpose_gather_test():
        perm=[0, 2, 1, 3],
    )

    ti = onnx.helper.make_node('Transpose',
                               inputs=['indices'],
                               outputs=['tindices'],
                               perm=[0, 2, 1, 3])

    node = onnx.helper.make_node(
        'Gather',
@@ -5366,8 +5497,10 @@ def unknown_aten_test():
    a = helper.make_tensor_value_info('3', TensorProto.FLOAT, [2, 3, 4, 5])

    node = onnx.helper.make_node('ATen',
                                 inputs=['0', '1'],
                                 outputs=['2'],
                                 operator='unknown')

    return ([node], [x, y], [a])
@@ -5375,16 +5508,18 @@ def unknown_aten_test():
@onnx_test
def upsample_linear_test():
    scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
    scales_tensor = helper.make_tensor(name='scales',
                                       data_type=TensorProto.FLOAT,
                                       dims=scales.shape,
                                       vals=scales.flatten().astype(
                                           np.float32))
    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 2])
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [])

    node = onnx.helper.make_node('Upsample',
                                 inputs=['X', '', 'scales'],
                                 outputs=['Y'],
                                 mode='linear')

    return ([node], [X], [Y], [scales_tensor])
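In upsample_linear_test the empty string in inputs=['X', '', 'scales'] marks a position with no input supplied, the scales come from an initializer, and Y is declared without fixed dimensions. Assuming the usual Upsample scaling rule that each output dimension is floor(input_dim * scale), the expected output shape can be checked with a few lines of NumPy:

import numpy as np

# Expected Upsample output dims are floor(input_dim * scale) per axis
# (assumption based on the ONNX Upsample/Resize scaling rule).
input_dims = [1, 1, 2, 2]
scales = [1.0, 1.0, 2.0, 2.0]
out_dims = [int(np.floor(d * s)) for d, s in zip(input_dims, scales)]
print(out_dims)  # [1, 1, 4, 4]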
@@ -5392,11 +5527,10 @@ def upsample_linear_test():
@onnx_test
def upsample_test():
    scales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)
    scale_tensor = helper.make_tensor(name='scales',
                                      data_type=TensorProto.FLOAT,
                                      dims=scales.shape,
                                      vals=scales.flatten().astype(np.float32))
    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 2])
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 1, 4, 6])
@@ -5441,7 +5575,8 @@ def where_test():
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 1, 2, 2])
    z = helper.make_tensor_value_info('z', TensorProto.FLOAT, [2, 2, 2, 2])

    node = onnx.helper.make_node('Where',
                                 inputs=['c', 'x', 'y'],
                                 outputs=['z'])

    return ([node], [c, x, y], [z])
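where_test declares the output z as [2, 2, 2, 2] even though y is [2, 1, 2, 2], because Where broadcasts its three inputs multidirectionally. The shapes of 'c' and 'x' are outside this hunk, so the NumPy sketch below assumes plausible shapes purely to show how the broadcast can yield [2, 2, 2, 2]:

import numpy as np

# The shapes of 'c' and 'x' are assumed here; only 'y' and 'z' are visible
# in the hunk above.
c = np.random.rand(2, 1, 2, 2) > 0.5
x = np.random.rand(2, 2, 2, 1).astype(np.float32)
y = np.random.rand(2, 1, 2, 2).astype(np.float32)
z = np.where(c, x, y)
print(z.shape)  # (2, 2, 2, 2)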