Commit e2bbfca1 authored by turneram
Browse files

Formatting

parent 6e67ccad
......@@ -87,8 +87,7 @@ struct layernorm
mean_square = sqrt(mean_square / norm_size - mean * mean + epsilon);
for(std::size_t i = 0; i < norm_size; ++i)
{
output[offset + i] =
(data[offset + i] - mean) / mean_square;
output[offset + i] = (data[offset + i] - mean) / mean_square;
/* if(args.size() == 3)
output[offset + i] =
(data[offset + i] - mean) / mean_square * weights[i] + bias[i];
......
......@@ -15,25 +15,25 @@ struct parse_attention : op_parser<parse_attention>
const onnx_parser& parser,
onnx_parser::node_info info,
const std::vector<instruction_ref>& args) const
{
auto input = args[0];
auto weights = args[1];
auto bias = args[2];
{
auto input = args[0];
auto weights = args[1];
auto bias = args[2];
auto mask_index = args[3];
instruction_ref past;
instruction_ref extra_add_qk;
bool is_past = false;
bool is_past = false;
bool is_extra_add_qk = false;
if (args.size() > 4)
if(args.size() > 4)
{
past = args[4];
past = args[4];
is_past = true;
}
if (args.size() == 6)
if(args.size() == 6)
{
is_extra_add_qk = true;
extra_add_qk = args[5];
extra_add_qk = args[5];
}
// ORT default is 12
......@@ -42,112 +42,123 @@ struct parse_attention : op_parser<parse_attention>
num_heads = info.attributes.at("num_heads").i();
// input shape: (batch_size, sequence_length, input_hidden_size)
auto input_lens = input->get_shape().lens();
auto batch_size = input_lens.at(0);
auto sequence_length = input_lens.at(1);
auto input_lens = input->get_shape().lens();
auto batch_size = input_lens.at(0);
auto sequence_length = input_lens.at(1);
auto input_hidden_size = input_lens.at(2);
// bias shape: (3 * hidden_size)
auto bias_lens = bias->get_shape().lens();
auto hidden_size = bias_lens.at(0) / 3;
auto head_size = hidden_size / num_heads;
auto bias_lens = bias->get_shape().lens();
auto hidden_size = bias_lens.at(0) / 3;
auto head_size = hidden_size / num_heads;
int past_sequence_length = 0;
// GetPresent
// Input and output shapes:
// past : (2, batch_size, num_heads, past_sequence_length, head_size)
// present : (2, batch_size, num_heads, past_sequence_length + sequence_length, head_size)
// present : (2, batch_size, num_heads, past_sequence_length + sequence_length,
// head_size)
std::vector<std::size_t> present_lens{2, batch_size, num_heads, sequence_length, head_size};
if (is_past)
if(is_past)
{
auto past_lens = past->get_shape().lens();
auto past_lens = past->get_shape().lens();
past_sequence_length = past_lens.at(3);
present_lens[3] += past_lens[3];
}
// Use GEMM for the fully connected (dense) layer.
auto m = batch_size * sequence_length;
auto n = bias_lens.front();
auto n = bias_lens.front();
auto k = input_hidden_size;
// Bias shape is (N), broadcast using B(N, M) = 1 * bias(N, 1) x ones(1, M) + 0 * B.
auto bias_type = bias->get_shape().type();
std::vector<float> ones_vec(m, 1);
std::vector<std::size_t> ones_lens{1, m};
auto ones = info.add_literal(migraphx::literal{migraphx::shape{bias_type, ones_lens}, ones_vec});
bias = info.add_instruction(migraphx::make_op("reshape", {{"dims", {n, 1}}}), bias);
auto gemm_1 = info.add_instruction(migraphx::make_op("dot"), bias, ones/* info.make_contiguous(mb_bias), info.make_contiguous(ones) */);
gemm_1 = info.add_instruction(migraphx::make_op("transpose", {{"permutation", {1, 0}}}), gemm_1);
/// ORT: Gemm, note that ROCM assumes col-major, so result(N, M) = 1 * weights x input + 1 x B.
/// Assume row-major => results(N, M) = 1 * input x weights + 1 x B ?
auto input_sq = info.add_instruction(migraphx::make_op("reshape", {{"dims", {batch_size * sequence_length, hidden_size}}}), input);
auto gemm_2 = info.add_instruction(migraphx::make_op("dot"), input_sq, weights);
auto ones =
info.add_literal(migraphx::literal{migraphx::shape{bias_type, ones_lens}, ones_vec});
bias = info.add_instruction(migraphx::make_op("reshape", {{"dims", {n, 1}}}), bias);
auto gemm_1 = info.add_instruction(
migraphx::make_op("dot"),
bias,
ones /* info.make_contiguous(mb_bias), info.make_contiguous(ones) */);
gemm_1 =
info.add_instruction(migraphx::make_op("transpose", {{"permutation", {1, 0}}}), gemm_1);
/// ORT: Gemm, note that ROCM assumes col-major, so result(N, M) = 1 * weights x input + 1 x
/// B. Assume row-major => results(N, M) = 1 * input x weights + 1 x B ?
auto input_sq = info.add_instruction(
migraphx::make_op("reshape", {{"dims", {batch_size * sequence_length, hidden_size}}}),
input);
auto gemm_2 = info.add_instruction(migraphx::make_op("dot"), input_sq, weights);
auto add_gemms = info.add_instruction(migraphx::make_op("add"), gemm_1, gemm_2);
// LaunchAttentionKernel:
// LaunchTransQkv
// input should be BxSx3xNxH => scratch3: 3xBxNxSxH
add_gemms = info.add_instruction(migraphx::make_op("reshape", {{"dims", {batch_size, sequence_length, 3, num_heads, head_size}}}), add_gemms);
add_gemms = info.add_instruction(
migraphx::make_op("reshape",
{{"dims", {batch_size, sequence_length, 3, num_heads, head_size}}}),
add_gemms);
std::vector<std::size_t> qkv_perm{2, 0, 3, 1, 4};
auto transqkv = info.add_instruction(migraphx::make_op("transpose", {{"permutation", qkv_perm}}), add_gemms);
auto transqkv = info.add_instruction(
migraphx::make_op("transpose", {{"permutation", qkv_perm}}), add_gemms);
// now scratch3 has Q, K, V: each has size BxNxSxH
// => transqkv has shape 3xBxNxSxH
auto batches = batch_size * num_heads;
auto batches = batch_size * num_heads;
auto size_per_batch = sequence_length * head_size;
auto total_size = batches * size_per_batch;
auto q_t = info.add_instruction(migraphx::make_op("slice", {{"axes", {0}}, {"starts", {0}}, {"ends", {1}}}), transqkv);
auto k_t = info.add_instruction(migraphx::make_op("slice", {{"axes", {0}}, {"starts", {1}}, {"ends", {2}}}), transqkv);
auto v_t = info.add_instruction(migraphx::make_op("slice", {{"axes", {0}}, {"starts", {2}}, {"ends", {3}}}), transqkv);
auto total_size = batches * size_per_batch;
auto q_t = info.add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {0}}, {"ends", {1}}}), transqkv);
auto k_t = info.add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {1}}, {"ends", {2}}}), transqkv);
auto v_t = info.add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {2}}, {"ends", {3}}}), transqkv);
q_t = info.add_instruction(make_op("squeeze", {{"axes", {0}}}), q_t);
k_t = info.add_instruction(make_op("squeeze", {{"axes", {0}}}), k_t);
v_t = info.add_instruction(make_op("squeeze", {{"axes", {0}}}), v_t);
if (is_past)
if(is_past)
{
k_t = info.add_instruction(migraphx::make_op("concat", {{"axis", 3}}), past, k_t);
v_t = info.add_instruction(migraphx::make_op("slice", {{"axes", {0}}, {"starts", {1}}, {"ends", {3}}}), k_t);
v_t = info.add_instruction(
migraphx::make_op("slice", {{"axes", {0}}, {"starts", {1}}, {"ends", {3}}}), k_t);
}
// Raw attention mask could be 2D (BxS) or 3D (BxSxS*) or 4D(Bx1xMxM), where M is the max sequence length.
auto mask_index_lens = mask_index->get_shape().lens();
// Raw attention mask could be 2D (BxS) or 3D (BxSxS*) or 4D(Bx1xMxM), where M is the max
// sequence length.
auto mask_index_lens = mask_index->get_shape().lens();
bool use_raw_attention_mask = mask_index_lens.size() >= 2;
// compute Q*K' (as K'*Q), scaled by 1/sqrt(H) and store in scratch1: BxNxSxS*
// Q: BxNxSxH, K (present_k): BxNxS*xH, Q*K': BxNxSxS*
const float rsqrt_head_size = 1.f / sqrt(static_cast<float>(head_size));
const float rsqrt_head_size = 1.f / sqrt(static_cast<float>(head_size));
const int all_sequence_length = past_sequence_length + sequence_length;
const int temp_matrix_size = sequence_length * all_sequence_length;
const int temp_matrix_size = sequence_length * all_sequence_length;
// For a raw attention mask, the scaling by 1/sqrt(H) is moved into the softmax computation.
const float alpha = use_raw_attention_mask ? 1.0 : rsqrt_head_size;
// K{B,N,S,H} -> K'{B,N,H,S}
k_t = info.add_instruction(make_op("transpose", {{"permutation", {0, 1, 3, 2}}}), k_t);
auto gemm3 = info.add_instruction(migraphx::make_op("dot"), q_t, k_t);
if (is_extra_add_qk)
if(is_extra_add_qk)
gemm3 = info.add_instruction(make_op("add"), gemm3, extra_add_qk);
auto alpha_lit = info.add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", gemm3->get_shape().lens()}}),
info.add_literal(migraphx::literal{migraphx::shape{gemm3->get_shape().type()}, {alpha}}));
gemm3 = info.add_instruction(migraphx::make_op("mul"), gemm3, info.make_contiguous(alpha_lit));
info.add_literal(
migraphx::literal{migraphx::shape{gemm3->get_shape().type()}, {alpha}}));
gemm3 =
info.add_instruction(migraphx::make_op("mul"), gemm3, info.make_contiguous(alpha_lit));
// apply softmax and store result P to scratch2: BxNxSxS*
std::vector<float> mask(batch_size*num_heads*sequence_length*all_sequence_length, 0);
if (false and mask_index_lens.size() >= 2)
{
}
else if (false and mask_index_lens.size() == 1)
std::vector<float> mask(batch_size * num_heads * sequence_length * all_sequence_length, 0);
if(false and mask_index_lens.size() >= 2) {}
else if(false and mask_index_lens.size() == 1)
{
}
// else => no mask
auto softmax = info.add_instruction(migraphx::make_op("softmax", {{"axis", 3}}), gemm3);
......@@ -156,8 +167,11 @@ struct parse_attention : op_parser<parse_attention>
auto gemm4 = info.add_instruction(migraphx::make_op("dot"), softmax, v_t);
// scratch3 is BxNxSxH, transpose to output BxSxNxH
gemm4 = info.add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), gemm4);
gemm4 = info.add_instruction(make_op("reshape", {{"dims", {batch_size, sequence_length, num_heads * head_size}}}), info.make_contiguous(gemm4));
gemm4 = info.add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 2, 1, 3}}}), gemm4);
gemm4 = info.add_instruction(
make_op("reshape", {{"dims", {batch_size, sequence_length, num_heads * head_size}}}),
info.make_contiguous(gemm4));
return gemm4;
}
};
......
......@@ -17,7 +17,7 @@ struct parse_layernorm : op_parser<parse_layernorm>
const std::vector<instruction_ref>& args) const
{
float epsilon = 1e-3f;
int64_t axis = -1;
int64_t axis = -1;
if(contains(info.attributes, "epsilon"))
{
epsilon = parser.parse_value(info.attributes.at("epsilon")).at<float>();
......@@ -27,9 +27,10 @@ struct parse_layernorm : op_parser<parse_layernorm>
epsilon = parser.parse_value(info.attributes.at("axis")).at<int64_t>();
}
auto layernorm = info.add_instruction(make_op("layernorm", {{"epsilon", epsilon}, {"axis", axis}}), args.front());
if (args.size() == 3)
auto layernorm = info.add_instruction(
make_op("layernorm", {{"epsilon", epsilon}, {"axis", axis}}), args.front());
if(args.size() == 3)
{
layernorm = info.add_instruction(make_op("mul"), layernorm, args.at(1));
layernorm = info.add_instruction(make_op("add"), layernorm, args.at(2));
......
......@@ -12,7 +12,8 @@ namespace device {
void layernorm(hipStream_t stream, const argument& result, const argument& arg1);
//void layernorm(hipStream_t stream, const argument& result, const argument& arg1, const argument& arg2, const argument& arg3, const int64_t axis);
// void layernorm(hipStream_t stream, const argument& result, const argument& arg1, const argument&
// arg2, const argument& arg3, const int64_t axis);
void triadd_layernorm(hipStream_t stream,
const argument& result,
......
......@@ -19,12 +19,13 @@ argument hip_layernorm::compute(context& ctx, const shape&, const std::vector<ar
{
auto n_dim = args.front().get_shape().lens().size();
auto tuned_axis = tune_axis(n_dim, op.axis, op.name());
device::layernorm(ctx.get_stream().get(), args.back(), args[0], args[1], args[2], tuned_axis);
device::layernorm(ctx.get_stream().get(), args.back(), args[0], args[1], args[2],
tuned_axis);
}
else */
std::cout << "calling device::ln" << std::endl;
{
device::layernorm(ctx.get_stream().get(), args.back(), args[0]);
std::cout << "called device::ln" << std::endl;
}
......
......@@ -394,7 +394,7 @@ struct miopen_apply
apply_map.emplace(op_name, [=](instruction_ref ins) {
auto output = insert_allocation(ins, ins->get_shape());
std::vector<instruction_ref> refs = ins->inputs();
if (op_name == "layernorm")
if(op_name == "layernorm")
{
std::cout << "layernorm op" << std::endl;
}
......
......@@ -11,17 +11,16 @@ def onnx_test(op_test):
def run_test():
op_info = op_test()
if len(op_info) > 3:
graph_def = helper.make_graph(
op_info[0],
op_test.__name__,
op_info[1],
op_info[2],
initializer=op_info[3])
graph_def = helper.make_graph(op_info[0],
op_test.__name__,
op_info[1],
op_info[2],
initializer=op_info[3])
else:
graph_def = helper.make_graph(op_info[0], op_test.__name__,
op_info[1], op_info[2])
model_def = helper.make_model(
graph_def, producer_name=op_test.__name__)
model_def = helper.make_model(graph_def,
producer_name=op_test.__name__)
onnx.save(model_def, '{}.onnx'.format(op_test.__name__))
return run_test
......@@ -61,8 +60,11 @@ def add_bcast_test():
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 4])
z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [2, 3, 4, 5])
node = onnx.helper.make_node(
'Add', inputs=['0', '1'], broadcast=1, axis=1, outputs=['2'])
node = onnx.helper.make_node('Add',
inputs=['0', '1'],
broadcast=1,
axis=1,
outputs=['2'])
return ([node], [x, y], [z])
......@@ -106,8 +108,11 @@ def argmax_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])
node = onnx.helper.make_node(
'ArgMax', inputs=['x'], outputs=['y'], axis=2, keepdims=0)
node = onnx.helper.make_node('ArgMax',
inputs=['x'],
outputs=['y'],
axis=2,
keepdims=0)
return ([node], [x], [y])
......@@ -117,8 +122,11 @@ def argmin_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 5])
node = onnx.helper.make_node(
'ArgMin', inputs=['x'], outputs=['y'], axis=3, keepdims=0)
node = onnx.helper.make_node('ArgMin',
inputs=['x'],
outputs=['y'],
axis=3,
keepdims=0)
return ([node], [x], [y])
......@@ -184,8 +192,10 @@ def averagepool_1d_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5])
out = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3])
node = onnx.helper.make_node(
'AveragePool', inputs=['0'], outputs=['1'], kernel_shape=[3])
node = onnx.helper.make_node('AveragePool',
inputs=['0'],
outputs=['1'],
kernel_shape=[3])
return ([node], [x], [out])
......@@ -196,8 +206,10 @@ def averagepool_3d_test():
out = helper.make_tensor_value_info('1', TensorProto.FLOAT,
[1, 3, 3, 3, 3])
node = onnx.helper.make_node(
'AveragePool', inputs=['0'], outputs=['1'], kernel_shape=[3, 3, 3])
node = onnx.helper.make_node('AveragePool',
inputs=['0'],
outputs=['1'],
kernel_shape=[3, 3, 3])
return ([node], [x], [out])
......@@ -207,14 +219,13 @@ def averagepool_notset_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 1, 1])
node = onnx.helper.make_node(
'AveragePool',
inputs=['x'],
outputs=['y'],
kernel_shape=[6, 6],
strides=[2, 2],
pads=[0, 0, 1, 1],
auto_pad='NOTSET')
node = onnx.helper.make_node('AveragePool',
inputs=['x'],
outputs=['y'],
kernel_shape=[6, 6],
strides=[2, 2],
pads=[0, 0, 1, 1],
auto_pad='NOTSET')
return ([node], [x], [y])
......@@ -224,15 +235,14 @@ def averagepool_nt_cip_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 1, 1])
node = onnx.helper.make_node(
'AveragePool',
inputs=['x'],
outputs=['y'],
kernel_shape=[6, 6],
strides=[2, 2],
pads=[0, 0, 1, 1],
auto_pad='NOTSET',
count_include_pad=1)
node = onnx.helper.make_node('AveragePool',
inputs=['x'],
outputs=['y'],
kernel_shape=[6, 6],
strides=[2, 2],
pads=[0, 0, 1, 1],
auto_pad='NOTSET',
count_include_pad=1)
return ([node], [x], [y])
......@@ -242,12 +252,11 @@ def averagepool_same_lower_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 5, 5])
node = onnx.helper.make_node(
'AveragePool',
inputs=['x'],
outputs=['y'],
kernel_shape=[2, 2],
auto_pad='SAME_LOWER')
node = onnx.helper.make_node('AveragePool',
inputs=['x'],
outputs=['y'],
kernel_shape=[2, 2],
auto_pad='SAME_LOWER')
return ([node], [x], [y])
......@@ -257,13 +266,12 @@ def averagepool_sl_cip_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 5, 5])
node = onnx.helper.make_node(
'AveragePool',
inputs=['x'],
outputs=['y'],
kernel_shape=[2, 2],
auto_pad='SAME_LOWER',
count_include_pad=1)
node = onnx.helper.make_node('AveragePool',
inputs=['x'],
outputs=['y'],
kernel_shape=[2, 2],
auto_pad='SAME_LOWER',
count_include_pad=1)
return ([node], [x], [y])
......@@ -273,12 +281,11 @@ def averagepool_same_upper_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 5, 5])
node = onnx.helper.make_node(
'AveragePool',
inputs=['x'],
outputs=['y'],
kernel_shape=[2, 2],
auto_pad='SAME_UPPER')
node = onnx.helper.make_node('AveragePool',
inputs=['x'],
outputs=['y'],
kernel_shape=[2, 2],
auto_pad='SAME_UPPER')
return ([node], [x], [y])
......@@ -292,12 +299,11 @@ def batchnorm_1d_test():
var = helper.make_tensor_value_info('4', TensorProto.FLOAT, [3])
out = helper.make_tensor_value_info('5', TensorProto.FLOAT, [1, 3, 5])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['0', '1', '2', '3', '4'],
outputs=['5'],
epsilon=1e-6,
momentum=0.9)
node = onnx.helper.make_node('BatchNormalization',
inputs=['0', '1', '2', '3', '4'],
outputs=['5'],
epsilon=1e-6,
momentum=0.9)
return ([node], [x, scale, bias, mean, var], [out])
......@@ -312,12 +318,11 @@ def batchnorm_3d_test():
out = helper.make_tensor_value_info('5', TensorProto.FLOAT,
[1, 3, 5, 5, 5])
node = onnx.helper.make_node(
'BatchNormalization',
inputs=['0', '1', '2', '3', '4'],
outputs=['5'],
epsilon=1e-6,
momentum=0.9)
node = onnx.helper.make_node('BatchNormalization',
inputs=['0', '1', '2', '3', '4'],
outputs=['5'],
epsilon=1e-6,
momentum=0.9)
return ([node], [x, scale, bias, mean, var], [out])
......@@ -351,8 +356,10 @@ def celu_alpha_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3])
node = onnx.helper.make_node(
'Celu', inputs=['x'], outputs=['y'], alpha=0.8)
node = onnx.helper.make_node('Celu',
inputs=['x'],
outputs=['y'],
alpha=0.8)
return ([node], [x], [y])
......@@ -372,8 +379,10 @@ def celu_verify_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node(
'Celu', inputs=['x'], outputs=['y'], alpha=0.5)
node = onnx.helper.make_node('Celu',
inputs=['x'],
outputs=['y'],
alpha=0.5)
return ([node], [x], [y])
......@@ -393,8 +402,10 @@ def celu_zero_alpha_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node(
'Celu', inputs=['x'], outputs=['y'], alpha=0.0)
node = onnx.helper.make_node('Celu',
inputs=['x'],
outputs=['y'],
alpha=0.0)
return ([node], [x], [y])
......@@ -404,8 +415,11 @@ def clip_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
node = onnx.helper.make_node(
'Clip', inputs=['0'], outputs=['1'], max=6.0, min=0.0)
node = onnx.helper.make_node('Clip',
inputs=['0'],
outputs=['1'],
max=6.0,
min=0.0)
return ([node], [x], [y])
......@@ -418,8 +432,9 @@ def clip_test_op11():
min_val = helper.make_tensor('min', TensorProto.FLOAT, [], [0.0])
max_val = helper.make_tensor('max', TensorProto.FLOAT, [], [6.0])
node = onnx.helper.make_node(
'Clip', inputs=['0', 'min', 'max'], outputs=['1'])
node = onnx.helper.make_node('Clip',
inputs=['0', 'min', 'max'],
outputs=['1'])
return ([node], [x], [y], [min_val, max_val])
......@@ -431,8 +446,9 @@ def clip_test_op11_max_only():
max_val = helper.make_tensor('max', TensorProto.FLOAT, [], [0.0])
node = onnx.helper.make_node(
'Clip', inputs=['0', '', 'max'], outputs=['1'])
node = onnx.helper.make_node('Clip',
inputs=['0', '', 'max'],
outputs=['1'])
return ([node], [x], [y], [max_val])
......@@ -478,8 +494,9 @@ def clip_test_args_type_mismatch():
[1.5, 2.5, 3.5])
max_val = helper.make_tensor('max', TensorProto.INT64, [3, 1], [2, 3, 4])
node = onnx.helper.make_node(
'Clip', inputs=['0', 'min', 'max'], outputs=['1'])
node = onnx.helper.make_node('Clip',
inputs=['0', 'min', 'max'],
outputs=['1'])
return ([node], [x], [y], [min_val, max_val])
......@@ -542,11 +559,10 @@ def constant_fill_input_as_shape_test():
np_shape = np.array([2, 3])
value = helper.make_tensor_value_info('value', TensorProto.FLOAT, [2, 3])
ts_shape = helper.make_tensor(
name='shape_tensor',
data_type=TensorProto.INT32,
dims=np_shape.shape,
vals=np_shape.flatten().astype(int))
ts_shape = helper.make_tensor(name='shape_tensor',
data_type=TensorProto.INT32,
dims=np_shape.shape,
vals=np_shape.flatten().astype(int))
const_shape_node = onnx.helper.make_node(
'Constant',
......@@ -592,11 +608,10 @@ def const_of_shape_empty_input_test():
tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.INT64, [1],
[10])
empty_val = np.array([]).astype(np.int64)
empty_ts = helper.make_tensor(
name='empty_tensor',
data_type=TensorProto.INT32,
dims=empty_val.shape,
vals=empty_val.flatten().astype(int))
empty_ts = helper.make_tensor(name='empty_tensor',
data_type=TensorProto.INT32,
dims=empty_val.shape,
vals=empty_val.flatten().astype(int))
shape_const = helper.make_node(
'Constant',
inputs=[],
......@@ -621,11 +636,10 @@ def const_of_shape_float_test():
[10])
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(
name='shape_tensor',
data_type=TensorProto.INT32,
dims=shape_val.shape,
vals=shape_val.flatten().astype(int))
shape_ts = helper.make_tensor(name='shape_tensor',
data_type=TensorProto.INT32,
dims=shape_val.shape,
vals=shape_val.flatten().astype(int))
shape_const = helper.make_node(
'Constant',
......@@ -635,8 +649,10 @@ def const_of_shape_float_test():
)
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4])
node = onnx.helper.make_node(
'ConstantOfShape', inputs=['shape'], outputs=['y'], value=tensor_val)
node = onnx.helper.make_node('ConstantOfShape',
inputs=['shape'],
outputs=['y'],
value=tensor_val)
return ([shape_const, node], [], [y])
......@@ -646,11 +662,10 @@ def const_of_shape_int64_test():
tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.INT64, [1],
[10])
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(
name='shape_tensor',
data_type=TensorProto.INT32,
dims=shape_val.shape,
vals=shape_val.flatten().astype(int))
shape_ts = helper.make_tensor(name='shape_tensor',
data_type=TensorProto.INT32,
dims=shape_val.shape,
vals=shape_val.flatten().astype(int))
shape_const = helper.make_node(
'Constant',
inputs=[],
......@@ -659,8 +674,10 @@ def const_of_shape_int64_test():
)
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4])
node = onnx.helper.make_node(
'ConstantOfShape', inputs=['shape'], outputs=['y'], value=tensor_val)
node = onnx.helper.make_node('ConstantOfShape',
inputs=['shape'],
outputs=['y'],
value=tensor_val)
return ([shape_const, node], [], [y])
......@@ -668,11 +685,10 @@ def const_of_shape_int64_test():
@onnx_test
def const_of_shape_no_value_attr_test():
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(
name='shape_tensor',
data_type=TensorProto.INT32,
dims=shape_val.shape,
vals=shape_val.flatten().astype(int))
shape_ts = helper.make_tensor(name='shape_tensor',
data_type=TensorProto.INT32,
dims=shape_val.shape,
vals=shape_val.flatten().astype(int))
shape_const = helper.make_node(
'Constant',
inputs=[],
......@@ -719,8 +735,10 @@ def conv_attr_fail_test():
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 3])
node = onnx.helper.make_node(
'Conv', inputs=['0', '1'], strides=[1, 1], outputs=['2'])
node = onnx.helper.make_node('Conv',
inputs=['0', '1'],
strides=[1, 1],
outputs=['2'])
return ([node], [x, y], [out])
......@@ -731,14 +749,13 @@ def conv_autopad_fail_test():
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 1, 1])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 34, 34])
node = onnx.helper.make_node(
'Conv',
inputs=['0', '1'],
outputs=['2'],
dilations=[1, 1],
strides=[1, 1],
auto_pad='SAME',
pads=[0, 0, 1, 1, 0, 0, 1, 1])
node = onnx.helper.make_node('Conv',
inputs=['0', '1'],
outputs=['2'],
dilations=[1, 1],
strides=[1, 1],
auto_pad='SAME',
pads=[0, 0, 1, 1, 0, 0, 1, 1])
return ([node], [x, y], [out])
......@@ -749,13 +766,12 @@ def conv_autopad_same_test():
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 3, 3])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 32, 32])
node = onnx.helper.make_node(
'Conv',
inputs=['0', '1'],
outputs=['2'],
dilations=[1, 1],
strides=[1, 1],
auto_pad='SAME')
node = onnx.helper.make_node('Conv',
inputs=['0', '1'],
outputs=['2'],
dilations=[1, 1],
strides=[1, 1],
auto_pad='SAME')
return ([node], [x, y], [out])
......@@ -767,12 +783,11 @@ def conv_bias_test():
z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1])
out = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 2, 28, 28])
node = onnx.helper.make_node(
'Conv',
inputs=['0', '1', '2'],
outputs=['3'],
dilations=[1, 1],
strides=[1, 1])
node = onnx.helper.make_node('Conv',
inputs=['0', '1', '2'],
outputs=['3'],
dilations=[1, 1],
strides=[1, 1])
return ([node], [x, y, z], [out])
......@@ -789,29 +804,26 @@ def conv_bn_relu_maxpool_test():
out = helper.make_tensor_value_info('10', TensorProto.FLOAT,
[1, 1, 14, 14])
node0 = onnx.helper.make_node(
'Conv',
inputs=['0', '1', '2'],
outputs=['7'],
dilations=[1, 1],
strides=[1, 1],
pads=[0, 0, 0, 0])
node1 = onnx.helper.make_node(
'BatchNormalization',
inputs=['7', '3', '4', '5', '6'],
outputs=['8'],
epsilon=9.99999974737875e-06,
momentum=0.899999976158142)
node0 = onnx.helper.make_node('Conv',
inputs=['0', '1', '2'],
outputs=['7'],
dilations=[1, 1],
strides=[1, 1],
pads=[0, 0, 0, 0])
node1 = onnx.helper.make_node('BatchNormalization',
inputs=['7', '3', '4', '5', '6'],
outputs=['8'],
epsilon=9.99999974737875e-06,
momentum=0.899999976158142)
node2 = onnx.helper.make_node('Relu', inputs=['8'], outputs=['9'])
node3 = onnx.helper.make_node(
'MaxPool',
inputs=['9'],
outputs=['10'],
pads=[0, 0, 0, 0],
strides=[2, 2],
kernel_shape=[2, 2])
node3 = onnx.helper.make_node('MaxPool',
inputs=['9'],
outputs=['10'],
pads=[0, 0, 0, 0],
strides=[2, 2],
kernel_shape=[2, 2])
return ([node0, node1, node2, node3], [x, y, z, m, n, k, l], [out])
......@@ -823,23 +835,21 @@ def conv_relu_maxpool_test():
z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1])
out = helper.make_tensor_value_info('5', TensorProto.FLOAT, [1, 1, 14, 14])
node1 = onnx.helper.make_node(
'Conv',
inputs=['0', '1', '2'],
outputs=['3'],
dilations=[1, 1],
strides=[1, 1],
pads=[0, 0, 0, 0])
node1 = onnx.helper.make_node('Conv',
inputs=['0', '1', '2'],
outputs=['3'],
dilations=[1, 1],
strides=[1, 1],
pads=[0, 0, 0, 0])
node2 = onnx.helper.make_node('Relu', inputs=['3'], outputs=['4'])
node3 = onnx.helper.make_node(
'MaxPool',
inputs=['4'],
outputs=['5'],
pads=[0, 0, 0, 0],
strides=[2, 2],
kernel_shape=[2, 2])
node3 = onnx.helper.make_node('MaxPool',
inputs=['4'],
outputs=['5'],
pads=[0, 0, 0, 0],
strides=[2, 2],
kernel_shape=[2, 2])
return ([node1, node2, node3], [x, y, z], [out])
......@@ -853,41 +863,37 @@ def conv_relu_maxpool_x2_test():
n = helper.make_tensor_value_info('4', TensorProto.FLOAT, [1])
out = helper.make_tensor_value_info('10', TensorProto.FLOAT, [1, 1, 5, 5])
node1 = onnx.helper.make_node(
'Conv',
inputs=['0', '1', '2'],
outputs=['5'],
dilations=[1, 1],
strides=[1, 1],
pads=[0, 0, 0, 0])
node1 = onnx.helper.make_node('Conv',
inputs=['0', '1', '2'],
outputs=['5'],
dilations=[1, 1],
strides=[1, 1],
pads=[0, 0, 0, 0])
node2 = onnx.helper.make_node('Relu', inputs=['5'], outputs=['6'])
node3 = onnx.helper.make_node(
'MaxPool',
inputs=['6'],
outputs=['7'],
pads=[0, 0, 0, 0],
strides=[2, 2],
kernel_shape=[2, 2])
node3 = onnx.helper.make_node('MaxPool',
inputs=['6'],
outputs=['7'],
pads=[0, 0, 0, 0],
strides=[2, 2],
kernel_shape=[2, 2])
node4 = onnx.helper.make_node(
'Conv',
inputs=['7', '3', '4'],
outputs=['8'],
dilations=[1, 1],
strides=[1, 1],
pads=[0, 0, 0, 0])
node4 = onnx.helper.make_node('Conv',
inputs=['7', '3', '4'],
outputs=['8'],
dilations=[1, 1],
strides=[1, 1],
pads=[0, 0, 0, 0])
node5 = onnx.helper.make_node('Relu', inputs=['8'], outputs=['9'])
node6 = onnx.helper.make_node(
'MaxPool',
inputs=['9'],
outputs=['10'],
pads=[0, 0, 0, 0],
strides=[2, 2],
kernel_shape=[2, 2])
node6 = onnx.helper.make_node('MaxPool',
inputs=['9'],
outputs=['10'],
pads=[0, 0, 0, 0],
strides=[2, 2],
kernel_shape=[2, 2])
return ([node1, node2, node3, node4, node5, node6], [x, y, z, m, n], [out])
......@@ -899,12 +905,11 @@ def convinteger_bias_test():
z = helper.make_tensor_value_info('2', TensorProto.INT32, [1])
out = helper.make_tensor_value_info('3', TensorProto.INT32, [1, 2, 28, 28])
node = onnx.helper.make_node(
'ConvInteger',
inputs=['0', '1', '2'],
outputs=['3'],
dilations=[1, 1],
strides=[1, 1])
node = onnx.helper.make_node('ConvInteger',
inputs=['0', '1', '2'],
outputs=['3'],
dilations=[1, 1],
strides=[1, 1])
return ([node], [x, y, z], [out])
......@@ -943,8 +948,10 @@ def deconv_test():
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 1, 3, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 5, 5])
node = onnx.helper.make_node(
'ConvTranspose', name='conv1', inputs=['x', 'w'], outputs=['y'])
node = onnx.helper.make_node('ConvTranspose',
name='conv1',
inputs=['x', 'w'],
outputs=['y'])
return ([node], [x, w], [y])
......@@ -956,8 +963,10 @@ def deconv_bias_test():
b = helper.make_tensor_value_info('b', TensorProto.FLOAT, [1])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 5, 5])
node = onnx.helper.make_node(
'ConvTranspose', name='conv1', inputs=['x', 'w', 'b'], outputs=['y'])
node = onnx.helper.make_node('ConvTranspose',
name='conv1',
inputs=['x', 'w', 'b'],
outputs=['y'])
return ([node], [x, w, b], [y])
......@@ -968,12 +977,11 @@ def deconv_input_pads_strides_test():
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 7, 5])
node = onnx.helper.make_node(
'ConvTranspose',
inputs=['x', 'w'],
outputs=['y'],
strides=[3, 2],
pads=[1, 1, 1, 1])
node = onnx.helper.make_node('ConvTranspose',
inputs=['x', 'w'],
outputs=['y'],
strides=[3, 2],
pads=[1, 1, 1, 1])
return ([node], [x, w], [y])
......@@ -984,12 +992,11 @@ def deconv_input_pads_asymm_test():
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 8, 6])
node = onnx.helper.make_node(
'ConvTranspose',
inputs=['x', 'w'],
outputs=['y'],
strides=[3, 2],
pads=[0, 0, 1, 1])
node = onnx.helper.make_node('ConvTranspose',
inputs=['x', 'w'],
outputs=['y'],
strides=[3, 2],
pads=[0, 0, 1, 1])
return ([node], [x, w], [y])
......@@ -1000,13 +1007,12 @@ def deconv_input_pads_asymm_1d_test():
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 6])
node = onnx.helper.make_node(
'ConvTranspose',
inputs=['x', 'w'],
outputs=['y'],
strides=[2],
pads=[0, 1],
dilations=[1])
node = onnx.helper.make_node('ConvTranspose',
inputs=['x', 'w'],
outputs=['y'],
strides=[2],
pads=[0, 1],
dilations=[1])
return ([node], [x, w], [y])
......@@ -1017,12 +1023,11 @@ def deconv_output_padding_test():
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 10, 8])
node = onnx.helper.make_node(
'ConvTranspose',
inputs=['x', 'w'],
outputs=['y'],
strides=[3, 2],
output_padding=[1, 1])
node = onnx.helper.make_node('ConvTranspose',
inputs=['x', 'w'],
outputs=['y'],
strides=[3, 2],
output_padding=[1, 1])
return ([node], [x, w], [y])
......@@ -1033,12 +1038,11 @@ def deconv_output_padding_3d_test():
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 10, 8, 8])
node = onnx.helper.make_node(
'ConvTranspose',
inputs=['x', 'w'],
outputs=['y'],
strides=[3, 2, 2],
output_padding=[1, 1, 1])
node = onnx.helper.make_node('ConvTranspose',
inputs=['x', 'w'],
outputs=['y'],
strides=[3, 2, 2],
output_padding=[1, 1, 1])
return ([node], [x, w], [y])
......@@ -1049,12 +1053,11 @@ def deconv_output_shape_test():
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 10, 8])
node = onnx.helper.make_node(
'ConvTranspose',
inputs=['x', 'w'],
outputs=['y'],
strides=[3, 2],
output_shape=[10, 8])
node = onnx.helper.make_node('ConvTranspose',
inputs=['x', 'w'],
outputs=['y'],
strides=[3, 2],
output_shape=[10, 8])
return ([node], [x, w], [y])
......@@ -1065,12 +1068,11 @@ def deconv_output_shape_3d_test():
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 10, 8, 8])
node = onnx.helper.make_node(
'ConvTranspose',
inputs=['x', 'w'],
outputs=['y'],
strides=[3, 2, 2],
output_shape=[10, 8, 8])
node = onnx.helper.make_node('ConvTranspose',
inputs=['x', 'w'],
outputs=['y'],
strides=[3, 2, 2],
output_shape=[10, 8, 8])
return ([node], [x, w], [y])
......@@ -1081,8 +1083,10 @@ def deconv_stride_test():
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 2, 3, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 7, 3])
node = onnx.helper.make_node(
'ConvTranspose', inputs=['x', 'w'], outputs=['y'], strides=[3, 2])
node = onnx.helper.make_node('ConvTranspose',
inputs=['x', 'w'],
outputs=['y'],
strides=[3, 2])
return ([node], [x, w], [y])
......@@ -1093,8 +1097,11 @@ def depthtospace_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 8, 5, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 10, 10])
node = onnx.helper.make_node(
'DepthToSpace', inputs=['x'], outputs=['y'], blocksize=2, mode='DCR')
node = onnx.helper.make_node('DepthToSpace',
inputs=['x'],
outputs=['y'],
blocksize=2,
mode='DCR')
return ([node], [x], [y])
......@@ -1105,8 +1112,11 @@ def depthtospace_simple_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 8, 2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 4, 6])
node = onnx.helper.make_node(
'DepthToSpace', inputs=['x'], outputs=['y'], blocksize=2, mode='DCR')
node = onnx.helper.make_node('DepthToSpace',
inputs=['x'],
outputs=['y'],
blocksize=2,
mode='DCR')
return ([node], [x], [y])
......@@ -1117,8 +1127,11 @@ def depthtospace_crd_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 8, 5, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 10, 10])
node = onnx.helper.make_node(
'DepthToSpace', inputs=['x'], outputs=['y'], blocksize=2, mode='CRD')
node = onnx.helper.make_node('DepthToSpace',
inputs=['x'],
outputs=['y'],
blocksize=2,
mode='CRD')
return ([node], [x], [y])
......@@ -1129,8 +1142,10 @@ def spacetodepth_test():
x = helper.make_tensor_value_info('x', TensorProto.float, [2, 2, 10, 10])
y = helper.make_tensor_value_info('y', TensorProto.float, [2, 8, 5, 5])
node = onnx.helper.make_node(
'spacetodepth', inputs=['x'], outputs=['y'], blocksize=2)
node = onnx.helper.make_node('spacetodepth',
inputs=['x'],
outputs=['y'],
blocksize=2)
return ([node], [x], [y])
......@@ -1141,8 +1156,10 @@ def spacetodepth_simple_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 2, 4, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 8, 2, 3])
node = onnx.helper.make_node(
'SpaceToDepth', inputs=['x'], outputs=['y'], blocksize=2)
node = onnx.helper.make_node('SpaceToDepth',
inputs=['x'],
outputs=['y'],
blocksize=2)
return ([node], [x], [y])
......@@ -1153,8 +1170,10 @@ def spacetodepth_invalid_blocksize_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 2, 4, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 8, 2, 3])
node = onnx.helper.make_node(
'SpaceToDepth', inputs=['x'], outputs=['y'], blocksize=0.3)
node = onnx.helper.make_node('SpaceToDepth',
inputs=['x'],
outputs=['y'],
blocksize=0.3)
return ([node], [x], [y])
......@@ -1165,8 +1184,10 @@ def spacetodepth_nondivisibility_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 2, 5, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 8, 2, 2])
node = onnx.helper.make_node(
'SpaceToDepth', inputs=['x'], outputs=['y'], blocksize=2)
node = onnx.helper.make_node('SpaceToDepth',
inputs=['x'],
outputs=['y'],
blocksize=2)
return ([node], [x], [y])
......@@ -1209,8 +1230,10 @@ def make_dequantizelinear_axis_graph(axis):
arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
[1, 1, 5, 1])
node = onnx.helper.make_node(
'DequantizeLinear', inputs=['0', '1', '2'], outputs=['out'], axis=axis)
node = onnx.helper.make_node('DequantizeLinear',
inputs=['0', '1', '2'],
outputs=['out'],
axis=axis)
return ([node], [arg0, arg1, arg2], [arg_out])
......@@ -1244,8 +1267,10 @@ def elu_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
node = onnx.helper.make_node(
'Elu', inputs=['0'], outputs=['1'], alpha=0.01)
node = onnx.helper.make_node('Elu',
inputs=['0'],
outputs=['1'],
alpha=0.01)
return ([node], [x], [y])
......@@ -1256,23 +1281,25 @@ def embedding_bag_test():
index_val = np.array([1, 0, 2])
offset_val = np.array([0])
index_tensor = helper.make_tensor(
name='index_val',
data_type=TensorProto.INT32,
dims=index_val.shape,
vals=index_val.astype(np.int32))
index_tensor = helper.make_tensor(name='index_val',
data_type=TensorProto.INT32,
dims=index_val.shape,
vals=index_val.astype(np.int32))
index = onnx.helper.make_node(
'Constant', inputs=[], outputs=['index'], value=index_tensor)
index = onnx.helper.make_node('Constant',
inputs=[],
outputs=['index'],
value=index_tensor)
offset_tensor = helper.make_tensor(
name='offset_val',
data_type=TensorProto.INT32,
dims=offset_val.reshape(()).shape,
vals=offset_val.astype(np.int32))
offset_tensor = helper.make_tensor(name='offset_val',
data_type=TensorProto.INT32,
dims=offset_val.reshape(()).shape,
vals=offset_val.astype(np.int32))
offset = onnx.helper.make_node(
'Constant', inputs=[], outputs=['offset'], value=offset_tensor)
offset = onnx.helper.make_node('Constant',
inputs=[],
outputs=['offset'],
value=offset_tensor)
weight = helper.make_tensor_value_info('weight', TensorProto.FLOAT, [4, 2])
......@@ -1280,26 +1307,23 @@ def embedding_bag_test():
y2 = helper.make_tensor_value_info('y2', TensorProto.FLOAT, [1, 2])
y3 = helper.make_tensor_value_info('y3', TensorProto.FLOAT, [1, 2])
node1 = onnx.helper.make_node(
'ATen',
inputs=['weight', 'index', 'offset'],
outputs=['y1'],
mode=0,
operator='embedding_bag')
node1 = onnx.helper.make_node('ATen',
inputs=['weight', 'index', 'offset'],
outputs=['y1'],
mode=0,
operator='embedding_bag')
node2 = onnx.helper.make_node(
'ATen',
inputs=['weight', 'index', 'offset'],
outputs=['y2'],
mode=1,
operator='embedding_bag')
node3 = onnx.helper.make_node(
'ATen',
inputs=['weight', 'index', 'offset'],
outputs=['y3'],
mode=2,
operator='embedding_bag')
node2 = onnx.helper.make_node('ATen',
inputs=['weight', 'index', 'offset'],
outputs=['y2'],
mode=1,
operator='embedding_bag')
node3 = onnx.helper.make_node('ATen',
inputs=['weight', 'index', 'offset'],
outputs=['y3'],
mode=2,
operator='embedding_bag')
return ([index, offset, node1, node2, node3], [weight], [y1, y2, y3])
......@@ -1310,34 +1334,35 @@ def embedding_bag_offset_test():
index_val = np.array([1, 0])
offset_val = np.array([0, 1])
index_tensor = helper.make_tensor(
name='index_val',
data_type=TensorProto.INT32,
dims=index_val.shape,
vals=index_val.astype(np.int32))
index_tensor = helper.make_tensor(name='index_val',
data_type=TensorProto.INT32,
dims=index_val.shape,
vals=index_val.astype(np.int32))
index = onnx.helper.make_node(
'Constant', inputs=[], outputs=['index'], value=index_tensor)
index = onnx.helper.make_node('Constant',
inputs=[],
outputs=['index'],
value=index_tensor)
offset_tensor = helper.make_tensor(
name='offset_val',
data_type=TensorProto.INT32,
dims=offset_val.shape,
vals=offset_val.astype(np.int32))
offset_tensor = helper.make_tensor(name='offset_val',
data_type=TensorProto.INT32,
dims=offset_val.shape,
vals=offset_val.astype(np.int32))
offset = onnx.helper.make_node(
'Constant', inputs=[], outputs=['offset'], value=offset_tensor)
offset = onnx.helper.make_node('Constant',
inputs=[],
outputs=['offset'],
value=offset_tensor)
weight = helper.make_tensor_value_info('weight', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node(
'ATen',
inputs=['weight', 'index', 'offset'],
outputs=['y'],
mode=0,
operator='embedding_bag')
node = onnx.helper.make_node('ATen',
inputs=['weight', 'index', 'offset'],
outputs=['y'],
mode=0,
operator='embedding_bag')
return ([index, offset, node], [weight], [y])
......@@ -1345,11 +1370,10 @@ def embedding_bag_offset_test():
@onnx_test
def equal_test():
ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
x1 = helper.make_tensor(
"x1",
data_type=TensorProto.FLOAT,
dims=(2, 3),
vals=ax1.astype(np.float32))
x1 = helper.make_tensor("x1",
data_type=TensorProto.FLOAT,
dims=(2, 3),
vals=ax1.astype(np.float32))
x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
......@@ -1412,11 +1436,10 @@ def exp_test():
@onnx_test
def expand_test():
shape_val = np.array([2, 3, 4, 5]).astype(np.int64)
shape_ts = helper.make_tensor(
name='shape_tensor',
data_type=TensorProto.INT32,
dims=shape_val.shape,
vals=shape_val.flatten().astype(int))
shape_ts = helper.make_tensor(name='shape_tensor',
data_type=TensorProto.INT32,
dims=shape_val.shape,
vals=shape_val.flatten().astype(int))
shape_const = helper.make_node(
'Constant',
inputs=[],
......@@ -1426,8 +1449,9 @@ def expand_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 1, 1])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4, 5])
node = onnx.helper.make_node(
'Expand', inputs=['x', 'shape'], outputs=['y'])
node = onnx.helper.make_node('Expand',
inputs=['x', 'shape'],
outputs=['y'])
return ([shape_const, node], [x], [y])
......@@ -1483,8 +1507,10 @@ def eyelike_k_test():
def eyelike_k_outofbounds_neg_test():
T1 = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [2, 4])
T2 = helper.make_tensor_value_info('T2', TensorProto.FLOAT, [2, 4])
node = onnx.helper.make_node(
'EyeLike', inputs=['T1'], outputs=['T2'], k=-2)
node = onnx.helper.make_node('EyeLike',
inputs=['T1'],
outputs=['T2'],
k=-2)
return ([node], [T1], [T2])
......@@ -1520,8 +1546,10 @@ def eyelike_verify_test():
def eyelike_verify_negk_test():
T1 = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [3, 4])
T2 = helper.make_tensor_value_info('T2', TensorProto.FLOAT, [3, 4])
node = onnx.helper.make_node(
'EyeLike', inputs=['T1'], outputs=['T2'], k=-2)
node = onnx.helper.make_node('EyeLike',
inputs=['T1'],
outputs=['T2'],
k=-2)
return ([node], [T1], [T2])
......@@ -1529,8 +1557,10 @@ def eyelike_verify_negk_test():
def eyelike_set_dtype_test():
T1 = helper.make_tensor_value_info('T1', TensorProto.FLOAT, [3, 4])
T2 = helper.make_tensor_value_info('T2', TensorProto.DOUBLE, [3, 4])
node = onnx.helper.make_node(
'EyeLike', inputs=['T1'], outputs=['T2'], dtype=TensorProto.DOUBLE)
node = onnx.helper.make_node('EyeLike',
inputs=['T1'],
outputs=['T2'],
dtype=TensorProto.DOUBLE)
return ([node], [T1], [T2])
......@@ -1540,8 +1570,10 @@ def flatten_test():
y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [6, 20])
y2 = helper.make_tensor_value_info('3', TensorProto.FLOAT, [2, 60])
node = onnx.helper.make_node(
'Flatten', inputs=['0'], axis=2, outputs=['2'])
node = onnx.helper.make_node('Flatten',
inputs=['0'],
axis=2,
outputs=['2'])
node2 = onnx.helper.make_node('Flatten', inputs=['0'], outputs=['3'])
......@@ -1561,8 +1593,10 @@ def flatten_nonstd_test():
perm=[0, 1, 3, 2],
)
node = onnx.helper.make_node(
'Flatten', inputs=['tx'], axis=2, outputs=['2'])
node = onnx.helper.make_node('Flatten',
inputs=['tx'],
axis=2,
outputs=['2'])
node2 = onnx.helper.make_node('Flatten', inputs=['tx'], outputs=['3'])
......@@ -1639,14 +1673,13 @@ def gemm_test():
z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [])
a = helper.make_tensor_value_info('3', TensorProto.FLOAT, [7, 11])
node = onnx.helper.make_node(
'Gemm',
inputs=['0', '1', '2'],
outputs=['3'],
alpha=2.0,
beta=2.0,
transA=1,
transB=1)
node = onnx.helper.make_node('Gemm',
inputs=['0', '1', '2'],
outputs=['3'],
alpha=2.0,
beta=2.0,
transA=1,
transB=1)
return ([node], [x, y, z], [a])
......@@ -1658,13 +1691,12 @@ def gemm_ex_test():
m3 = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 1, 6, 7])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 6, 7])
node = onnx.helper.make_node(
'Gemm',
inputs=['1', '2', '3'],
outputs=['y'],
alpha=0.5,
beta=0.8,
transA=1)
node = onnx.helper.make_node('Gemm',
inputs=['1', '2', '3'],
outputs=['y'],
alpha=0.5,
beta=0.8,
transA=1)
return ([node], [m1, m2, m3], [y])
......@@ -1676,13 +1708,12 @@ def gemm_ex_brcst_test():
m3 = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 1, 6, 1])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 6, 7])
node = onnx.helper.make_node(
'Gemm',
inputs=['1', '2', '3'],
outputs=['y'],
alpha=0.5,
beta=0.8,
transA=1)
node = onnx.helper.make_node('Gemm',
inputs=['1', '2', '3'],
outputs=['y'],
alpha=0.5,
beta=0.8,
transA=1)
return ([node], [m1, m2, m3], [y])
......@@ -1694,13 +1725,12 @@ def gemm_half_test():
m3 = helper.make_tensor_value_info('3', TensorProto.FLOAT16, [1, 1, 6, 1])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [1, 1, 6, 7])
node = onnx.helper.make_node(
'Gemm',
inputs=['1', '2', '3'],
outputs=['y'],
alpha=0.5,
beta=0.8,
transA=1)
node = onnx.helper.make_node('Gemm',
inputs=['1', '2', '3'],
outputs=['y'],
alpha=0.5,
beta=0.8,
transA=1)
return ([node], [m1, m2, m3], [y])
......@@ -1736,11 +1766,10 @@ def globalmaxpool_test():
@onnx_test
def greater_test():
ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
x1 = helper.make_tensor(
"x1",
data_type=TensorProto.FLOAT,
dims=(2, 3),
vals=ax1.astype(np.float32))
x1 = helper.make_tensor("x1",
data_type=TensorProto.FLOAT,
dims=(2, 3),
vals=ax1.astype(np.float32))
x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
......@@ -1819,8 +1848,11 @@ def hardsigmoid_double_test():
x = helper.make_tensor_value_info('x', TensorProto.DOUBLE, [1, 3, 4, 5])
y = helper.make_tensor_value_info('y', TensorProto.DOUBLE, [1, 3, 4, 5])
node = onnx.helper.make_node(
'HardSigmoid', inputs=['x'], outputs=['y'], alpha=0.3, beta=0.7)
node = onnx.helper.make_node('HardSigmoid',
inputs=['x'],
outputs=['y'],
alpha=0.3,
beta=0.7)
return ([node], [x], [y])
......@@ -1860,30 +1892,32 @@ def if_else_test():
x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [2, 3])
y = onnx.helper.make_tensor_value_info('y', onnx.TensorProto.FLOAT, [2, 3])
then_out = onnx.helper.make_tensor_value_info(
'then_out', onnx.TensorProto.FLOAT, [2, 3])
else_out = onnx.helper.make_tensor_value_info(
'else_out', onnx.TensorProto.FLOAT, [2, 3])
then_out = onnx.helper.make_tensor_value_info('then_out',
onnx.TensorProto.FLOAT,
[2, 3])
else_out = onnx.helper.make_tensor_value_info('else_out',
onnx.TensorProto.FLOAT,
[2, 3])
xt = np.ones((2, 3)).astype(np.float)
xt_tensor = helper.make_tensor(
name='xt',
data_type=TensorProto.FLOAT,
dims=xt.shape,
vals=xt.flatten().astype(np.float32))
xt_tensor = helper.make_tensor(name='xt',
data_type=TensorProto.FLOAT,
dims=xt.shape,
vals=xt.flatten().astype(np.float32))
yt = np.random.randn(2, 3).astype(np.float)
yt_tensor = helper.make_tensor(
name='yt',
data_type=TensorProto.FLOAT,
dims=yt.shape,
vals=yt.flatten().astype(np.float32))
yt_tensor = helper.make_tensor(name='yt',
data_type=TensorProto.FLOAT,
dims=yt.shape,
vals=yt.flatten().astype(np.float32))
then_add_node = onnx.helper.make_node(
'Add', inputs=['x', 'xt'], outputs=['then_out'])
then_add_node = onnx.helper.make_node('Add',
inputs=['x', 'xt'],
outputs=['then_out'])
else_mul_node = onnx.helper.make_node(
'Mul', inputs=['y', 'yt'], outputs=['else_out'])
else_mul_node = onnx.helper.make_node('Mul',
inputs=['y', 'yt'],
outputs=['else_out'])
then_body = onnx.helper.make_graph([then_add_node], 'then_body', [],
[then_out])
......@@ -1892,19 +1926,17 @@ def if_else_test():
[else_out])
cond = np.array([0]).astype(np.bool)
cond_tensor = helper.make_tensor(
name="cond",
data_type=TensorProto.BOOL,
dims=cond.shape,
vals=cond.astype(bool))
cond_tensor = helper.make_tensor(name="cond",
data_type=TensorProto.BOOL,
dims=cond.shape,
vals=cond.astype(bool))
res = onnx.helper.make_tensor_value_info('res', TensorProto.FLOAT, [])
node = onnx.helper.make_node(
'If',
inputs=['cond'],
outputs=['res'],
then_branch=then_body,
else_branch=else_body)
node = onnx.helper.make_node('If',
inputs=['cond'],
outputs=['res'],
then_branch=then_body,
else_branch=else_body)
return ([node], [x, y], [res], [cond_tensor, xt_tensor, yt_tensor])
......@@ -1948,22 +1980,23 @@ def if_literal_test():
onnx.TensorProto.BOOL, [])
ret = onnx.helper.make_tensor_value_info('ret', TensorProto.FLOAT, [])
node = onnx.helper.make_node(
'If',
inputs=['cond'],
outputs=['ret'],
then_branch=then_body,
else_branch=else_body)
node = onnx.helper.make_node('If',
inputs=['cond'],
outputs=['ret'],
then_branch=then_body,
else_branch=else_body)
return ([node], [cond_input], [ret])
@onnx_test
def if_param_excp_test():
then_out = onnx.helper.make_tensor_value_info(
'then_out', onnx.TensorProto.FLOAT, [2, 3])
else_out = onnx.helper.make_tensor_value_info(
'else_out', onnx.TensorProto.FLOAT, [2, 3])
then_out = onnx.helper.make_tensor_value_info('then_out',
onnx.TensorProto.FLOAT,
[2, 3])
else_out = onnx.helper.make_tensor_value_info('else_out',
onnx.TensorProto.FLOAT,
[2, 3])
x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [2, 3])
y = onnx.helper.make_tensor_value_info('y', onnx.TensorProto.FLOAT, [2, 4])
......@@ -1971,23 +2004,23 @@ def if_param_excp_test():
yt = np.random.randn(2, 4).astype(np.float)
xt = np.random.randn(2, 3).astype(np.float)
xt_tensor = helper.make_tensor(
name='xt',
data_type=TensorProto.FLOAT,
dims=xt.shape,
vals=xt.flatten().astype(np.float32))
xt_tensor = helper.make_tensor(name='xt',
data_type=TensorProto.FLOAT,
dims=xt.shape,
vals=xt.flatten().astype(np.float32))
yt_tensor = helper.make_tensor(
name='yt',
data_type=TensorProto.FLOAT,
dims=yt.shape,
vals=yt.flatten().astype(np.float32))
yt_tensor = helper.make_tensor(name='yt',
data_type=TensorProto.FLOAT,
dims=yt.shape,
vals=yt.flatten().astype(np.float32))
then_add_node = onnx.helper.make_node(
'Add', inputs=['x', 'xt'], outputs=['then_out'])
then_add_node = onnx.helper.make_node('Add',
inputs=['x', 'xt'],
outputs=['then_out'])
else_mul_node = onnx.helper.make_node(
'Mul', inputs=['y', 'yt'], outputs=['else_out'])
else_mul_node = onnx.helper.make_node('Mul',
inputs=['y', 'yt'],
outputs=['else_out'])
then_body = onnx.helper.make_graph([then_add_node], 'then_body', [],
[then_out], [xt_tensor])
......@@ -1999,33 +2032,33 @@ def if_param_excp_test():
onnx.TensorProto.BOOL, [])
ret = onnx.helper.make_tensor_value_info('ret', TensorProto.FLOAT, [])
node = onnx.helper.make_node(
'If',
inputs=['cond'],
outputs=['ret'],
then_branch=then_body,
else_branch=else_body)
node = onnx.helper.make_node('If',
inputs=['cond'],
outputs=['ret'],
then_branch=then_body,
else_branch=else_body)
return ([node], [cond_input, x, y], [ret])
@onnx_test
def if_param_excp1_test():
then_out = onnx.helper.make_tensor_value_info(
'sub_out', onnx.TensorProto.FLOAT, [2, 3])
then_out = onnx.helper.make_tensor_value_info('sub_out',
onnx.TensorProto.FLOAT,
[2, 3])
x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [2, 3])
xt = np.random.randn(2, 3).astype(np.float)
xt_tensor = helper.make_tensor(
name='xt',
data_type=TensorProto.FLOAT,
dims=xt.shape,
vals=xt.flatten().astype(np.float32))
xt_tensor = helper.make_tensor(name='xt',
data_type=TensorProto.FLOAT,
dims=xt.shape,
vals=xt.flatten().astype(np.float32))
then_add_node = onnx.helper.make_node(
'Add', inputs=['x', 'xt'], outputs=['sub_out'])
then_add_node = onnx.helper.make_node('Add',
inputs=['x', 'xt'],
outputs=['sub_out'])
sub_body = onnx.helper.make_graph([then_add_node], 'sub_body', [],
[then_out], [xt_tensor])
......@@ -2034,22 +2067,23 @@ def if_param_excp1_test():
onnx.TensorProto.BOOL, [2])
ret = onnx.helper.make_tensor_value_info('ret', TensorProto.FLOAT, [])
node = onnx.helper.make_node(
'If',
inputs=['cond'],
outputs=['ret'],
then_branch=sub_body,
else_branch=sub_body)
node = onnx.helper.make_node('If',
inputs=['cond'],
outputs=['ret'],
then_branch=sub_body,
else_branch=sub_body)
return ([node], [cond_input, x], [ret])
@onnx_test
def if_param_test():
then_out = onnx.helper.make_tensor_value_info(
'then_out', onnx.TensorProto.FLOAT, [2, 3])
else_out = onnx.helper.make_tensor_value_info(
'else_out', onnx.TensorProto.FLOAT, [2, 3])
then_out = onnx.helper.make_tensor_value_info('then_out',
onnx.TensorProto.FLOAT,
[2, 3])
else_out = onnx.helper.make_tensor_value_info('else_out',
onnx.TensorProto.FLOAT,
[2, 3])
x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [2, 3])
y = onnx.helper.make_tensor_value_info('y', onnx.TensorProto.FLOAT, [2, 3])
......@@ -2057,23 +2091,23 @@ def if_param_test():
yt = np.random.randn(2, 3).astype(np.float)
xt = np.random.randn(2, 3).astype(np.float)
xt_tensor = helper.make_tensor(
name='xt',
data_type=TensorProto.FLOAT,
dims=xt.shape,
vals=xt.flatten().astype(np.float32))
xt_tensor = helper.make_tensor(name='xt',
data_type=TensorProto.FLOAT,
dims=xt.shape,
vals=xt.flatten().astype(np.float32))
yt_tensor = helper.make_tensor(
name='yt',
data_type=TensorProto.FLOAT,
dims=yt.shape,
vals=yt.flatten().astype(np.float32))
yt_tensor = helper.make_tensor(name='yt',
data_type=TensorProto.FLOAT,
dims=yt.shape,
vals=yt.flatten().astype(np.float32))
then_add_node = onnx.helper.make_node(
'Add', inputs=['x', 'xt'], outputs=['then_out'])
then_add_node = onnx.helper.make_node('Add',
inputs=['x', 'xt'],
outputs=['then_out'])
else_mul_node = onnx.helper.make_node(
'Mul', inputs=['y', 'yt'], outputs=['else_out'])
else_mul_node = onnx.helper.make_node('Mul',
inputs=['y', 'yt'],
outputs=['else_out'])
then_body = onnx.helper.make_graph([then_add_node], 'then_body', [],
[then_out], [xt_tensor])
......@@ -2085,12 +2119,11 @@ def if_param_test():
onnx.TensorProto.BOOL, [])
ret = onnx.helper.make_tensor_value_info('ret', TensorProto.FLOAT, [])
node = onnx.helper.make_node(
'If',
inputs=['cond'],
outputs=['ret'],
then_branch=then_body,
else_branch=else_body)
node = onnx.helper.make_node('If',
inputs=['cond'],
outputs=['ret'],
then_branch=then_body,
else_branch=else_body)
return ([node], [cond_input, x, y], [ret])
......@@ -2099,12 +2132,14 @@ def if_param_test():
def if_pl_test():
out_x = onnx.helper.make_tensor_value_info('out_x', onnx.TensorProto.FLOAT,
[2, 3])
out_l_x = onnx.helper.make_tensor_value_info(
'out_l_x', onnx.TensorProto.FLOAT, [2, 3])
out_l_x = onnx.helper.make_tensor_value_info('out_l_x',
onnx.TensorProto.FLOAT,
[2, 3])
out_y = onnx.helper.make_tensor_value_info('out_y', onnx.TensorProto.FLOAT,
[3, 3])
out_l_y = onnx.helper.make_tensor_value_info(
'out_l_y', onnx.TensorProto.FLOAT, [3, 3])
out_l_y = onnx.helper.make_tensor_value_info('out_l_y',
onnx.TensorProto.FLOAT,
[3, 3])
x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [2, 3])
y = onnx.helper.make_tensor_value_info('y', onnx.TensorProto.FLOAT, [3, 3])
......@@ -2112,23 +2147,23 @@ def if_pl_test():
xt = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
yt = np.array([[8, 7, 6], [5, 4, 3], [2, 1, 0]]).astype(np.float32)
xt_tensor = helper.make_tensor(
name='xt',
data_type=TensorProto.FLOAT,
dims=xt.shape,
vals=xt.flatten().astype(np.float32))
xt_tensor = helper.make_tensor(name='xt',
data_type=TensorProto.FLOAT,
dims=xt.shape,
vals=xt.flatten().astype(np.float32))
yt_tensor = helper.make_tensor(
name='yt',
data_type=TensorProto.FLOAT,
dims=yt.shape,
vals=yt.flatten().astype(np.float32))
yt_tensor = helper.make_tensor(name='yt',
data_type=TensorProto.FLOAT,
dims=yt.shape,
vals=yt.flatten().astype(np.float32))
then_add_node = onnx.helper.make_node(
'Add', inputs=['x', 'xt'], outputs=['out_x'])
then_add_node = onnx.helper.make_node('Add',
inputs=['x', 'xt'],
outputs=['out_x'])
else_mul_node = onnx.helper.make_node(
'Mul', inputs=['y', 'yt'], outputs=['out_y'])
else_mul_node = onnx.helper.make_node('Mul',
inputs=['y', 'yt'],
outputs=['out_y'])
then_const_node = onnx.helper.make_node(
'Constant',
......@@ -2152,12 +2187,11 @@ def if_pl_test():
onnx.TensorProto.BOOL, [])
ret = onnx.helper.make_tensor_value_info('ret', TensorProto.FLOAT, [])
node = onnx.helper.make_node(
'If',
inputs=['cond'],
outputs=['ret'],
then_branch=then_body,
else_branch=else_body)
node = onnx.helper.make_node('If',
inputs=['cond'],
outputs=['ret'],
then_branch=then_body,
else_branch=else_body)
return ([node], [cond_input, x, y], [ret], [xt_tensor, yt_tensor])
......@@ -2167,30 +2201,32 @@ def if_then_test():
x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [2, 3])
y = onnx.helper.make_tensor_value_info('y', onnx.TensorProto.FLOAT, [2, 3])
then_out = onnx.helper.make_tensor_value_info(
'then_out', onnx.TensorProto.FLOAT, [2, 3])
else_out = onnx.helper.make_tensor_value_info(
'else_out', onnx.TensorProto.FLOAT, [2, 3])
then_out = onnx.helper.make_tensor_value_info('then_out',
onnx.TensorProto.FLOAT,
[2, 3])
else_out = onnx.helper.make_tensor_value_info('else_out',
onnx.TensorProto.FLOAT,
[2, 3])
xt = np.ones((2, 3)).astype(np.float)
xt_tensor = helper.make_tensor(
name='xt',
data_type=TensorProto.FLOAT,
dims=xt.shape,
vals=xt.flatten().astype(np.float32))
xt_tensor = helper.make_tensor(name='xt',
data_type=TensorProto.FLOAT,
dims=xt.shape,
vals=xt.flatten().astype(np.float32))
yt = np.random.randn(2, 3).astype(np.float)
yt_tensor = helper.make_tensor(
name='yt',
data_type=TensorProto.FLOAT,
dims=yt.shape,
vals=yt.flatten().astype(np.float32))
yt_tensor = helper.make_tensor(name='yt',
data_type=TensorProto.FLOAT,
dims=yt.shape,
vals=yt.flatten().astype(np.float32))
then_add_node = onnx.helper.make_node(
'Add', inputs=['x', 'xt'], outputs=['then_out'])
then_add_node = onnx.helper.make_node('Add',
inputs=['x', 'xt'],
outputs=['then_out'])
else_mul_node = onnx.helper.make_node(
'Mul', inputs=['y', 'yt'], outputs=['else_out'])
else_mul_node = onnx.helper.make_node('Mul',
inputs=['y', 'yt'],
outputs=['else_out'])
then_body = onnx.helper.make_graph([then_add_node], 'then_body', [],
[then_out])
......@@ -2199,19 +2235,17 @@ def if_then_test():
[else_out])
cond = np.array([1]).astype(np.bool)
cond_tensor = helper.make_tensor(
name="cond",
data_type=TensorProto.BOOL,
dims=cond.shape,
vals=cond.astype(bool))
cond_tensor = helper.make_tensor(name="cond",
data_type=TensorProto.BOOL,
dims=cond.shape,
vals=cond.astype(bool))
res = onnx.helper.make_tensor_value_info('res', TensorProto.FLOAT, [])
node = onnx.helper.make_node(
'If',
inputs=['cond'],
outputs=['res'],
then_branch=then_body,
else_branch=else_body)
node = onnx.helper.make_node('If',
inputs=['cond'],
outputs=['res'],
then_branch=then_body,
else_branch=else_body)
return ([node], [x, y], [res], [cond_tensor, xt_tensor, yt_tensor])
......@@ -2223,45 +2257,50 @@ def if_tuple_test():
cond_input = onnx.helper.make_tensor_value_info('cond',
onnx.TensorProto.BOOL, [])
then_out0 = onnx.helper.make_tensor_value_info(
'then_out0', onnx.TensorProto.FLOAT, [1, 4])
then_out1 = onnx.helper.make_tensor_value_info(
'then_out1', onnx.TensorProto.FLOAT, [3, 4])
else_out0 = onnx.helper.make_tensor_value_info(
'else_out0', onnx.TensorProto.FLOAT, [1, 4])
else_out1 = onnx.helper.make_tensor_value_info(
'else_out1', onnx.TensorProto.FLOAT, [3, 4])
then_out0 = onnx.helper.make_tensor_value_info('then_out0',
onnx.TensorProto.FLOAT,
[1, 4])
then_out1 = onnx.helper.make_tensor_value_info('then_out1',
onnx.TensorProto.FLOAT,
[3, 4])
else_out0 = onnx.helper.make_tensor_value_info('else_out0',
onnx.TensorProto.FLOAT,
[1, 4])
else_out1 = onnx.helper.make_tensor_value_info('else_out1',
onnx.TensorProto.FLOAT,
[3, 4])
one = np.ones([1]).astype(np.float)
one_tensor = helper.make_tensor(
name='one',
data_type=TensorProto.FLOAT,
dims=one.shape,
vals=one.flatten().astype(np.float32))
one_tensor = helper.make_tensor(name='one',
data_type=TensorProto.FLOAT,
dims=one.shape,
vals=one.flatten().astype(np.float32))
two = np.array([2]).astype(np.float)
two_tensor = helper.make_tensor(
name='two',
data_type=TensorProto.FLOAT,
dims=two.shape,
vals=two.flatten().astype(np.float32))
two_tensor = helper.make_tensor(name='two',
data_type=TensorProto.FLOAT,
dims=two.shape,
vals=two.flatten().astype(np.float32))
three = np.array([3]).astype(np.float)
three_tensor = helper.make_tensor(
name='three',
data_type=TensorProto.FLOAT,
dims=three.shape,
vals=three.flatten().astype(np.float32))
then_add_node = onnx.helper.make_node(
'Add', inputs=['x', 'one'], outputs=['then_out0'])
then_mul_node = onnx.helper.make_node(
'Mul', inputs=['y', 'two'], outputs=['then_out1'])
else_mul_node = onnx.helper.make_node(
'Mul', inputs=['x', 'three'], outputs=['else_out0'])
else_add_node = onnx.helper.make_node(
'Add', inputs=['y', 'three'], outputs=['else_out1'])
three_tensor = helper.make_tensor(name='three',
data_type=TensorProto.FLOAT,
dims=three.shape,
vals=three.flatten().astype(np.float32))
then_add_node = onnx.helper.make_node('Add',
inputs=['x', 'one'],
outputs=['then_out0'])
then_mul_node = onnx.helper.make_node('Mul',
inputs=['y', 'two'],
outputs=['then_out1'])
else_mul_node = onnx.helper.make_node('Mul',
inputs=['x', 'three'],
outputs=['else_out0'])
else_add_node = onnx.helper.make_node('Add',
inputs=['y', 'three'],
outputs=['else_out1'])
then_body = onnx.helper.make_graph([then_add_node, then_mul_node],
'then_body', [], [then_out0, then_out1])
......@@ -2272,15 +2311,14 @@ def if_tuple_test():
res0 = onnx.helper.make_tensor_value_info('res0', TensorProto.FLOAT, [])
res1 = onnx.helper.make_tensor_value_info('res1', TensorProto.FLOAT, [])
node = onnx.helper.make_node(
'If',
inputs=['cond'],
outputs=['res0', 'res1'],
then_branch=then_body,
else_branch=else_body)
node = onnx.helper.make_node('If',
inputs=['cond'],
outputs=['res0', 'res1'],
then_branch=then_body,
else_branch=else_body)
return ([node], [cond_input, x, y], [res0, res1],
[one_tensor, two_tensor, three_tensor])
return ([node], [cond_input, x,
y], [res0, res1], [one_tensor, two_tensor, three_tensor])
@onnx_test
......@@ -2288,12 +2326,11 @@ def imagescaler_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 16, 16])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 16, 16])
node = onnx.helper.make_node(
'ImageScaler',
inputs=['0'],
outputs=['1'],
bias=[0.01, 0.02, 0.03],
scale=0.5)
node = onnx.helper.make_node('ImageScaler',
inputs=['0'],
outputs=['1'],
bias=[0.01, 0.02, 0.03],
scale=0.5)
return ([node], [x], [y])
......@@ -2303,12 +2340,11 @@ def imagescaler_half_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [1, 3, 16, 16])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT16, [1, 3, 16, 16])
node = onnx.helper.make_node(
'ImageScaler',
inputs=['0'],
outputs=['1'],
bias=[0.01, 0.02, 0.03],
scale=0.5)
node = onnx.helper.make_node('ImageScaler',
inputs=['0'],
outputs=['1'],
bias=[0.01, 0.02, 0.03],
scale=0.5)
return ([node], [x], [y])
......@@ -2363,11 +2399,10 @@ def implicit_sub_bcast_test():
@onnx_test
def initializer_not_an_input():
values = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
w = helper.make_tensor(
name='w',
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(np.float))
w = helper.make_tensor(name='w',
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(np.float))
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [5, 2])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [5, 4])
......@@ -2388,34 +2423,32 @@ def instance_norm_test():
bias = helper.make_tensor_value_info('2', TensorProto.FLOAT, [2])
y = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 2, 3, 3])
node = onnx.helper.make_node(
'InstanceNormalization', inputs=['0', '1', '2'], outputs=['3'])
node = onnx.helper.make_node('InstanceNormalization',
inputs=['0', '1', '2'],
outputs=['3'])
return ([node], [x, scale, bias], [y])
@onnx_test
def instance_norm_val_test():
x = np.array([[[[0, 1, 2], [3, 4, 5], [6, 7, 8]], [[0, 1, 2], [3, 4, 5],
[6, 7, 8]]]])
x = np.array([[[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]]])
scale = np.array([1, 2])
bias = np.array([0, 1])
x_tensor = helper.make_tensor(
name='x_tensor',
data_type=TensorProto.FLOAT,
dims=x.shape,
vals=x.flatten().astype(np.float))
scale_tensor = helper.make_tensor(
name='scale_tensor',
data_type=TensorProto.FLOAT,
dims=scale.shape,
vals=scale.flatten().astype(np.float))
bias_tensor = helper.make_tensor(
name='bias_tensor',
data_type=TensorProto.FLOAT,
dims=bias.shape,
vals=bias.flatten().astype(np.float))
x_tensor = helper.make_tensor(name='x_tensor',
data_type=TensorProto.FLOAT,
dims=x.shape,
vals=x.flatten().astype(np.float))
scale_tensor = helper.make_tensor(name='scale_tensor',
data_type=TensorProto.FLOAT,
dims=scale.shape,
vals=scale.flatten().astype(np.float))
bias_tensor = helper.make_tensor(name='bias_tensor',
data_type=TensorProto.FLOAT,
dims=bias.shape,
vals=bias.flatten().astype(np.float))
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 3, 3])
......@@ -2429,26 +2462,23 @@ def instance_norm_val_test():
@onnx_test
def instance_norm_val_3d_test():
x = np.array([[[[[0, 1], [2, 3]], [[4, 5], [6, 7]]], [[[0, 1], [2, 3]],
[[4, 5], [6, 7]]]]])
x = np.array([[[[[0, 1], [2, 3]], [[4, 5], [6, 7]]],
[[[0, 1], [2, 3]], [[4, 5], [6, 7]]]]])
scale = np.array([1, 2])
bias = np.array([0, 1])
x_tensor = helper.make_tensor(
name='x_tensor',
data_type=TensorProto.FLOAT,
dims=x.shape,
vals=x.flatten().astype(np.float))
scale_tensor = helper.make_tensor(
name='scale_tensor',
data_type=TensorProto.FLOAT,
dims=scale.shape,
vals=scale.flatten().astype(np.float))
bias_tensor = helper.make_tensor(
name='bias_tensor',
data_type=TensorProto.FLOAT,
dims=bias.shape,
vals=bias.flatten().astype(np.float))
x_tensor = helper.make_tensor(name='x_tensor',
data_type=TensorProto.FLOAT,
dims=x.shape,
vals=x.flatten().astype(np.float))
scale_tensor = helper.make_tensor(name='scale_tensor',
data_type=TensorProto.FLOAT,
dims=scale.shape,
vals=scale.flatten().astype(np.float))
bias_tensor = helper.make_tensor(name='bias_tensor',
data_type=TensorProto.FLOAT,
dims=bias.shape,
vals=bias.flatten().astype(np.float))
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 2, 2, 2, 2])
......@@ -2496,47 +2526,57 @@ def layernorm_test():
pow_2 = np.array([[[2, 2, 2, 2, 2]]])
epsilon = np.array([1e-12])
pow_tensor = helper.make_tensor(
name='pow',
data_type=TensorProto.FLOAT,
dims=pow_2.shape,
vals=pow_2.flatten().astype(np.float))
pow_tensor = helper.make_tensor(name='pow',
data_type=TensorProto.FLOAT,
dims=pow_2.shape,
vals=pow_2.flatten().astype(np.float))
epsilon_tensor = helper.make_tensor(
name='epsilon',
data_type=TensorProto.FLOAT,
dims=epsilon.shape,
vals=epsilon.flatten().astype(np.float))
epsilon_tensor = helper.make_tensor(name='epsilon',
data_type=TensorProto.FLOAT,
dims=epsilon.shape,
vals=epsilon.flatten().astype(
np.float))
mean = onnx.helper.make_node(
'ReduceMean', inputs=['0'], outputs=['mean_out'], axes=axes)
mean = onnx.helper.make_node('ReduceMean',
inputs=['0'],
outputs=['mean_out'],
axes=axes)
sub_mean = onnx.helper.make_node(
'Sub', inputs=['0', 'mean_out'], outputs=['sub_out'])
sub_mean = onnx.helper.make_node('Sub',
inputs=['0', 'mean_out'],
outputs=['sub_out'])
sub_pow = onnx.helper.make_node(
'Pow', inputs=['sub_out', 'pow'], outputs=['pow_out'])
sub_pow = onnx.helper.make_node('Pow',
inputs=['sub_out', 'pow'],
outputs=['pow_out'])
var = onnx.helper.make_node(
'ReduceMean', inputs=['pow_out'], outputs=['var_out'], axes=axes)
var = onnx.helper.make_node('ReduceMean',
inputs=['pow_out'],
outputs=['var_out'],
axes=axes)
add = onnx.helper.make_node(
'Add', inputs=['var_out', 'epsilon'], outputs=['add_out'])
add = onnx.helper.make_node('Add',
inputs=['var_out', 'epsilon'],
outputs=['add_out'])
sqrt = onnx.helper.make_node(
'Sqrt', inputs=['add_out'], outputs=['sqrt_out'])
sqrt = onnx.helper.make_node('Sqrt',
inputs=['add_out'],
outputs=['sqrt_out'])
div = onnx.helper.make_node(
'Div', inputs=['sub_out', 'sqrt_out'], outputs=['div_out'])
div = onnx.helper.make_node('Div',
inputs=['sub_out', 'sqrt_out'],
outputs=['div_out'])
mul = onnx.helper.make_node(
'Mul', inputs=['scale', 'div_out'], outputs=['mul_out'])
mul = onnx.helper.make_node('Mul',
inputs=['scale', 'div_out'],
outputs=['mul_out'])
bias_add = onnx.helper.make_node(
'Add', inputs=['mul_out', 'bias'], outputs=['1'])
bias_add = onnx.helper.make_node('Add',
inputs=['mul_out', 'bias'],
outputs=['1'])
return ([mean, sub_mean, sub_pow, var, add, sqrt, div, mul, bias_add],
[x, scale, bias], [y], [pow_tensor, epsilon_tensor])
return ([mean, sub_mean, sub_pow, var, add, sqrt, div, mul,
bias_add], [x, scale, bias], [y], [pow_tensor, epsilon_tensor])
@onnx_test
......@@ -2554,14 +2594,16 @@ def layernorm_op_test():
return ([node], [x, w, b], [output])
@onnx_test
def leaky_relu_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
node = onnx.helper.make_node(
'LeakyRelu', inputs=['0'], outputs=['1'], alpha=0.01)
node = onnx.helper.make_node('LeakyRelu',
inputs=['0'],
outputs=['1'],
alpha=0.01)
return ([node], [x], [y])
......@@ -2569,11 +2611,10 @@ def leaky_relu_test():
@onnx_test
def less_test():
ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
x1 = helper.make_tensor(
"x1",
data_type=TensorProto.FLOAT,
dims=(2, 3),
vals=ax1.astype(np.float32))
x1 = helper.make_tensor("x1",
data_type=TensorProto.FLOAT,
dims=(2, 3),
vals=ax1.astype(np.float32))
x2 = helper.make_tensor_value_info('x2', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
......@@ -2673,8 +2714,10 @@ def logsoftmax_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 5, 6])
node = onnx.helper.make_node(
'LogSoftmax', inputs=['x'], outputs=['y'], axis=1)
node = onnx.helper.make_node('LogSoftmax',
inputs=['x'],
outputs=['y'],
axis=1)
return ([node], [x], [y])
......@@ -2684,16 +2727,17 @@ def logsoftmax_nonstd_input_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [6, 9])
y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 4])
node0 = onnx.helper.make_node(
'Slice',
inputs=['0'],
axes=[0, 1],
starts=[1, 0],
ends=[4, 4],
outputs=['1'])
node0 = onnx.helper.make_node('Slice',
inputs=['0'],
axes=[0, 1],
starts=[1, 0],
ends=[4, 4],
outputs=['1'])
node1 = onnx.helper.make_node(
'LogSoftmax', inputs=['1'], outputs=['2'], axis=-1)
node1 = onnx.helper.make_node('LogSoftmax',
inputs=['1'],
outputs=['2'],
axis=-1)
return ([node0, node1], [x], [y])
......@@ -2781,8 +2825,10 @@ def lpnormalization_axis_error_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node(
'LpNormalization', inputs=['x'], outputs=['y'], axis=2)
node = onnx.helper.make_node('LpNormalization',
inputs=['x'],
outputs=['y'],
axis=2)
return ([node], [x], [y])
......@@ -2819,8 +2865,10 @@ def lpnormalization_l2_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
node = onnx.helper.make_node(
'LpNormalization', inputs=['x'], outputs=['y'], p=2)
node = onnx.helper.make_node('LpNormalization',
inputs=['x'],
outputs=['y'],
p=2)
return ([node], [x], [y])
......@@ -2829,8 +2877,10 @@ def lpnormalization_p_error_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node(
'LpNormalization', inputs=['x'], outputs=['y'], p=3)
node = onnx.helper.make_node('LpNormalization',
inputs=['x'],
outputs=['y'],
p=3)
return ([node], [x], [y])
......@@ -2839,14 +2889,13 @@ def lrn_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 28, 24, 24])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 28, 24, 24])
node = onnx.helper.make_node(
'LRN',
inputs=['0'],
size=5,
alpha=0.0001,
beta=0.75,
bias=1.0,
outputs=['1'])
node = onnx.helper.make_node('LRN',
inputs=['0'],
size=5,
alpha=0.0001,
beta=0.75,
bias=1.0,
outputs=['1'])
return ([node], [x], [y])
......@@ -2977,14 +3026,13 @@ def maxpool_notset_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 1, 1])
node = onnx.helper.make_node(
'MaxPool',
inputs=['x'],
outputs=['y'],
kernel_shape=[6, 6],
strides=[2, 2],
pads=[0, 0, 1, 1],
auto_pad='NOTSET')
node = onnx.helper.make_node('MaxPool',
inputs=['x'],
outputs=['y'],
kernel_shape=[6, 6],
strides=[2, 2],
pads=[0, 0, 1, 1],
auto_pad='NOTSET')
return ([node], [x], [y])
......@@ -2994,12 +3042,11 @@ def maxpool_same_upper_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 5, 5])
node = onnx.helper.make_node(
'MaxPool',
inputs=['x'],
outputs=['y'],
kernel_shape=[2, 2],
auto_pad='SAME_UPPER')
node = onnx.helper.make_node('MaxPool',
inputs=['x'],
outputs=['y'],
kernel_shape=[2, 2],
auto_pad='SAME_UPPER')
return ([node], [x], [y])
......@@ -3016,8 +3063,9 @@ def mean_broadcast_test():
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT,
[1, 2, 3, 4])
node = onnx.helper.make_node(
"Mean", inputs=["0", "1", "2", "3", "4"], outputs=["mean"])
node = onnx.helper.make_node("Mean",
inputs=["0", "1", "2", "3", "4"],
outputs=["mean"])
return ([node], [data_0, data_1, data_2, data_3, data_4], [mean])
......@@ -3031,8 +3079,9 @@ def mean_fp16_test():
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT16,
[1, 2, 3])
node = onnx.helper.make_node(
"Mean", inputs=["0", "1", "2"], outputs=["mean"])
node = onnx.helper.make_node("Mean",
inputs=["0", "1", "2"],
outputs=["mean"])
return ([node], [data_0, data_1, data_2], [mean])
......@@ -3045,8 +3094,9 @@ def mean_invalid_broadcast_test():
mean = helper.make_tensor_value_info('mean', TensorProto.FLOAT, [1, 2, 3])
node = onnx.helper.make_node(
"Mean", inputs=["0", "1", "2"], outputs=["mean"])
node = onnx.helper.make_node("Mean",
inputs=["0", "1", "2"],
outputs=["mean"])
return ([node], [data_0, data_1, data_2], [mean])
......@@ -3099,12 +3149,11 @@ def multinomial_test():
output = helper.make_tensor_value_info("output", TensorProto.INT32,
[1, 10])
node = onnx.helper.make_node(
'Multinomial',
inputs=['input'],
sample_size=sample_size,
seed=seed,
outputs=['output'])
node = onnx.helper.make_node('Multinomial',
inputs=['input'],
sample_size=sample_size,
seed=seed,
outputs=['output'])
return ([node], [input], [output])
......@@ -3116,11 +3165,10 @@ def multinomial_generated_seed_test():
output = helper.make_tensor_value_info("output", TensorProto.INT32,
[1, 10])
node = onnx.helper.make_node(
'Multinomial',
inputs=['input'],
sample_size=sample_size,
outputs=['output'])
node = onnx.helper.make_node('Multinomial',
inputs=['input'],
sample_size=sample_size,
outputs=['output'])
return ([node], [input], [output])
......@@ -3133,12 +3181,11 @@ def multinomial_dtype_error_test():
output = helper.make_tensor_value_info("output", TensorProto.INT64,
[1, 10])
node = onnx.helper.make_node(
'Multinomial',
inputs=['input'],
sample_size=sample_size,
dtype=dtype,
outputs=['output'])
node = onnx.helper.make_node('Multinomial',
inputs=['input'],
sample_size=sample_size,
dtype=dtype,
outputs=['output'])
return ([node], [input], [output])
......@@ -3152,13 +3199,12 @@ def multinomial_int64_test():
output = helper.make_tensor_value_info("output", TensorProto.INT64,
[1, 10])
node = onnx.helper.make_node(
'Multinomial',
inputs=['input'],
sample_size=sample_size,
dtype=dtype,
seed=seed,
outputs=['output'])
node = onnx.helper.make_node('Multinomial',
inputs=['input'],
sample_size=sample_size,
dtype=dtype,
seed=seed,
outputs=['output'])
return ([node], [input], [output])
......@@ -3186,14 +3232,14 @@ def nms_test():
out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
[6, 3])
node = onnx.helper.make_node(
'NonMaxSuppression',
inputs=[
'boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold',
'score_threshold'
],
outputs=['selected_indices'],
center_point_box=1)
node = onnx.helper.make_node('NonMaxSuppression',
inputs=[
'boxes', 'scores',
'max_output_boxes_per_class',
'iou_threshold', 'score_threshold'
],
outputs=['selected_indices'],
center_point_box=1)
return ([node], [b, s, mo, iou, st], [out])
......@@ -3223,8 +3269,10 @@ def no_pad_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 2])
node = onnx.helper.make_node(
'Pad', inputs=['0'], pads=[0, 0, 0, 0], outputs=['1'])
node = onnx.helper.make_node('Pad',
inputs=['0'],
pads=[0, 0, 0, 0],
outputs=['1'])
return ([node], [x], [y])
......@@ -3234,8 +3282,9 @@ def nonzero_dynamic_test():
x = helper.make_tensor_value_info('data', TensorProto.BOOL, [2, 2])
y = helper.make_tensor_value_info('indices', TensorProto.INT64, [2, 3])
node = onnx.helper.make_node(
'NonZero', inputs=['data'], outputs=['indices'])
node = onnx.helper.make_node('NonZero',
inputs=['data'],
outputs=['indices'])
return ([node], [x], [y])
......@@ -3243,15 +3292,15 @@ def nonzero_dynamic_test():
@onnx_test
def nonzero_test():
data1 = np.array([[1., 0.], [1., 1.]])
data = helper.make_tensor(
name='data',
data_type=TensorProto.FLOAT,
dims=data1.shape,
vals=data1.flatten().astype(np.float))
data = helper.make_tensor(name='data',
data_type=TensorProto.FLOAT,
dims=data1.shape,
vals=data1.flatten().astype(np.float))
y = helper.make_tensor_value_info('indices', TensorProto.INT64, [2, 3])
node = onnx.helper.make_node(
'NonZero', inputs=['data'], outputs=['indices'])
node = onnx.helper.make_node('NonZero',
inputs=['data'],
outputs=['indices'])
return ([node], [], [y], [data])
......@@ -3259,15 +3308,15 @@ def nonzero_test():
@onnx_test
def nonzero_int_test():
data1 = np.array([[1, 1, 0], [1, 0, 1]])
data = helper.make_tensor(
name='data',
data_type=TensorProto.INT16,
dims=data1.shape,
vals=data1.flatten().astype(np.int16))
data = helper.make_tensor(name='data',
data_type=TensorProto.INT16,
dims=data1.shape,
vals=data1.flatten().astype(np.int16))
y = helper.make_tensor_value_info('indices', TensorProto.INT64, [2, 4])
node = onnx.helper.make_node(
'NonZero', inputs=['data'], outputs=['indices'])
node = onnx.helper.make_node('NonZero',
inputs=['data'],
outputs=['indices'])
return ([node], [], [y], [data])
......@@ -3281,17 +3330,15 @@ def onehot_test():
values = helper.make_tensor_value_info("values", TensorProto.FLOAT16, [2])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [3, 5, 2])
depth_tensor = helper.make_tensor(
name="depth",
data_type=TensorProto.INT32,
dims=None,
vals=depth.astype(int))
depth_tensor = helper.make_tensor(name="depth",
data_type=TensorProto.INT32,
dims=None,
vals=depth.astype(int))
node = onnx.helper.make_node(
'OneHot',
inputs=['indices', 'depth', 'values'],
outputs=['y'],
axis=axis_value)
node = onnx.helper.make_node('OneHot',
inputs=['indices', 'depth', 'values'],
outputs=['y'],
axis=axis_value)
return ([node], [indices, values], [y], [depth_tensor])
......@@ -3301,8 +3348,10 @@ def pad_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 4])
node = onnx.helper.make_node(
'Pad', inputs=['0'], pads=[1, 1, 1, 1], outputs=['1'])
node = onnx.helper.make_node('Pad',
inputs=['0'],
pads=[1, 1, 1, 1],
outputs=['1'])
return ([node], [x], [y])
......@@ -3310,28 +3359,31 @@ def pad_test():
@onnx_test
def pad_3arg_test():
values = np.array([1])
val_tensor = helper.make_tensor(
name='val',
data_type=TensorProto.FLOAT,
dims=values.reshape(()).shape,
vals=values.astype(float))
arg_val = onnx.helper.make_node(
'Constant', inputs=[], outputs=['arg_val'], value=val_tensor)
val_tensor = helper.make_tensor(name='val',
data_type=TensorProto.FLOAT,
dims=values.reshape(()).shape,
vals=values.astype(float))
arg_val = onnx.helper.make_node('Constant',
inputs=[],
outputs=['arg_val'],
value=val_tensor)
sizes = np.array([1, 1, 2, 2])
pad_tensor = helper.make_tensor(
name='pad_size',
data_type=TensorProto.INT32,
dims=sizes.shape,
vals=sizes.astype(int))
arg_pad = onnx.helper.make_node(
'Constant', inputs=[], outputs=['arg_pad'], value=pad_tensor)
pad_tensor = helper.make_tensor(name='pad_size',
data_type=TensorProto.INT32,
dims=sizes.shape,
vals=sizes.astype(int))
arg_pad = onnx.helper.make_node('Constant',
inputs=[],
outputs=['arg_pad'],
value=pad_tensor)
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [5, 5])
node = onnx.helper.make_node(
'Pad', inputs=['0', 'arg_pad', 'arg_val'], outputs=['1'])
node = onnx.helper.make_node('Pad',
inputs=['0', 'arg_pad', 'arg_val'],
outputs=['1'])
return ([arg_val, arg_pad, node], [x], [y])
......@@ -3342,16 +3394,19 @@ def pad_reflect_test():
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 5])
sizes = np.array([0, 2, 0, 1])
pad_tensor = helper.make_tensor(
name='pad_size',
data_type=TensorProto.INT32,
dims=sizes.shape,
vals=sizes.astype(int))
arg_pad = onnx.helper.make_node(
'Constant', inputs=[], outputs=['arg_pad'], value=pad_tensor)
node = onnx.helper.make_node(
'Pad', mode='reflect', inputs=['0', 'arg_pad'], outputs=['1'])
pad_tensor = helper.make_tensor(name='pad_size',
data_type=TensorProto.INT32,
dims=sizes.shape,
vals=sizes.astype(int))
arg_pad = onnx.helper.make_node('Constant',
inputs=[],
outputs=['arg_pad'],
value=pad_tensor)
node = onnx.helper.make_node('Pad',
mode='reflect',
inputs=['0', 'arg_pad'],
outputs=['1'])
return ([arg_pad, node], [x], [y])
......@@ -3362,16 +3417,19 @@ def pad_reflect_multiaxis_test():
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 5])
sizes = np.array([0, 2, 2, 0])
pad_tensor = helper.make_tensor(
name='pad_size',
data_type=TensorProto.INT32,
dims=sizes.shape,
vals=sizes.astype(int))
arg_pad = onnx.helper.make_node(
'Constant', inputs=[], outputs=['arg_pad'], value=pad_tensor)
node = onnx.helper.make_node(
'Pad', mode='reflect', inputs=['0', 'arg_pad'], outputs=['1'])
pad_tensor = helper.make_tensor(name='pad_size',
data_type=TensorProto.INT32,
dims=sizes.shape,
vals=sizes.astype(int))
arg_pad = onnx.helper.make_node('Constant',
inputs=[],
outputs=['arg_pad'],
value=pad_tensor)
node = onnx.helper.make_node('Pad',
mode='reflect',
inputs=['0', 'arg_pad'],
outputs=['1'])
return ([arg_pad, node], [x], [y])
......@@ -3429,13 +3487,15 @@ def prefix_scan_sum_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 2, 2])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 2])
axis_val = np.array([0])
axis_tensor = helper.make_tensor(
name="axis",
data_type=TensorProto.INT32,
dims=axis_val.shape,
vals=axis_val.astype(int))
node = onnx.helper.make_node(
'CumSum', inputs=['x', 'axis'], outputs=['y'], exclusive=1, reverse=1)
axis_tensor = helper.make_tensor(name="axis",
data_type=TensorProto.INT32,
dims=axis_val.shape,
vals=axis_val.astype(int))
node = onnx.helper.make_node('CumSum',
inputs=['x', 'axis'],
outputs=['y'],
exclusive=1,
reverse=1)
return ([node], [x], [y], [axis_tensor])
......@@ -3508,8 +3568,10 @@ def make_quantizelinear_axis_graph(axis):
arg_out = helper.make_tensor_value_info('out', TensorProto.INT8,
[1, 1, 5, 1])
node = onnx.helper.make_node(
'QuantizeLinear', inputs=['0', '1', '2'], outputs=['out'], axis=axis)
node = onnx.helper.make_node('QuantizeLinear',
inputs=['0', '1', '2'],
outputs=['out'],
axis=axis)
return ([node], [arg0, arg1, arg2], [arg_out])
......@@ -3534,15 +3596,14 @@ def randomnormal_test():
output = helper.make_tensor_value_info('output', TensorProto.DOUBLE,
[2, 3, 4])
node = onnx.helper.make_node(
'RandomNormal',
inputs=[],
outputs=['output'],
dtype=dtype,
mean=mean,
scale=scale,
seed=seed,
shape=shape)
node = onnx.helper.make_node('RandomNormal',
inputs=[],
outputs=['output'],
dtype=dtype,
mean=mean,
scale=scale,
seed=seed,
shape=shape)
return ([node], [], [output])
......@@ -3554,12 +3615,11 @@ def randomnormal_dtype_error_test():
output = helper.make_tensor_value_info('output', TensorProto.INT32,
[2, 3, 4])
node = onnx.helper.make_node(
'RandomNormal',
inputs=[],
outputs=['output'],
dtype=dtype,
shape=shape)
node = onnx.helper.make_node('RandomNormal',
inputs=[],
outputs=['output'],
dtype=dtype,
shape=shape)
return ([node], [], [output])
......@@ -3571,11 +3631,10 @@ def randomnormal_generated_seed_test():
output = helper.make_tensor_value_info("output", TensorProto.INT32,
[1, 10])
node = onnx.helper.make_node(
'RandomNormal',
inputs=['input'],
sample_size=sample_size,
outputs=['output'])
node = onnx.helper.make_node('RandomNormal',
inputs=['input'],
sample_size=sample_size,
outputs=['output'])
return ([node], [input], [output])
......@@ -3586,8 +3645,10 @@ def randomnormal_shape_error_test():
output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
[2, 3, 4])
node = onnx.helper.make_node(
'RandomNormal', inputs=[], outputs=['output'], dtype=dtype)
node = onnx.helper.make_node('RandomNormal',
inputs=[],
outputs=['output'],
dtype=dtype)
return ([node], [], [output])
......@@ -3603,14 +3664,13 @@ def randomnormallike_test():
output = helper.make_tensor_value_info('output', TensorProto.FLOAT16,
[2, 3, 4])
node = onnx.helper.make_node(
'RandomNormalLike',
inputs=['input'],
outputs=['output'],
dtype=dtype,
mean=mean,
scale=scale,
seed=seed)
node = onnx.helper.make_node('RandomNormalLike',
inputs=['input'],
outputs=['output'],
dtype=dtype,
mean=mean,
scale=scale,
seed=seed)
return ([node], [input], [output])
......@@ -3623,8 +3683,10 @@ def randomnormallike_type_error_test():
output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
[2, 3, 4])
node = onnx.helper.make_node(
'RandomNormalLike', inputs=['input'], outputs=['output'], seed=seed)
node = onnx.helper.make_node('RandomNormalLike',
inputs=['input'],
outputs=['output'],
seed=seed)
return ([node], [input], [output])
......@@ -3639,15 +3701,14 @@ def randomuniform_test():
output = helper.make_tensor_value_info('output', TensorProto.DOUBLE,
[2, 3, 4])
node = onnx.helper.make_node(
'RandomUniform',
inputs=[],
outputs=['output'],
dtype=dtype,
high=high,
low=low,
seed=seed,
shape=shape)
node = onnx.helper.make_node('RandomUniform',
inputs=[],
outputs=['output'],
dtype=dtype,
high=high,
low=low,
seed=seed,
shape=shape)
return ([node], [], [output])
......@@ -3659,12 +3720,11 @@ def randomuniform_dtype_error_test():
output = helper.make_tensor_value_info('output', TensorProto.INT32,
[2, 3, 4])
node = onnx.helper.make_node(
'RandomUniform',
inputs=[],
outputs=['output'],
dtype=dtype,
shape=shape)
node = onnx.helper.make_node('RandomUniform',
inputs=[],
outputs=['output'],
dtype=dtype,
shape=shape)
return ([node], [], [output])
......@@ -3676,11 +3736,10 @@ def randomuniform_generated_seed_test():
output = helper.make_tensor_value_info("output", TensorProto.INT32,
[1, 10])
node = onnx.helper.make_node(
'RandomUniform',
inputs=['input'],
sample_size=sample_size,
outputs=['output'])
node = onnx.helper.make_node('RandomUniform',
inputs=['input'],
sample_size=sample_size,
outputs=['output'])
return ([node], [input], [output])
......@@ -3691,8 +3750,10 @@ def randomuniform_shape_error_test():
output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
[2, 3, 4])
node = onnx.helper.make_node(
'RandomUniform', inputs=[], outputs=['output'], dtype=dtype)
node = onnx.helper.make_node('RandomUniform',
inputs=[],
outputs=['output'],
dtype=dtype)
return ([node], [], [output])
......@@ -3708,14 +3769,13 @@ def randomuniformlike_test():
output = helper.make_tensor_value_info('output', TensorProto.FLOAT16,
[2, 3, 4])
node = onnx.helper.make_node(
'RandomUniformLike',
inputs=['input'],
outputs=['output'],
dtype=dtype,
high=high,
low=low,
seed=seed)
node = onnx.helper.make_node('RandomUniformLike',
inputs=['input'],
outputs=['output'],
dtype=dtype,
high=high,
low=low,
seed=seed)
return ([node], [input], [output])
......@@ -3728,8 +3788,10 @@ def randomuniformlike_type_error_test():
output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
[2, 3, 4])
node = onnx.helper.make_node(
'RandomUniformLike', inputs=['input'], outputs=['output'], seed=seed)
node = onnx.helper.make_node('RandomUniformLike',
inputs=['input'],
outputs=['output'],
seed=seed)
return ([node], [input], [output])
......@@ -3741,32 +3803,36 @@ def range_test():
limit_val = np.array([6])
delta_val = np.array([-3])
start_tensor = helper.make_tensor(
name='start_val',
data_type=TensorProto.INT64,
dims=start_val.reshape(()).shape,
vals=start_val.astype(np.int64))
start = onnx.helper.make_node(
'Constant', inputs=[], outputs=['start'], value=start_tensor)
limit_tensor = helper.make_tensor(
name='limit_val',
data_type=TensorProto.INT64,
dims=limit_val.reshape(()).shape,
vals=limit_val.astype(np.int64))
limit = onnx.helper.make_node(
'Constant', inputs=[], outputs=['limit'], value=limit_tensor)
delta_tensor = helper.make_tensor(
name='delta_val',
data_type=TensorProto.INT64,
dims=delta_val.reshape(()).shape,
vals=delta_val.astype(np.int64))
delta = onnx.helper.make_node(
'Constant', inputs=[], outputs=['delta'], value=delta_tensor)
node = onnx.helper.make_node(
'Range', inputs=['start', 'limit', 'delta'], outputs=['1'])
start_tensor = helper.make_tensor(name='start_val',
data_type=TensorProto.INT64,
dims=start_val.reshape(()).shape,
vals=start_val.astype(np.int64))
start = onnx.helper.make_node('Constant',
inputs=[],
outputs=['start'],
value=start_tensor)
limit_tensor = helper.make_tensor(name='limit_val',
data_type=TensorProto.INT64,
dims=limit_val.reshape(()).shape,
vals=limit_val.astype(np.int64))
limit = onnx.helper.make_node('Constant',
inputs=[],
outputs=['limit'],
value=limit_tensor)
delta_tensor = helper.make_tensor(name='delta_val',
data_type=TensorProto.INT64,
dims=delta_val.reshape(()).shape,
vals=delta_val.astype(np.int64))
delta = onnx.helper.make_node('Constant',
inputs=[],
outputs=['delta'],
value=delta_tensor)
node = onnx.helper.make_node('Range',
inputs=['start', 'limit', 'delta'],
outputs=['1'])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
......@@ -3780,32 +3846,36 @@ def range_float_test():
limit_val = np.array([11])
delta_val = np.array([2])
start_tensor = helper.make_tensor(
name='start_val',
data_type=TensorProto.FLOAT,
dims=start_val.reshape(()).shape,
vals=start_val.astype(np.float))
start = onnx.helper.make_node(
'Constant', inputs=[], outputs=['start'], value=start_tensor)
limit_tensor = helper.make_tensor(
name='limit_val',
data_type=TensorProto.FLOAT,
dims=limit_val.reshape(()).shape,
vals=limit_val.astype(np.float))
limit = onnx.helper.make_node(
'Constant', inputs=[], outputs=['limit'], value=limit_tensor)
delta_tensor = helper.make_tensor(
name='delta_val',
data_type=TensorProto.FLOAT,
dims=delta_val.reshape(()).shape,
vals=delta_val.astype(np.float))
delta = onnx.helper.make_node(
'Constant', inputs=[], outputs=['delta'], value=delta_tensor)
node = onnx.helper.make_node(
'Range', inputs=['start', 'limit', 'delta'], outputs=['1'])
start_tensor = helper.make_tensor(name='start_val',
data_type=TensorProto.FLOAT,
dims=start_val.reshape(()).shape,
vals=start_val.astype(np.float))
start = onnx.helper.make_node('Constant',
inputs=[],
outputs=['start'],
value=start_tensor)
limit_tensor = helper.make_tensor(name='limit_val',
data_type=TensorProto.FLOAT,
dims=limit_val.reshape(()).shape,
vals=limit_val.astype(np.float))
limit = onnx.helper.make_node('Constant',
inputs=[],
outputs=['limit'],
value=limit_tensor)
delta_tensor = helper.make_tensor(name='delta_val',
data_type=TensorProto.FLOAT,
dims=delta_val.reshape(()).shape,
vals=delta_val.astype(np.float))
delta = onnx.helper.make_node('Constant',
inputs=[],
outputs=['delta'],
value=delta_tensor)
node = onnx.helper.make_node('Range',
inputs=['start', 'limit', 'delta'],
outputs=['1'])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
......@@ -3832,8 +3902,11 @@ def reducel1_test():
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])
axes = [-2]
node = onnx.helper.make_node(
'ReduceL1', inputs=['x'], outputs=['y'], axes=axes, keepdims=0)
node = onnx.helper.make_node('ReduceL1',
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims=0)
return ([node], [x], [y])
......@@ -3844,8 +3917,11 @@ def reducel2_test():
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 5])
axes = [-1]
node = onnx.helper.make_node(
'ReduceL2', inputs=['x'], outputs=['y'], axes=axes, keepdims=0)
node = onnx.helper.make_node('ReduceL2',
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims=0)
return ([node], [x], [y])
......@@ -3856,8 +3932,11 @@ def reduce_log_sum_test():
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 1, 5, 6])
axes = [-3]
node = onnx.helper.make_node(
'ReduceLogSum', inputs=['x'], outputs=['y'], axes=axes, keepdims=1)
node = onnx.helper.make_node('ReduceLogSum',
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims=1)
return ([node], [x], [y])
......@@ -3868,8 +3947,11 @@ def reduce_log_sum_exp_test():
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [4, 5, 6])
axes = [-4]
node = onnx.helper.make_node(
'ReduceLogSumExp', inputs=['x'], outputs=['y'], axes=axes, keepdims=1)
node = onnx.helper.make_node('ReduceLogSumExp',
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims=1)
return ([node], [x], [y])
......@@ -3880,8 +3962,11 @@ def reducemax_test():
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])
axes = [2]
node = onnx.helper.make_node(
'ReduceMax', inputs=['x'], outputs=['y'], axes=axes, keepdims=0)
node = onnx.helper.make_node('ReduceMax',
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims=0)
return ([node], [x], [y])
......@@ -3892,8 +3977,11 @@ def reducemean_test():
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
axes = [2, 3]
node = onnx.helper.make_node(
'ReduceMean', inputs=['x'], outputs=['y'], axes=axes, keepdims=0)
node = onnx.helper.make_node('ReduceMean',
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims=0)
return ([node], [x], [y])
......@@ -3904,8 +3992,11 @@ def reducemean_keepdims_test():
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 6])
axes = [2]
node = onnx.helper.make_node(
'ReduceMean', inputs=['x'], outputs=['y'], axes=axes, keepdims=1)
node = onnx.helper.make_node('ReduceMean',
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims=1)
return ([node], [x], [y])
......@@ -3916,8 +4007,11 @@ def reducemin_test():
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 1, 5, 1])
axes = [1, 3]
node = onnx.helper.make_node(
'ReduceMin', inputs=['x'], outputs=['y'], axes=axes, keepdims=1)
node = onnx.helper.make_node('ReduceMin',
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims=1)
return ([node], [x], [y])
......@@ -3928,8 +4022,11 @@ def reduceprod_test():
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 6])
axes = [2]
node = onnx.helper.make_node(
'ReduceProd', inputs=['x'], outputs=['y'], axes=axes, keepdims=1)
node = onnx.helper.make_node('ReduceProd',
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims=1)
return ([node], [x], [y])
......@@ -3940,8 +4037,11 @@ def reducesum_test():
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 6])
axes = [2]
node = onnx.helper.make_node(
'ReduceSum', inputs=['x'], outputs=['y'], axes=axes, keepdims=0)
node = onnx.helper.make_node('ReduceSum',
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims=0)
return ([node], [x], [y])
......@@ -3951,18 +4051,16 @@ def reducesum_empty_axes_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 6])
axes = np.array([], dtype=np.int64)
axes_tensor = helper.make_tensor(
name="axes",
data_type=TensorProto.INT64,
dims=axes.shape,
vals=axes.astype(np.int64))
axes_tensor = helper.make_tensor(name="axes",
data_type=TensorProto.INT64,
dims=axes.shape,
vals=axes.astype(np.int64))
node = onnx.helper.make_node(
'ReduceSum',
inputs=['x', 'axes'],
outputs=['y'],
keepdims=0,
noop_with_empty_axes=False)
node = onnx.helper.make_node('ReduceSum',
inputs=['x', 'axes'],
outputs=['y'],
keepdims=0,
noop_with_empty_axes=False)
return ([node], [x], [y], [axes_tensor])
......@@ -3972,18 +4070,16 @@ def reducesum_noop_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 6])
axes = np.array([], dtype=np.int64)
axes_tensor = helper.make_tensor(
name="axes",
data_type=TensorProto.INT64,
dims=axes.shape,
vals=axes.astype(np.int64))
axes_tensor = helper.make_tensor(name="axes",
data_type=TensorProto.INT64,
dims=axes.shape,
vals=axes.astype(np.int64))
node = onnx.helper.make_node(
'ReduceSum',
inputs=['x', 'axes'],
outputs=['y'],
keepdims=0,
noop_with_empty_axes=True)
node = onnx.helper.make_node('ReduceSum',
inputs=['x', 'axes'],
outputs=['y'],
keepdims=0,
noop_with_empty_axes=True)
return ([node], [x], [y], [axes_tensor])
......@@ -3994,8 +4090,11 @@ def reducesum_keepdims_test():
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 1])
axes = [2, 3]
node = onnx.helper.make_node(
'ReduceSum', inputs=['x'], outputs=['y'], axes=axes, keepdims=1)
node = onnx.helper.make_node('ReduceSum',
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims=1)
return ([node], [x], [y])
......@@ -4006,8 +4105,11 @@ def reducesum_multiaxis_test():
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 1])
axes = [2, 3]
node = onnx.helper.make_node(
'ReduceSum', inputs=['x'], outputs=['y'], axes=axes, keepdims=0)
node = onnx.helper.make_node('ReduceSum',
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims=0)
return ([node], [x], [y])
......@@ -4018,8 +4120,11 @@ def reducesum_square_test():
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])
axes = [-2]
node = onnx.helper.make_node(
'ReduceSumSquare', inputs=['x'], outputs=['y'], axes=axes, keepdims=0)
node = onnx.helper.make_node('ReduceSumSquare',
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims=0)
return ([node], [x], [y])
......@@ -4034,8 +4139,10 @@ def reshape_test():
node = onnx.helper.make_node('Reshape', inputs=['0', '1'], outputs=['2'])
node2 = onnx.helper.make_node(
'Reshape', inputs=['0'], shape=x_shape_list, outputs=['3'])
node2 = onnx.helper.make_node('Reshape',
inputs=['0'],
shape=x_shape_list,
outputs=['3'])
return ([node, node2], [x, x_shape], [y, y2],
[helper.make_tensor('1', TensorProto.INT64, [2], [3, 8])])
......@@ -4053,8 +4160,10 @@ def reshape_non_standard_test():
perm=[0, 2, 1],
)
res = onnx.helper.make_node(
'Reshape', inputs=['trans_x'], outputs=['y'], shape=[4, 3, 2])
res = onnx.helper.make_node('Reshape',
inputs=['trans_x'],
outputs=['y'],
shape=[4, 3, 2])
return ([trans, res], [x], [y])
......@@ -4062,11 +4171,10 @@ def reshape_non_standard_test():
@onnx_test
def resize_downsample_f_test():
scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
scale_tensor = helper.make_tensor(
name='scales',
data_type=TensorProto.FLOAT,
dims=scales.shape,
vals=scales.flatten().astype(np.float32))
scale_tensor = helper.make_tensor(name='scales',
data_type=TensorProto.FLOAT,
dims=scales.shape,
vals=scales.flatten().astype(np.float32))
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 4])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [])
......@@ -4085,22 +4193,20 @@ def resize_downsample_f_test():
@onnx_test
def resize_downsample_c_test():
    # Resize downsamples H and W by 0.6 with asymmetric coordinate
    # transform and nearest/ceil rounding: (1, 1, 2, 4) -> (1, 1, 1, 2).
    scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
    # 'scales' is provided as a graph initializer (4th tuple element below),
    # so the node's second input (roi) is left empty.
    scale_tensor = helper.make_tensor(name='scales',
                                      data_type=TensorProto.FLOAT,
                                      dims=scales.shape,
                                      vals=scales.flatten().astype(np.float32))
    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 4])
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 1, 1, 2])
    node = onnx.helper.make_node('Resize',
                                 inputs=['X', '', 'scales'],
                                 outputs=['Y'],
                                 coordinate_transformation_mode='asymmetric',
                                 mode='nearest',
                                 nearest_mode='ceil')
    # Returns (nodes, inputs, outputs, initializers) for the test harness.
    return ([node], [X], [Y], [scale_tensor])
......@@ -4108,17 +4214,18 @@ def resize_downsample_c_test():
@onnx_test
def resize_downsample_linear_test():
    # Resize with mode='linear' downsampling H by 0.6 and W by 0.5;
    # output shape is left unspecified ([]) for the harness to infer.
    scales = np.array([1.0, 1.0, 0.6, 0.5], dtype=np.float32)
    # 'scales' is provided as a graph initializer; roi input left empty.
    scale_tensor = helper.make_tensor(name='scales',
                                      data_type=TensorProto.FLOAT,
                                      dims=scales.shape,
                                      vals=scales.flatten().astype(np.float32))
    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 4])
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [])
    node = onnx.helper.make_node('Resize',
                                 inputs=['X', '', 'scales'],
                                 outputs=['Y'],
                                 mode='linear')
    # Returns (nodes, inputs, outputs, initializers) for the test harness.
    return ([node], [X], [Y], [scale_tensor])
......@@ -4126,25 +4233,25 @@ def resize_downsample_linear_test():
@onnx_test
def resize_nonstd_input_test():
    # Feeds Resize a non-standard (transposed) input: X (1,1,4,2) is
    # transposed to (1,1,2,4) first, then downsampled by 0.6 in H and W.
    scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
    # 'scales' is provided as a graph initializer; roi input left empty.
    scale_tensor = helper.make_tensor(name='scales',
                                      data_type=TensorProto.FLOAT,
                                      dims=scales.shape,
                                      vals=scales.flatten().astype(np.float32))
    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 4, 2])
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 1, 1, 2])
    # Swap the last two axes so Resize sees a transposed (non-packed) input.
    trn = onnx.helper.make_node('Transpose',
                                inputs=['X'],
                                outputs=['TX'],
                                perm=[0, 1, 3, 2])
    node = onnx.helper.make_node('Resize',
                                 inputs=['TX', '', 'scales'],
                                 outputs=['Y'],
                                 coordinate_transformation_mode='asymmetric',
                                 mode='nearest',
                                 nearest_mode='ceil')
    # Returns (nodes, inputs, outputs, initializers) for the test harness.
    return ([trn, node], [X], [Y], [scale_tensor])
......@@ -4152,11 +4259,11 @@ def resize_nonstd_input_test():
@onnx_test
def resize_outsize_test():
out_lens = np.array([1, 1, 4, 6], dtype=np.int64)
out_lens_tensor = helper.make_tensor(
name='out_lens',
data_type=TensorProto.INT64,
dims=out_lens.shape,
vals=out_lens.flatten().astype(np.int64))
out_lens_tensor = helper.make_tensor(name='out_lens',
data_type=TensorProto.INT64,
dims=out_lens.shape,
vals=out_lens.flatten().astype(
np.int64))
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 2])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 1, 4, 6])
......@@ -4175,11 +4282,11 @@ def resize_outsize_test():
@onnx_test
def resize_upsample_linear_ac_test():
scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
scales_tensor = helper.make_tensor(
name='scales',
data_type=TensorProto.FLOAT,
dims=scales.shape,
vals=scales.flatten().astype(np.float32))
scales_tensor = helper.make_tensor(name='scales',
data_type=TensorProto.FLOAT,
dims=scales.shape,
vals=scales.flatten().astype(
np.float32))
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 2])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [])
......@@ -4196,16 +4303,18 @@ def resize_upsample_linear_ac_test():
@onnx_test
def resize_upsample_linear_test():
    # Resize with mode='linear' upsampling H and W by 2x from (1,1,2,2);
    # output shape is left unspecified ([]) for the harness to infer.
    scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
    # 'scales' is provided as a graph initializer; roi input left empty.
    scales_tensor = helper.make_tensor(name='scales',
                                       data_type=TensorProto.FLOAT,
                                       dims=scales.shape,
                                       vals=scales.flatten().astype(
                                           np.float32))
    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 2])
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [])
    node = onnx.helper.make_node('Resize',
                                 inputs=['X', '', 'scales'],
                                 outputs=['Y'],
                                 mode='linear')
    # Returns (nodes, inputs, outputs, initializers) for the test harness.
    return ([node], [X], [Y], [scales_tensor])
......@@ -4213,17 +4322,18 @@ def resize_upsample_linear_test():
@onnx_test
def resize_upsample_pf_test():
    # Resize with mode='nearest' upsampling H by 2x and W by 3x:
    # (1, 1, 2, 2) -> (1, 1, 4, 6).
    scales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)
    # 'scales' is provided as a graph initializer; roi input left empty.
    scale_tensor = helper.make_tensor(name='scales',
                                      data_type=TensorProto.FLOAT,
                                      dims=scales.shape,
                                      vals=scales.flatten().astype(np.float32))
    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 2])
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 1, 4, 6])
    node = onnx.helper.make_node('Resize',
                                 inputs=['X', '', 'scales'],
                                 outputs=['Y'],
                                 mode='nearest')
    # Returns (nodes, inputs, outputs, initializers) for the test harness.
    return ([node], [X], [Y], [scale_tensor])
......@@ -4231,11 +4341,10 @@ def resize_upsample_pf_test():
@onnx_test
def resize_upsample_pc_test():
scales = np.array([1.0, 1.0, 2.0, 1.5], dtype=np.float32)
scale_tensor = helper.make_tensor(
name='scales',
data_type=TensorProto.FLOAT,
dims=scales.shape,
vals=scales.flatten().astype(np.float32))
scale_tensor = helper.make_tensor(name='scales',
data_type=TensorProto.FLOAT,
dims=scales.shape,
vals=scales.flatten().astype(np.float32))
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 4])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 1, 4, 6])
......@@ -4259,8 +4368,9 @@ def roialign_default_test():
bi = helper.make_tensor_value_info('batch_ind', TensorProto.INT64, [8])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [8, 4, 1, 1])
node = onnx.helper.make_node(
'RoiAlign', inputs=['x', 'rois', 'batch_ind'], outputs=['y'])
node = onnx.helper.make_node('RoiAlign',
inputs=['x', 'rois', 'batch_ind'],
outputs=['y'])
return ([node], [x, roi, bi], [y])
......@@ -4356,11 +4466,10 @@ def scatternd_add_test():
output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
[2, 2, 2])
node = onnx.helper.make_node(
'ScatterND',
inputs=['data', 'indices', 'updates'],
outputs=['output'],
reduction="add")
node = onnx.helper.make_node('ScatterND',
inputs=['data', 'indices', 'updates'],
outputs=['output'],
reduction="add")
return ([node], [data, indices, updates], [output])
......@@ -4375,11 +4484,10 @@ def scatternd_mul_test():
output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
[2, 2, 2])
node = onnx.helper.make_node(
'ScatterND',
inputs=['data', 'indices', 'updates'],
outputs=['output'],
reduction="mul")
node = onnx.helper.make_node('ScatterND',
inputs=['data', 'indices', 'updates'],
outputs=['output'],
reduction="mul")
return ([node], [data, indices, updates], [output])
......@@ -4394,8 +4502,9 @@ def scatternd_test():
output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
[2, 2, 2])
node = onnx.helper.make_node(
'ScatterND', inputs=['data', 'indices', 'updates'], outputs=['output'])
node = onnx.helper.make_node('ScatterND',
inputs=['data', 'indices', 'updates'],
outputs=['output'])
return ([node], [data, indices, updates], [output])
......@@ -4405,8 +4514,11 @@ def selu_test():
x = helper.make_tensor_value_info('x', TensorProto.DOUBLE, [2, 3])
y = helper.make_tensor_value_info('y', TensorProto.DOUBLE, [2, 3])
node = onnx.helper.make_node(
'Selu', inputs=['x'], outputs=['y'], alpha=0.3, gamma=0.5)
node = onnx.helper.make_node('Selu',
inputs=['x'],
outputs=['y'],
alpha=0.3,
gamma=0.5)
return ([node], [x], [y])
......@@ -4432,11 +4544,10 @@ def shape_gather_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [7, 3, 10])
z = helper.make_tensor_value_info('z', TensorProto.FLOAT, [1])
value_tensor = helper.make_tensor(
name='const_tensor',
data_type=TensorProto.INT32,
dims=values.shape,
vals=values.flatten().astype(int))
value_tensor = helper.make_tensor(name='const_tensor',
data_type=TensorProto.INT32,
dims=values.shape,
vals=values.flatten().astype(int))
node_const = onnx.helper.make_node(
'Constant',
......@@ -4556,13 +4667,12 @@ def slice_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3, 2])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 2])
node = onnx.helper.make_node(
'Slice',
inputs=['0'],
axes=[0, 1],
starts=[1, 0],
ends=[2, 2],
outputs=['1'])
node = onnx.helper.make_node('Slice',
inputs=['0'],
axes=[0, 1],
starts=[1, 0],
ends=[2, 2],
outputs=['1'])
return ([node], [x], [y])
......@@ -4572,26 +4682,29 @@ def slice_3arg_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 5])
start = np.array([0, 0])
start_tensor = helper.make_tensor(
name="start",
data_type=TensorProto.INT32,
dims=start.shape,
vals=start.astype(int))
start_tensor = helper.make_tensor(name="start",
data_type=TensorProto.INT32,
dims=start.shape,
vals=start.astype(int))
arg_start = helper.make_node(
"Constant", inputs=[], outputs=['arg_start'], value=start_tensor)
arg_start = helper.make_node("Constant",
inputs=[],
outputs=['arg_start'],
value=start_tensor)
end = np.array([2, 5])
end_tensor = helper.make_tensor(
name="end",
data_type=TensorProto.INT32,
dims=end.shape,
vals=end.astype(int))
arg_end = helper.make_node(
"Constant", inputs=[], outputs=['arg_end'], value=end_tensor)
node = onnx.helper.make_node(
'Slice', inputs=['0', 'arg_start', 'arg_end'], outputs=['1'])
end_tensor = helper.make_tensor(name="end",
data_type=TensorProto.INT32,
dims=end.shape,
vals=end.astype(int))
arg_end = helper.make_node("Constant",
inputs=[],
outputs=['arg_end'],
value=end_tensor)
node = onnx.helper.make_node('Slice',
inputs=['0', 'arg_start', 'arg_end'],
outputs=['1'])
return ([arg_start, arg_end, node], [x], [y])
......@@ -4599,40 +4712,44 @@ def slice_3arg_test():
@onnx_test
def slice_5arg_test():
step = np.array([1, 1])
step_tensor = helper.make_tensor(
name="step",
data_type=TensorProto.INT32,
dims=step.shape,
vals=step.astype(int))
arg_step = helper.make_node(
"Constant", inputs=[], outputs=['arg_step'], value=step_tensor)
step_tensor = helper.make_tensor(name="step",
data_type=TensorProto.INT32,
dims=step.shape,
vals=step.astype(int))
arg_step = helper.make_node("Constant",
inputs=[],
outputs=['arg_step'],
value=step_tensor)
axis = np.array([-1, -2])
axis_tensor = helper.make_tensor(
name="axis",
data_type=TensorProto.INT32,
dims=axis.shape,
vals=axis.astype(int))
arg_axis = helper.make_node(
"Constant", inputs=[], outputs=['arg_axis'], value=axis_tensor)
axis_tensor = helper.make_tensor(name="axis",
data_type=TensorProto.INT32,
dims=axis.shape,
vals=axis.astype(int))
arg_axis = helper.make_node("Constant",
inputs=[],
outputs=['arg_axis'],
value=axis_tensor)
end = np.array([-1, -1])
end_tensor = helper.make_tensor(
name="end",
data_type=TensorProto.INT32,
dims=end.shape,
vals=end.astype(int))
arg_end = helper.make_node(
"Constant", inputs=[], outputs=['arg_end'], value=end_tensor)
end_tensor = helper.make_tensor(name="end",
data_type=TensorProto.INT32,
dims=end.shape,
vals=end.astype(int))
arg_end = helper.make_node("Constant",
inputs=[],
outputs=['arg_end'],
value=end_tensor)
start = np.array([-5, -3])
start_tensor = helper.make_tensor(
name="start",
data_type=TensorProto.INT32,
dims=start.shape,
vals=start.astype(int))
arg_start = helper.make_node(
"Constant", inputs=[], outputs=['arg_start'], value=start_tensor)
start_tensor = helper.make_tensor(name="start",
data_type=TensorProto.INT32,
dims=start.shape,
vals=start.astype(int))
arg_start = helper.make_node("Constant",
inputs=[],
outputs=['arg_start'],
value=start_tensor)
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 2])
......@@ -4648,40 +4765,44 @@ def slice_5arg_test():
@onnx_test
def slice_5arg_reverse_test():
step = np.array([-1, 1])
step_tensor = helper.make_tensor(
name="step",
data_type=TensorProto.INT32,
dims=step.shape,
vals=step.astype(int))
arg_step = helper.make_node(
"Constant", inputs=[], outputs=['arg_step'], value=step_tensor)
step_tensor = helper.make_tensor(name="step",
data_type=TensorProto.INT32,
dims=step.shape,
vals=step.astype(int))
arg_step = helper.make_node("Constant",
inputs=[],
outputs=['arg_step'],
value=step_tensor)
axis = np.array([-1, -2])
axis_tensor = helper.make_tensor(
name="axis",
data_type=TensorProto.INT32,
dims=axis.shape,
vals=axis.astype(int))
arg_axis = helper.make_node(
"Constant", inputs=[], outputs=['arg_axis'], value=axis_tensor)
axis_tensor = helper.make_tensor(name="axis",
data_type=TensorProto.INT32,
dims=axis.shape,
vals=axis.astype(int))
arg_axis = helper.make_node("Constant",
inputs=[],
outputs=['arg_axis'],
value=axis_tensor)
end = np.array([-5, -1])
end_tensor = helper.make_tensor(
name="end",
data_type=TensorProto.INT32,
dims=end.shape,
vals=end.astype(int))
arg_end = helper.make_node(
"Constant", inputs=[], outputs=['arg_end'], value=end_tensor)
end_tensor = helper.make_tensor(name="end",
data_type=TensorProto.INT32,
dims=end.shape,
vals=end.astype(int))
arg_end = helper.make_node("Constant",
inputs=[],
outputs=['arg_end'],
value=end_tensor)
start = np.array([-1, -3])
start_tensor = helper.make_tensor(
name="start",
data_type=TensorProto.INT32,
dims=start.shape,
vals=start.astype(int))
arg_start = helper.make_node(
"Constant", inputs=[], outputs=['arg_start'], value=start_tensor)
start_tensor = helper.make_tensor(name="start",
data_type=TensorProto.INT32,
dims=start.shape,
vals=start.astype(int))
arg_start = helper.make_node("Constant",
inputs=[],
outputs=['arg_start'],
value=start_tensor)
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 2])
......@@ -4697,40 +4818,44 @@ def slice_5arg_reverse_test():
@onnx_test
def slice_5arg_step_test():
step = np.array([-2, 2])
step_tensor = helper.make_tensor(
name="step",
data_type=TensorProto.INT32,
dims=step.shape,
vals=step.astype(int))
arg_step = helper.make_node(
"Constant", inputs=[], outputs=['arg_step'], value=step_tensor)
step_tensor = helper.make_tensor(name="step",
data_type=TensorProto.INT32,
dims=step.shape,
vals=step.astype(int))
arg_step = helper.make_node("Constant",
inputs=[],
outputs=['arg_step'],
value=step_tensor)
axis = np.array([-1, -2])
axis_tensor = helper.make_tensor(
name="axis",
data_type=TensorProto.INT32,
dims=axis.shape,
vals=axis.astype(int))
arg_axis = helper.make_node(
"Constant", inputs=[], outputs=['arg_axis'], value=axis_tensor)
axis_tensor = helper.make_tensor(name="axis",
data_type=TensorProto.INT32,
dims=axis.shape,
vals=axis.astype(int))
arg_axis = helper.make_node("Constant",
inputs=[],
outputs=['arg_axis'],
value=axis_tensor)
end = np.array([-5, -1])
end_tensor = helper.make_tensor(
name="end",
data_type=TensorProto.INT32,
dims=end.shape,
vals=end.astype(int))
arg_end = helper.make_node(
"Constant", inputs=[], outputs=['arg_end'], value=end_tensor)
end_tensor = helper.make_tensor(name="end",
data_type=TensorProto.INT32,
dims=end.shape,
vals=end.astype(int))
arg_end = helper.make_node("Constant",
inputs=[],
outputs=['arg_end'],
value=end_tensor)
start = np.array([-1, -3])
start_tensor = helper.make_tensor(
name="start",
data_type=TensorProto.INT32,
dims=start.shape,
vals=start.astype(int))
arg_start = helper.make_node(
"Constant", inputs=[], outputs=['arg_start'], value=start_tensor)
start_tensor = helper.make_tensor(name="start",
data_type=TensorProto.INT32,
dims=start.shape,
vals=start.astype(int))
arg_start = helper.make_node("Constant",
inputs=[],
outputs=['arg_start'],
value=start_tensor)
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 2])
......@@ -4748,13 +4873,12 @@ def slice_max_end_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [10, 20])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [9, 17])
node = onnx.helper.make_node(
'Slice',
inputs=['0'],
axes=[0, 1],
starts=[1, 2],
ends=[3000000000, -1],
outputs=['1'])
node = onnx.helper.make_node('Slice',
inputs=['0'],
axes=[0, 1],
starts=[1, 2],
ends=[3000000000, -1],
outputs=['1'])
return ([node], [x], [y])
......@@ -4774,13 +4898,12 @@ def softmax_nonstd_input_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [6, 8])
y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 4])
node0 = onnx.helper.make_node(
'Slice',
inputs=['0'],
axes=[0, 1],
starts=[1, 0],
ends=[4, 4],
outputs=['1'])
node0 = onnx.helper.make_node('Slice',
inputs=['0'],
axes=[0, 1],
starts=[1, 0],
ends=[4, 4],
outputs=['1'])
node1 = onnx.helper.make_node('Softmax', inputs=['1'], outputs=['2'])
......@@ -4849,12 +4972,11 @@ def split_test():
y2 = helper.make_tensor_value_info('y2', TensorProto.FLOAT, [10, 4])
y3 = helper.make_tensor_value_info('y3', TensorProto.FLOAT, [10, 4])
node = onnx.helper.make_node(
'Split',
inputs=['x'],
outputs=['y1', 'y2', 'y3'],
axis=1,
split=[7, 4, 4])
node = onnx.helper.make_node('Split',
inputs=['x'],
outputs=['y1', 'y2', 'y3'],
axis=1,
split=[7, 4, 4])
return ([node], [x], [y1, y2, y3])
......@@ -4893,14 +5015,14 @@ def squeeze_axes_input_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 1, 5, 1])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 5])
axes = np.array([1, 3], dtype=np.int64)
axes_tensor = helper.make_tensor(
name="axes",
data_type=TensorProto.INT64,
dims=axes.shape,
vals=axes.astype(np.int64))
axes_tensor = helper.make_tensor(name="axes",
data_type=TensorProto.INT64,
dims=axes.shape,
vals=axes.astype(np.int64))
node = onnx.helper.make_node(
'Squeeze', inputs=['x', 'axes'], outputs=['y'])
node = onnx.helper.make_node('Squeeze',
inputs=['x', 'axes'],
outputs=['y'])
return ([node], [x], [y], [axes_tensor])
......@@ -4910,14 +5032,14 @@ def squeeze_empty_axes_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 1, 5, 1])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 5])
axes = np.array([], dtype=np.int64)
axes_tensor = helper.make_tensor(
name="axes",
data_type=TensorProto.INT64,
dims=axes.shape,
vals=axes.astype(np.int64))
axes_tensor = helper.make_tensor(name="axes",
data_type=TensorProto.INT64,
dims=axes.shape,
vals=axes.astype(np.int64))
node = onnx.helper.make_node(
'Squeeze', inputs=['x', 'axes'], outputs=['y'])
node = onnx.helper.make_node('Squeeze',
inputs=['x', 'axes'],
outputs=['y'])
return ([node], [x], [y], [axes_tensor])
......@@ -4929,11 +5051,15 @@ def squeeze_unsqueeze_test():
y = helper.make_tensor_value_info('2', TensorProto.FLOAT,
[1, 1, 3, 1, 2, 1])
node = onnx.helper.make_node(
'Squeeze', inputs=['0'], axes=[0, 2, 3, 5], outputs=['1'])
node = onnx.helper.make_node('Squeeze',
inputs=['0'],
axes=[0, 2, 3, 5],
outputs=['1'])
node2 = onnx.helper.make_node(
'Unsqueeze', inputs=['1'], axes=[0, 1, 3, 5], outputs=['2'])
node2 = onnx.helper.make_node('Unsqueeze',
inputs=['1'],
axes=[0, 1, 3, 5],
outputs=['2'])
return ([node, node2], [x], [y])
......@@ -4964,11 +5090,10 @@ def sub_scalar_test():
arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
[2, 3, 4, 5])
values_tensor = helper.make_tensor(
name='const',
data_type=TensorProto.FLOAT,
dims=values.reshape(()).shape,
vals=values.flatten().astype(float))
values_tensor = helper.make_tensor(name='const',
data_type=TensorProto.FLOAT,
dims=values.reshape(()).shape,
vals=values.flatten().astype(float))
arg_const = onnx.helper.make_node(
'Constant',
......@@ -5025,74 +5150,78 @@ def sum_test():
@onnx_test
def sum_type_test():
valb = np.array([1, 0])
t_bool = helper.make_tensor(
name="bool",
data_type=TensorProto.BOOL,
dims=valb.shape,
vals=valb.astype(np.bool))
t_bool = helper.make_tensor(name="bool",
data_type=TensorProto.BOOL,
dims=valb.shape,
vals=valb.astype(np.bool))
val = np.array([1, 1])
t_int8 = helper.make_tensor(
name="int8",
data_type=TensorProto.INT8,
dims=val.shape,
vals=val.astype(np.int8))
t_uint8 = helper.make_tensor(
name="uint8",
data_type=TensorProto.UINT8,
dims=val.shape,
vals=val.astype(np.uint8))
t_uint16 = helper.make_tensor(
name="uint16",
data_type=TensorProto.UINT16,
dims=val.shape,
vals=val.astype(np.uint16))
t_uint32 = helper.make_tensor(
name="uint32",
data_type=TensorProto.UINT32,
dims=val.shape,
vals=val.astype(np.uint32))
t_uint64 = helper.make_tensor(
name="uint64",
data_type=TensorProto.UINT64,
dims=val.shape,
vals=val.astype(np.uint64))
t_double = helper.make_tensor(
name="double",
data_type=TensorProto.DOUBLE,
dims=val.shape,
vals=val.astype(np.float64))
t_int8 = helper.make_tensor(name="int8",
data_type=TensorProto.INT8,
dims=val.shape,
vals=val.astype(np.int8))
t_uint8 = helper.make_tensor(name="uint8",
data_type=TensorProto.UINT8,
dims=val.shape,
vals=val.astype(np.uint8))
t_uint16 = helper.make_tensor(name="uint16",
data_type=TensorProto.UINT16,
dims=val.shape,
vals=val.astype(np.uint16))
t_uint32 = helper.make_tensor(name="uint32",
data_type=TensorProto.UINT32,
dims=val.shape,
vals=val.astype(np.uint32))
t_uint64 = helper.make_tensor(name="uint64",
data_type=TensorProto.UINT64,
dims=val.shape,
vals=val.astype(np.uint64))
t_double = helper.make_tensor(name="double",
data_type=TensorProto.DOUBLE,
dims=val.shape,
vals=val.astype(np.float64))
valr = np.array([1.5, 2.0])
t_raw = helper.make_tensor(
name="raw",
data_type=TensorProto.DOUBLE,
dims=valr.shape,
vals=valr.tobytes(),
raw=True)
n_bool = onnx.helper.make_node(
'Cast', inputs=['bool'], outputs=['o_bool'], to=11)
n_int8 = onnx.helper.make_node(
'Cast', inputs=['int8'], outputs=['o_int8'], to=11)
n_uint8 = onnx.helper.make_node(
'Cast', inputs=['uint8'], outputs=['o_uint8'], to=11)
n_uint16 = onnx.helper.make_node(
'Cast', inputs=['uint16'], outputs=['o_uint16'], to=11)
n_uint32 = onnx.helper.make_node(
'Cast', inputs=['uint32'], outputs=['o_uint32'], to=11)
n_uint64 = onnx.helper.make_node(
'Cast', inputs=['uint64'], outputs=['o_uint64'], to=11)
t_raw = helper.make_tensor(name="raw",
data_type=TensorProto.DOUBLE,
dims=valr.shape,
vals=valr.tobytes(),
raw=True)
n_bool = onnx.helper.make_node('Cast',
inputs=['bool'],
outputs=['o_bool'],
to=11)
n_int8 = onnx.helper.make_node('Cast',
inputs=['int8'],
outputs=['o_int8'],
to=11)
n_uint8 = onnx.helper.make_node('Cast',
inputs=['uint8'],
outputs=['o_uint8'],
to=11)
n_uint16 = onnx.helper.make_node('Cast',
inputs=['uint16'],
outputs=['o_uint16'],
to=11)
n_uint32 = onnx.helper.make_node('Cast',
inputs=['uint32'],
outputs=['o_uint32'],
to=11)
n_uint64 = onnx.helper.make_node('Cast',
inputs=['uint64'],
outputs=['o_uint64'],
to=11)
node = onnx.helper.make_node(
'Sum',
......@@ -5105,11 +5234,11 @@ def sum_type_test():
y = helper.make_tensor_value_info('out', TensorProto.DOUBLE, [2])
return ([n_bool, n_int8, n_uint8, n_uint16, n_uint32, n_uint64, node], [],
[y], [
t_bool, t_int8, t_uint8, t_uint16, t_uint32, t_uint64,
t_double, t_raw
])
return ([n_bool, n_int8, n_uint8, n_uint16, n_uint32, n_uint64,
node], [], [y], [
t_bool, t_int8, t_uint8, t_uint16, t_uint32, t_uint64,
t_double, t_raw
])
@onnx_test
......@@ -5145,8 +5274,9 @@ def thresholdedrelu_default_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 2, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 3])
node = onnx.helper.make_node(
'ThresholdedRelu', inputs=['x'], outputs=['y'])
node = onnx.helper.make_node('ThresholdedRelu',
inputs=['x'],
outputs=['y'])
return ([node], [x], [y])
......@@ -5157,8 +5287,10 @@ def thresholdedrelu_test():
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 3])
alpha = 3.0
node = onnx.helper.make_node(
'ThresholdedRelu', inputs=['x'], outputs=['y'], alpha=alpha)
node = onnx.helper.make_node('ThresholdedRelu',
inputs=['x'],
outputs=['y'],
alpha=alpha)
return ([node], [x], [y])
......@@ -5169,8 +5301,10 @@ def thresholdedrelu_int_test():
y = helper.make_tensor_value_info('y', TensorProto.INT32, [2, 2, 3])
alpha = 3.0
node = onnx.helper.make_node(
'ThresholdedRelu', inputs=['x'], outputs=['y'], alpha=alpha)
node = onnx.helper.make_node('ThresholdedRelu',
inputs=['x'],
outputs=['y'],
alpha=alpha)
return ([node], [x], [y])
......@@ -5206,8 +5340,10 @@ def topk_attrk_test():
ind = helper.make_tensor_value_info('indices', TensorProto.INT64,
[2, 2, 3, 2])
node = onnx.helper.make_node(
'TopK', inputs=['data'], outputs=['val', 'indices'], k=2)
node = onnx.helper.make_node('TopK',
inputs=['data'],
outputs=['val', 'indices'],
k=2)
return ([node], [x], [val, ind])
......@@ -5219,18 +5355,16 @@ def topk_neg_axis_test():
ind = helper.make_tensor_value_info('indices', TensorProto.INT64,
[3, 3, 5, 6])
k_tensor = helper.make_tensor(
name='k',
data_type=TensorProto.INT64,
dims=k.shape,
vals=k.astype(np.int64))
k_tensor = helper.make_tensor(name='k',
data_type=TensorProto.INT64,
dims=k.shape,
vals=k.astype(np.int64))
node = onnx.helper.make_node(
'TopK',
inputs=['data', 'k'],
outputs=['val', 'indices'],
axis=-2,
sorted=0)
node = onnx.helper.make_node('TopK',
inputs=['data', 'k'],
outputs=['val', 'indices'],
axis=-2,
sorted=0)
return ([node], [x], [val, ind], [k_tensor])
......@@ -5242,18 +5376,16 @@ def topk_test():
ind = helper.make_tensor_value_info('indices', TensorProto.INT64,
[2, 4, 3, 2])
k_tensor = helper.make_tensor(
name='k',
data_type=TensorProto.INT64,
dims=k.shape,
vals=k.astype(np.int64))
k_tensor = helper.make_tensor(name='k',
data_type=TensorProto.INT64,
dims=k.shape,
vals=k.astype(np.int64))
node = onnx.helper.make_node(
'TopK',
inputs=['data', 'k'],
outputs=['val', 'indices'],
largest=0,
axis=1)
node = onnx.helper.make_node('TopK',
inputs=['data', 'k'],
outputs=['val', 'indices'],
largest=0,
axis=1)
return ([node], [x], [val, ind], [k_tensor])
......@@ -5315,11 +5447,10 @@ def transpose_gather_test():
perm=[0, 2, 1, 3],
)
ti = onnx.helper.make_node(
'Transpose',
inputs=['indices'],
outputs=['tindices'],
perm=[0, 2, 1, 3])
ti = onnx.helper.make_node('Transpose',
inputs=['indices'],
outputs=['tindices'],
perm=[0, 2, 1, 3])
node = onnx.helper.make_node(
'Gather',
......@@ -5366,8 +5497,10 @@ def unknown_aten_test():
a = helper.make_tensor_value_info('3', TensorProto.FLOAT, [2, 3, 4, 5])
node = onnx.helper.make_node(
'ATen', inputs=['0', '1'], outputs=['2'], operator='unknown')
node = onnx.helper.make_node('ATen',
inputs=['0', '1'],
outputs=['2'],
operator='unknown')
return ([node], [x, y], [a])
......@@ -5375,16 +5508,18 @@ def unknown_aten_test():
@onnx_test
def upsample_linear_test():
    # Legacy Upsample op with mode='linear', scaling H and W by 2x from
    # (1, 1, 2, 2); output shape left unspecified ([]) for the harness.
    scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
    # 'scales' is provided as a graph initializer; the empty second input
    # keeps the input list aligned with the Resize-style signature.
    scales_tensor = helper.make_tensor(name='scales',
                                       data_type=TensorProto.FLOAT,
                                       dims=scales.shape,
                                       vals=scales.flatten().astype(
                                           np.float32))
    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 2])
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [])
    node = onnx.helper.make_node('Upsample',
                                 inputs=['X', '', 'scales'],
                                 outputs=['Y'],
                                 mode='linear')
    # Returns (nodes, inputs, outputs, initializers) for the test harness.
    return ([node], [X], [Y], [scales_tensor])
......@@ -5392,11 +5527,10 @@ def upsample_linear_test():
@onnx_test
def upsample_test():
scales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)
scale_tensor = helper.make_tensor(
name='scales',
data_type=TensorProto.FLOAT,
dims=scales.shape,
vals=scales.flatten().astype(np.float32))
scale_tensor = helper.make_tensor(name='scales',
data_type=TensorProto.FLOAT,
dims=scales.shape,
vals=scales.flatten().astype(np.float32))
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 2])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 1, 4, 6])
......@@ -5441,7 +5575,8 @@ def where_test():
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 1, 2, 2])
z = helper.make_tensor_value_info('z', TensorProto.FLOAT, [2, 2, 2, 2])
node = onnx.helper.make_node(
'Where', inputs=['c', 'x', 'y'], outputs=['z'])
node = onnx.helper.make_node('Where',
inputs=['c', 'x', 'y'],
outputs=['z'])
return ([node], [c, x, y], [z])
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment