Unverified commit f8b56a66 authored by kahmed10, committed by GitHub

Extra tf ops (#679)

* initial testing

* add new ops

* remove comment

* remove extra changes

* remove extra changes

* add tests

* formatting

* add tests
parent 2466dd6f
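
The diff below registers three additional TensorFlow ops in tf_parser: AddV2 (mapped to op::add), FusedBatchNormV3 (parsed with the existing FusedBatchNorm handler), and NoOp (skipped during parsing). It also migrates the Python test generators to the tf.compat.v1 API and regenerates the protobuf test graphs.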
@@ -180,12 +180,12 @@ struct tf_parser
add_generic_op("Identity", op::identity{});
add_generic_op("LessEqual", op::identity{});
add_generic_op("Relu", op::relu{});
// add_generic_op("Relu6", op::clip{6.0, 0.0});
add_generic_op("Rsqrt", op::rsqrt{});
add_generic_op("Tanh", op::tanh{});
add_generic_op("StopGradient", op::identity{});
add_binary_op("Add", op::add{});
add_binary_op("AddV2", op::add{});
add_binary_op("Mul", op::mul{});
add_binary_op("Pow", op::pow{});
add_binary_op("SquaredDifference", op::sqdiff{});
@@ -204,6 +204,7 @@ struct tf_parser
add_mem_op("DepthwiseConv2dNative", &tf_parser::parse_depthwiseconv);
add_mem_op("ExpandDims", &tf_parser::parse_expanddims, false);
add_mem_op("FusedBatchNorm", &tf_parser::parse_batchnorm);
add_mem_op("FusedBatchNormV3", &tf_parser::parse_batchnorm);
add_mem_op("GatherV2", &tf_parser::parse_gather, false);
add_mem_op("MatMul", &tf_parser::parse_matmul, false);
add_mem_op("MaxPool", &tf_parser::parse_pooling);
@@ -1061,6 +1062,9 @@ struct tf_parser
// assert ops ignored
if(node.op() == "Assert" or contains(name, "Assert"))
return;
// NoOp nodes ignored
if(node.op() == "NoOp" or contains(name, "NoOp"))
return;
std::vector<instruction_ref> args;
for(auto&& input : node.input())
......
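
For context, a minimal sketch (not part of this commit) of a TF 1.x-style graph that exercises the three newly handled ops. The file name and shapes are illustrative, and TensorFlow 1.15 or newer is assumed so that tf.compat.v1 and tf.raw_ops are available:

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    x = tf.compat.v1.placeholder(tf.float32, shape=(1, 16, 16, 32), name='x')
    y = tf.compat.v1.placeholder(tf.float32, shape=(1, 16, 16, 32), name='y')
    scale = tf.constant(1.0, dtype=tf.float32, shape=[32])
    offset = tf.constant(0.0, dtype=tf.float32, shape=[32])
    mean = tf.constant(0.0, dtype=tf.float32, shape=[32])
    variance = tf.constant(1.0, dtype=tf.float32, shape=[32])
    added = tf.raw_ops.AddV2(x=x, y=y, name='add')  # now mapped to op::add
    tf.raw_ops.FusedBatchNormV3(x=added,
                                scale=scale,
                                offset=offset,
                                mean=mean,
                                variance=variance,
                                epsilon=0.00001,
                                is_training=False,
                                name='bn')  # parsed via parse_batchnorm
    tf.raw_ops.NoOp(name='noop')  # skipped by the parser
# serialize the frozen GraphDef the same way the test generators do
tf.compat.v1.train.write_graph(g.as_graph_def(), '.', 'extra_ops.pb', as_text=False)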
[Updated protobuf test graphs: add (Add), add_bcast (Add), addv2 (AddV2, new), batchmatmul (BatchMatMul changed to BatchMatMulV2, adj_x/adj_y attributes), bias_add (BiasAdd, data_format NHWC); several further graphs are binary with no text preview.]
@@ -18,48 +18,72 @@ def tf_test(op_test):
@tf_test
def add_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='0')
g2_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='1')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 2, 2, 3),
name='0')
g2_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 2, 2, 3),
name='1')
tf.add(g1_input, g2_input, name='add1')
@tf_test
def addv2_test(g1):
with g1.as_default():
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 2, 2, 3),
name='0')
g2_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 2, 2, 3),
name='1')
tf.raw_ops.AddV2(x=g1_input, y=g2_input, name='add1')
@tf_test
def add_bcast_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(2, 3), name='0')
g2_input = tf.placeholder(tf.float32, shape=(2, 1), name='1')
g1_input = tf.compat.v1.placeholder(tf.float32, shape=(2, 3), name='0')
g2_input = tf.compat.v1.placeholder(tf.float32, shape=(2, 1), name='1')
tf.math.add(g1_input, g2_input, name='add_bcast1')
@tf_test
def argmax_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(3, 4, 5, 6), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(3, 4, 5, 6),
name='0')
tf.argmax(g1_input, axis=2, name='argmax1')
@tf_test
def argmin_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(3, 4, 5, 6), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(3, 4, 5, 6),
name='0')
tf.argmin(g1_input, axis=2, name='argmin1')
@tf_test
def assert_less_equal_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(2, 3), name='0')
g2_input = tf.placeholder(tf.float32, shape=(2, 3), name='1')
g1_input = tf.compat.v1.placeholder(tf.float32, shape=(2, 3), name='0')
g2_input = tf.compat.v1.placeholder(tf.float32, shape=(2, 3), name='1')
with tf.control_dependencies(
[tf.assert_less_equal(g1_input, g2_input)]):
[tf.compat.v1.assert_less_equal(g1_input, g2_input)]):
tf.add(g1_input, g2_input, name='add1')
@tf_test
def batchmatmul_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 2, 8, 4), name='0')
g2_input = tf.placeholder(tf.float32, shape=(1, 2, 4, 8), name='1')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 2, 8, 4),
name='0')
g2_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 2, 4, 8),
name='1')
tf.matmul(g1_input,
g2_input,
transpose_a=True,
@@ -70,41 +94,75 @@ def batchmatmul_test(g1):
@tf_test
def batchnorm_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 16, 16, 32), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 16, 16, 32),
name='0')
g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='1')
g1_offset = tf.compat.v1.placeholder(tf.float32, shape=(32), name='2')
g1_mean = tf.compat.v1.placeholder(tf.float32, shape=(32), name='3')
g1_variance = tf.compat.v1.placeholder(tf.float32,
shape=(32),
name='4')
tf.compat.v1.nn.fused_batch_norm(x=g1_input,
scale=g1_scale,
offset=g1_offset,
mean=g1_mean,
variance=g1_variance,
epsilon=0.00001,
is_training=False,
name='batchnorm1')
@tf_test
def batchnormv3_test(g1):
with g1.as_default():
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 16, 16, 32),
name='0')
g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='1')
g1_offset = tf.placeholder(tf.float32, shape=(32), name='2')
g1_mean = tf.placeholder(tf.float32, shape=(32), name='3')
g1_variance = tf.placeholder(tf.float32, shape=(32), name='4')
tf.nn.fused_batch_norm(g1_input,
g1_scale,
g1_offset,
g1_mean,
g1_variance,
epsilon=0.00001,
is_training=False,
name='batchnorm1')
g1_offset = tf.compat.v1.placeholder(tf.float32, shape=(32), name='2')
g1_mean = tf.compat.v1.placeholder(tf.float32, shape=(32), name='3')
g1_variance = tf.compat.v1.placeholder(tf.float32,
shape=(32),
name='4')
tf.raw_ops.FusedBatchNormV3(x=g1_input,
scale=g1_scale,
offset=g1_offset,
mean=g1_mean,
variance=g1_variance,
epsilon=0.00001,
is_training=False,
name='batchnorm1')
@tf_test
def biasadd_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 1, 1, 500), name='0')
g2_input = tf.placeholder(tf.float32, shape=(500), name='1')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 1, 1, 500),
name='0')
g2_input = tf.compat.v1.placeholder(tf.float32, shape=(500), name='1')
tf.nn.bias_add(g1_input, g2_input, name='bias_add1')
@tf_test
def cast_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 3, 16, 16),
name='0')
tf.cast(g1_input, dtype=tf.int32, name='cast1')
@tf_test
def concat_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(4, 7, 3), name='0')
g2_input = tf.placeholder(tf.float32, shape=(4, 2, 3), name='1')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(4, 7, 3),
name='0')
g2_input = tf.compat.v1.placeholder(tf.float32,
shape=(4, 2, 3),
name='1')
tf.concat([g1_input, g2_input], axis=1, name='concat1')
@@ -117,7 +175,9 @@ def const_test(g1):
@tf_test
def conv_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 16, 16, 3), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 16, 16, 3),
name='0')
g1_weights = tf.constant(value=1.0,
dtype=tf.float32,
shape=(3, 3, 3, 32),
@@ -128,43 +188,49 @@ def conv_test(g1):
@tf_test
def depthwiseconv_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 16, 16, 3), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 16, 16, 3),
name='0')
g1_weights = tf.constant(value=1.0,
dtype=tf.float32,
shape=(3, 3, 3, 1),
name='1')
tf.nn.depthwise_conv2d_native(g1_input,
g1_weights, [1, 1, 1, 1],
"SAME",
name='depthwiseconv1')
tf.compat.v1.nn.depthwise_conv2d_native(g1_input,
g1_weights, [1, 1, 1, 1],
"SAME",
name='depthwiseconv1')
@tf_test
def expanddims_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(2, 3, 4), name='0')
tf.expand_dims(g1_input, axis=-1, name='expanddims_neg')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(2, 3, 4),
name='0')
tf.expand_dims(g1_input, axis=0, name='expanddims_neg')
@tf_test
def gather_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(2, 4), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32, shape=(2, 4), name='0')
tf.gather(g1_input, [1, 1], axis=1, name='gather1')
@tf_test
def identity_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 3, 16, 16),
name='0')
tf.identity(g1_input, 'identity')
@tf_test
def matmul_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(8, 4), name='0')
g2_input = tf.placeholder(tf.float32, shape=(4, 8), name='1')
g1_input = tf.compat.v1.placeholder(tf.float32, shape=(8, 4), name='0')
g2_input = tf.compat.v1.placeholder(tf.float32, shape=(4, 8), name='1')
tf.matmul(g1_input,
g2_input,
transpose_a=True,
@@ -175,7 +241,9 @@ def matmul_test(g1):
@tf_test
def mean_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 3, 16, 16),
name='0')
tf.math.reduce_mean(g1_input, axis=(2, 3), keepdims=True, name='mean1')
tf.math.reduce_mean(g1_input,
axis=(2, 3),
@@ -186,7 +254,9 @@ def mean_test(g1):
@tf_test
def mean_test_nhwc(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 16, 16, 3), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 16, 16, 3),
name='0')
tf.math.reduce_mean(g1_input, axis=(1, 2), keepdims=True, name='mean1')
tf.math.reduce_mean(g1_input,
axis=(1, 2),
@@ -197,11 +267,21 @@ def mean_test_nhwc(g1):
@tf_test
def mul_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 1, 1, 16), name='0')
g2_input = tf.placeholder(tf.float32, shape=(1, 1, 1, 16), name='1')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 1, 1, 16),
name='0')
g2_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 1, 1, 16),
name='1')
tf.multiply(g1_input, g2_input, name='mul1')
@tf_test
def noop_test(g1):
with g1.as_default():
tf.raw_ops.NoOp(name='noop1')
@tf_test
def onehot_test(g1):
with g1.as_default():
@@ -212,100 +292,124 @@ def onehot_test(g1):
@tf_test
def pack_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(2), name='0')
g2_input = tf.placeholder(tf.float32, shape=(2), name='1')
g3_input = tf.placeholder(tf.float32, shape=(2), name='2')
g1_input = tf.compat.v1.placeholder(tf.float32, shape=(2), name='0')
g2_input = tf.compat.v1.placeholder(tf.float32, shape=(2), name='1')
g3_input = tf.compat.v1.placeholder(tf.float32, shape=(2), name='2')
tf.stack([g1_input, g2_input, g3_input], axis=1, name='pack1')
@tf_test
def pack_test_nhwc(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 1, 1, 2), name='0')
g2_input = tf.placeholder(tf.float32, shape=(1, 1, 1, 2), name='1')
g3_input = tf.placeholder(tf.float32, shape=(1, 1, 1, 2), name='2')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 1, 1, 2),
name='0')
g2_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 1, 1, 2),
name='1')
g3_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 1, 1, 2),
name='2')
tf.stack([g1_input, g2_input, g3_input], axis=3, name='pack1')
@tf_test
def pooling_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 16, 16, 3), name='0')
tf.nn.avg_pool(value=g1_input,
ksize=(1, 2, 2, 1),
strides=(1, 2, 2, 1),
padding='VALID',
data_format='NHWC',
name='avg_pooling')
tf.nn.max_pool(value=g1_input,
ksize=(1, 2, 2, 1),
strides=(1, 2, 2, 1),
padding='VALID',
data_format='NHWC',
name='max_pooling')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 16, 16, 3),
name='0')
tf.compat.v1.nn.avg_pool(value=g1_input,
ksize=(1, 2, 2, 1),
strides=(1, 2, 2, 1),
padding='VALID',
data_format='NHWC',
name='avg_pooling')
tf.compat.v1.nn.max_pool(value=g1_input,
ksize=(1, 2, 2, 1),
strides=(1, 2, 2, 1),
padding='VALID',
data_format='NHWC',
name='max_pooling')
@tf_test
def pow_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='0')
g2_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='1')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 2, 2, 3),
name='0')
g2_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 2, 2, 3),
name='1')
tf.pow(g1_input, g2_input, name='pow1')
@tf_test
def relu_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 3, 16, 16),
name='0')
tf.nn.relu(g1_input, 'relu')
@tf_test
def relu6_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 3, 16, 16),
name='0')
tf.nn.relu6(g1_input, 'relu6')
@tf_test
def reshape_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(16), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32, shape=(16), name='0')
tf.reshape(g1_input, (1, 1, 1, 16), 'reshape')
@tf_test
def rsqrt_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 3, 16, 16),
name='0')
tf.math.rsqrt(g1_input, 'rsqrt')
@tf_test
def shape_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 3, 16, 16),
name='0')
g1.create_op(op_type='Shape', inputs=[g1_input])
@tf_test
def slice_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(5, 10), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(5, 10),
name='0')
tf.slice(g1_input, [1, 0], [2, -1], name='slice1')
@tf_test
def softmax_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32, shape=(1, 3), name='0')
tf.nn.softmax(g1_input, name='softmax')
@tf_test
def split_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(5, 30), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(5, 30),
name='0')
split0, split1, split2 = tf.split(g1_input, 3, 1, name='split')
tf.concat([split0, split1], axis=1, name='concat1')
tf.concat([split1, split2], axis=1, name='concat2')
@@ -314,14 +418,18 @@ def split_test(g1):
@tf_test
def split_test_one_output(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(5, 30), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(5, 30),
name='0')
tf.split(g1_input, 1, 1, name='split')
@tf_test
def split_test_vector_as_input(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(5, 30), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(5, 30),
name='0')
split0, split1, split2 = tf.split(g1_input, [4, 15, 11],
1,
name='split')
@@ -332,29 +440,39 @@ def split_test_vector_as_input(g1):
@tf_test
def sqdiff_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='0')
g2_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='1')
tf.squared_difference(g1_input, g2_input, name='sqdiff')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 2, 2, 3),
name='0')
g2_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 2, 2, 3),
name='1')
tf.compat.v1.squared_difference(g1_input, g2_input, name='sqdiff')
@tf_test
def squeeze_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 2, 3, 1), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 2, 3, 1),
name='0')
tf.squeeze(g1_input, name='squeeze')
@tf_test
def stopgradient_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 3, 16, 16),
name='0')
tf.stop_gradient(g1_input, 'stopgradient')
@tf_test
def stridedslice_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 1, 1, 10), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 1, 1, 10),
name='0')
tf.strided_slice(g1_input, [0, 0, 0, 0], [1, 1, 1, 5], [1, 1, 1, 1],
shrink_axis_mask=2,
name='stridedslice1')
@@ -363,7 +481,9 @@ def stridedslice_test(g1):
@tf_test
def stridedslice_masks_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3, 3, 10), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 3, 3, 10),
name='0')
tf.strided_slice(g1_input, [0, 1, 1, 0], [0, 0, 0, 0], [1, 1, 1, 1],
begin_mask=9,
end_mask=15,
@@ -373,27 +493,87 @@ def stridedslice_masks_test(g1):
@tf_test
def sub_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='0')
g2_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='1')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 2, 2, 3),
name='0')
g2_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 2, 2, 3),
name='1')
tf.subtract(g1_input, g2_input, name='sub1')
@tf_test
def tanh_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 3, 16, 16),
name='0')
tf.tanh(g1_input, 'tanh')
@tf_test
def transpose_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(1, 3, 16, 16),
name='0')
tf.transpose(g1_input, perm=[0, 2, 3, 1], name='transpose')
@tf_test
def variable_batch_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(0, 3, 16, 16), name='0')
g1_input = tf.compat.v1.placeholder(tf.float32,
shape=(0, 3, 16, 16),
name='0')
tf.identity(g1_input, name='identity')
if __name__ == '__main__':
add_test()
addv2_test()
add_bcast_test()
argmax_test()
argmin_test()
assert_less_equal_test()
batchmatmul_test()
batchnorm_test()
batchnormv3_test()
biasadd_test()
cast_test()
concat_test()
const_test()
conv_test()
depthwiseconv_test()
expanddims_test()
gather_test()
identity_test()
matmul_test()
mean_test()
mean_test_nhwc()
mul_test()
onehot_test()
noop_test()
pack_test()
pack_test_nhwc()
pooling_test()
pow_test()
relu_test()
relu6_test()
reshape_test()
rsqrt_test()
shape_test()
slice_test()
softmax_test()
split_test()
split_test_one_output()
split_test_vector_as_input()
sqdiff_test()
squeeze_test()
stopgradient_test()
stridedslice_test()
stridedslice_masks_test()
sub_test()
tanh_test()
transpose_test()
variable_batch_test()
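
Each generator above writes a frozen GraphDef that the C++ parser tests consume. A hedged loading sketch follows; it assumes the MIGraphX Python module exposes parse_tf and that a file named addv2_test.pb was produced by the generator above (both the API and the file name are assumptions, not confirmed by this diff):

import migraphx

prog = migraphx.parse_tf("addv2_test.pb")  # assumed API and file name
prog.compile(migraphx.get_target("cpu"))
print(prog)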
[Updated protobuf test graphs: identity (Identity), matmul (MatMul, transpose_a/transpose_b attributes); one further graph is binary with no text preview.]