Commit 20b1d690 authored by Paul

Merge branch 'develop' into tests

parents 17aaaa1e ba729cfc
#include <algorithm>
#include <iterator>
#include <migraphx/simplify_reshapes.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/instruction.hpp>
#include <basic_ops.hpp>
#include <test.hpp>
@@ -165,4 +166,144 @@ TEST_CASE(transpose_double_contiguous)
EXPECT(p.has_instruction(t));
}
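// two adjacent transposes whose permutations compose into one non-trivial
// permutation should be merged into a single transpose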
TEST_CASE(transpose_partial1)
{
migraphx::program p;
auto s = migraphx::shape{migraphx::shape::float_type, {1, 2, 3}};
auto x = p.add_parameter("x", s);
auto t1 = p.add_instruction(migraphx::op::transpose{{1, 0, 2}}, x);
auto t2 = p.add_instruction(migraphx::op::transpose{{1, 2, 0}}, t1);
p.add_instruction(pass_op{}, t2);
auto out_shape = p.get_shape();
auto n = std::distance(p.begin(), p.end());
p.compile(simplify_reshapes_target{});
EXPECT(p.get_shape() == out_shape);
EXPECT(std::distance(p.begin(), p.end()) == n - 1);
}
TEST_CASE(transpose_partial2)
{
migraphx::program p;
auto s = migraphx::shape{migraphx::shape::float_type, {1, 2, 3}};
auto x = p.add_parameter("x", s);
auto t1 = p.add_instruction(migraphx::op::transpose{{1, 0, 2}}, x);
auto t2 = p.add_instruction(migraphx::op::transpose{{1, 2, 0}}, t1);
auto t3 = p.add_instruction(migraphx::op::transpose{{1, 0, 2}}, t2);
p.add_instruction(pass_op{}, t3);
auto out_shape = p.get_shape();
auto n = std::distance(p.begin(), p.end());
p.compile(simplify_reshapes_target{});
EXPECT(p.get_shape() == out_shape);
EXPECT(std::distance(p.begin(), p.end()) == n - 2);
}
TEST_CASE(transpose_partial3)
{
migraphx::program p;
auto s = migraphx::shape{migraphx::shape::float_type, {1, 2, 3}};
auto x = p.add_parameter("x", s);
auto t1 = p.add_instruction(migraphx::op::transpose{{1, 0, 2}}, x);
auto t2 = p.add_instruction(migraphx::op::transpose{{1, 2, 0}}, t1);
auto t3 = p.add_instruction(migraphx::op::transpose{{1, 0, 2}}, t2);
auto t4 = p.add_instruction(migraphx::op::transpose{{1, 0, 2}}, t3);
p.add_instruction(pass_op{}, t4);
auto out_shape = p.get_shape();
auto n = std::distance(p.begin(), p.end());
p.compile(simplify_reshapes_target{});
EXPECT(p.get_shape() == out_shape);
EXPECT(std::distance(p.begin(), p.end()) == n - 3);
}
TEST_CASE(nop_transpose1)
{
migraphx::program p;
auto s = migraphx::shape{migraphx::shape::float_type, {1, 2, 3}};
auto x = p.add_parameter("x", s);
auto t = p.add_instruction(migraphx::op::transpose{{0, 1, 2}}, x);
p.add_instruction(pass_op{}, t);
auto out_shape = p.get_shape();
auto n = std::distance(p.begin(), p.end());
p.compile(simplify_reshapes_target{});
EXPECT(p.get_shape() == out_shape);
EXPECT(std::distance(p.begin(), p.end()) == n - 1);
}
TEST_CASE(nop_transpose2)
{
migraphx::program p;
auto s = migraphx::shape{migraphx::shape::float_type, {1, 2, 3}};
auto x = p.add_parameter("x", s);
auto t1 = p.add_instruction(migraphx::op::transpose{{0, 1, 2}}, x);
auto t2 = p.add_instruction(migraphx::op::transpose{{0, 1, 2}}, t1);
auto t3 = p.add_instruction(migraphx::op::transpose{{0, 1, 2}}, t2);
auto t4 = p.add_instruction(migraphx::op::transpose{{0, 1, 2}}, t3);
p.add_instruction(pass_op{}, t4);
auto out_shape = p.get_shape();
auto n = std::distance(p.begin(), p.end());
p.compile(simplify_reshapes_target{});
EXPECT(p.get_shape() == out_shape);
EXPECT(std::distance(p.begin(), p.end()) == n - 4);
}
TEST_CASE(nop_transpose3)
{
migraphx::program p;
auto s = migraphx::shape{migraphx::shape::float_type, {1, 2, 3, 4}};
auto x = p.add_parameter("x", s);
auto y = p.add_parameter("y", s);
auto concat = p.add_instruction(migraphx::op::concat{3}, x, y);
auto t1 = p.add_instruction(migraphx::op::transpose{{0, 1, 2, 3}}, concat);
auto t2 = p.add_instruction(migraphx::op::transpose{{0, 1, 3, 2}}, t1);
p.add_instruction(pass_op{}, t2);
auto out_shape = p.get_shape();
auto n = std::distance(p.begin(), p.end());
p.compile(simplify_reshapes_target{});
EXPECT(p.get_shape() == out_shape);
EXPECT(std::distance(p.begin(), p.end()) == n - 1);
}
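// transposes surrounding a concat can be folded by remapping the concat
// axis through the permutation (axis 2 becomes axis 3 below)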
TEST_CASE(concat_transpose1)
{
migraphx::program p;
auto s = migraphx::shape{migraphx::shape::float_type, {1, 2, 3, 4}};
auto x = p.add_parameter("x", s);
auto y = p.add_parameter("y", s);
auto xt = p.add_instruction(migraphx::op::transpose{{0, 1, 3, 2}}, x);
auto yt = p.add_instruction(migraphx::op::transpose{{0, 1, 3, 2}}, y);
auto concat = p.add_instruction(migraphx::op::concat{2}, xt, yt);
auto t = p.add_instruction(migraphx::op::transpose{{0, 1, 3, 2}}, concat);
p.add_instruction(pass_op{}, t);
auto out_shape = p.get_shape();
auto n = std::distance(p.begin(), p.end());
p.compile(simplify_reshapes_target{});
EXPECT(p.get_shape().lens() == out_shape.lens());
EXPECT(std::distance(p.begin(), p.end()) == n - 3);
auto new_concat =
std::find_if(p.begin(), p.end(), [](auto ins) { return ins.name() == "concat"; });
EXPECT(bool{new_concat != p.end()});
EXPECT(migraphx::any_cast<migraphx::op::concat>(new_concat->get_operator()).axis == 3);
}
TEST_CASE(concat_transpose2)
{
migraphx::program p;
auto s = migraphx::shape{migraphx::shape::float_type, {1, 2, 3, 4}};
auto x = p.add_parameter("x", s);
auto y = p.add_parameter("y", s);
auto xt = p.add_instruction(migraphx::op::transpose{{0, 2, 3, 1}}, x);
auto yt = p.add_instruction(migraphx::op::transpose{{0, 2, 3, 1}}, y);
auto concat = p.add_instruction(migraphx::op::concat{3}, xt, yt);
auto t = p.add_instruction(migraphx::op::transpose{{0, 2, 3, 1}}, concat);
p.add_instruction(pass_op{}, t);
auto out_shape = p.get_shape();
auto n = std::distance(p.begin(), p.end());
p.compile(simplify_reshapes_target{});
EXPECT(p.get_shape().lens() == out_shape.lens());
EXPECT(std::distance(p.begin(), p.end()) == n - 2);
auto new_concat =
std::find_if(p.begin(), p.end(), [](auto ins) { return ins.name() == "concat"; });
EXPECT(bool{new_concat != p.end()});
EXPECT(migraphx::any_cast<migraphx::op::concat>(new_concat->get_operator()).axis == 1);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
[binary file batchmatmul_test.pb: Placeholders '0' and '1', BatchMatMul 'batchmatmul1' with adj_x/adj_y]
import numpy as np
import tensorflow as tf
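# decorator: build the graph with the wrapped function, then serialize it to
# '<test_name>.pb' for the C++ parser tests to load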
def tf_test(op_test):
def run_test():
g1 = tf.Graph()
op_test(g1)
tf.io.write_graph(g1,
'.',
'{}.pb'.format(op_test.__name__),
as_text=False)
return run_test
@tf_test
def add_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='0')
g2_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='1')
tf.add(g1_input, g2_input, name='add1')
@tf_test
def add_bcast_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(2, 3), name='0')
g2_input = tf.placeholder(tf.float32, shape=(2, 1), name='1')
tf.math.add(g1_input, g2_input, name='add_bcast1')
@tf_test
def assert_less_equal_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(2, 3), name='0')
g2_input = tf.placeholder(tf.float32, shape=(2, 3), name='1')
with tf.control_dependencies(
[tf.assert_less_equal(g1_input, g2_input)]):
tf.add(g1_input, g2_input, name='add1')
@tf_test
def batchmatmul_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 2, 8, 4), name='0')
g2_input = tf.placeholder(tf.float32, shape=(1, 2, 4, 8), name='1')
tf.matmul(g1_input,
g2_input,
transpose_a=True,
transpose_b=True,
name='batchmatmul1')
@tf_test
def batchnorm_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 16, 16, 32), name='0')
g1_scale = tf.constant(1.0, dtype=tf.float32, shape=[32], name='1')
g1_offset = tf.placeholder(tf.float32, shape=(32), name='2')
g1_mean = tf.placeholder(tf.float32, shape=(32), name='3')
g1_variance = tf.placeholder(tf.float32, shape=(32), name='4')
tf.nn.fused_batch_norm(g1_input,
g1_scale,
g1_offset,
g1_mean,
g1_variance,
epsilon=0.00001,
is_training=False,
name='batchnorm1')
@tf_test
def biasadd_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 1, 1, 500), name='0')
g2_input = tf.placeholder(tf.float32, shape=(500), name='1')
tf.nn.bias_add(g1_input, g2_input, name='bias_add1')
@tf_test
def cast_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
tf.cast(g1_input, dtype=tf.int32, name='cast1')
@tf_test
def concat_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(4, 7, 3), name='0')
g2_input = tf.placeholder(tf.float32, shape=(4, 2, 3), name='1')
tf.concat([g1_input, g2_input], axis=1, name='concat1')
@tf_test
def constant_test(g1):
with g1.as_default():
tf.constant(1.0, dtype=tf.float32, name='constant1')
@tf_test
def conv_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 16, 16, 3), name='0')
g1_weights = tf.constant(value=1.0,
dtype=tf.float32,
shape=(3, 3, 3, 32),
name='1')
tf.nn.conv2d(g1_input, g1_weights, [1, 1, 1, 1], "SAME", name='conv1')
@tf_test
def depthwise_conv_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 16, 16, 3), name='0')
g1_weights = tf.constant(value=1.0,
dtype=tf.float32,
shape=(3, 3, 3, 1),
name='1')
tf.nn.depthwise_conv2d_native(g1_input,
g1_weights, [1, 1, 1, 1],
"SAME",
name='depthwiseconv1')
@tf_test
def expanddims_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(2, 3, 4), name='0')
        tf.expand_dims(g1_input, axis=0, name='expanddims')
@tf_test
def expanddims_neg_test(g1):
    with g1.as_default():
        g1_input = tf.placeholder(tf.float32, shape=(2, 3, 4), name='0')
        tf.expand_dims(g1_input, axis=-1, name='expanddims_neg')
@tf_test
def gather_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(2, 4), name='0')
tf.gather(g1_input, [1, 1], axis=1, name='gather1')
@tf_test
def identity_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
tf.identity(g1_input, 'identity')
@tf_test
def matmul_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(8, 4), name='0')
g2_input = tf.placeholder(tf.float32, shape=(4, 8), name='1')
tf.matmul(g1_input,
g2_input,
transpose_a=True,
transpose_b=True,
name='matmul1')
@tf_test
def mean_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
tf.math.reduce_mean(g1_input, axis=(2, 3), keepdims=True, name='mean1')
tf.math.reduce_mean(g1_input,
axis=(2, 3),
keepdims=False,
name='mean2')
@tf_test
def mean_test_nhwc(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 16, 16, 3), name='0')
tf.math.reduce_mean(g1_input, axis=(1, 2), keepdims=True, name='mean1')
tf.math.reduce_mean(g1_input,
axis=(1, 2),
keepdims=False,
name='mean2')
@tf_test
def mul_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 1, 1, 16), name='0')
g2_input = tf.placeholder(tf.float32, shape=(1, 1, 1, 16), name='1')
tf.multiply(g1_input, g2_input, name='mul1')
@tf_test
def pack_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(2), name='0')
g2_input = tf.placeholder(tf.float32, shape=(2), name='1')
g3_input = tf.placeholder(tf.float32, shape=(2), name='2')
tf.stack([g1_input, g2_input, g3_input], axis=1, name='pack1')
@tf_test
def pack_test_nhwc(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 1, 1, 2), name='0')
g2_input = tf.placeholder(tf.float32, shape=(1, 1, 1, 2), name='1')
g3_input = tf.placeholder(tf.float32, shape=(1, 1, 1, 2), name='2')
tf.stack([g1_input, g2_input, g3_input], axis=3, name='pack1')
@tf_test
def pooling_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 16, 16, 3), name='0')
tf.nn.avg_pool(value=g1_input,
ksize=(1, 2, 2, 1),
strides=(1, 2, 2, 1),
padding='VALID',
data_format='NHWC',
name='avg_pooling')
tf.nn.max_pool(value=g1_input,
ksize=(1, 2, 2, 1),
strides=(1, 2, 2, 1),
padding='VALID',
data_format='NHWC',
name='max_pooling')
@tf_test
def pow_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='0')
g2_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='1')
tf.pow(g1_input, g2_input, name='pow1')
@tf_test
def relu_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
tf.nn.relu(g1_input, 'relu')
@tf_test
def relu6_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
tf.nn.relu6(g1_input, 'relu6')
@tf_test
def reshape_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(16), name='0')
tf.reshape(g1_input, (1, 1, 1, 16), 'reshape')
@tf_test
def rsqrt_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
tf.math.rsqrt(g1_input, 'rsqrt')
@tf_test
def slice_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(5, 10), name='0')
tf.slice(g1_input, [1, 0], [2, -1], name='slice1')
@tf_test
def softmax_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3), name='0')
tf.nn.softmax(g1_input, name='softmax')
@tf_test
def sqdiff_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='0')
g2_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='1')
tf.squared_difference(g1_input, g2_input, name='sqdiff')
@tf_test
def squeeze_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 2, 3, 1), name='0')
tf.squeeze(g1_input, name='squeeze')
@tf_test
def stopgradient_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
tf.stop_gradient(g1_input, 'stopgradient')
@tf_test
def stridedslice_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 1, 1, 10), name='0')
tf.strided_slice(g1_input, [0, 0, 0, 0], [1, 1, 1, 5], [1, 1, 1, 1],
shrink_axis_mask=2,
name='stridedslice1')
@tf_test
def stridedslice_masks_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3, 3, 10), name='0')
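        # begin_mask=9 (0b1001) ignores the begin values on axes 0 and 3;
        # end_mask=15 (0b1111) ignores all end values, so masked axes run to
        # their full extent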
tf.strided_slice(g1_input, [0, 1, 1, 0], [0, 0, 0, 0], [1, 1, 1, 1],
begin_mask=9,
end_mask=15,
name='stridedslice1')
@tf_test
def sub_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='0')
g2_input = tf.placeholder(tf.float32, shape=(1, 2, 2, 3), name='1')
tf.subtract(g1_input, g2_input, name='sub1')
@tf_test
def tanh_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
tf.tanh(g1_input, 'tanh')
@tf_test
def transpose_test(g1):
with g1.as_default():
g1_input = tf.placeholder(tf.float32, shape=(1, 3, 16, 16), name='0')
tf.transpose(g1_input, perm=[0, 2, 3, 1], name='transpose')
[binary file pow_test.pb: Placeholders '0' and '1', Pow 'pow1']
[binary file rsqrt_test.pb: Placeholder '0', Rsqrt 'rsqrt']
[binary file sqdiff_test.pb: Placeholders '0' and '1', SquaredDifference 'sqdiff']
[binary file stopgradient_test.pb: Placeholder '0', StopGradient 'stopgradient']
[binary file sub_test.pb: Placeholders '0' and '1', Sub 'sub1']
[binary file tanh_test.pb: Placeholder '0', Tanh 'tanh']
#include <iostream>
#include <vector>
#include <numeric>
#include <migraphx/literal.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/simplify_reshapes.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/eliminate_identity.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/tf.hpp>
#include "test.hpp"
migraphx::program optimize_tf(const std::string& name, bool is_nhwc)
{
auto prog = migraphx::parse_tf(name, is_nhwc);
if(is_nhwc)
migraphx::run_passes(prog,
{migraphx::simplify_reshapes{},
migraphx::dead_code_elimination{},
migraphx::eliminate_identity{}});
return prog;
}
TEST_CASE(add_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 2, 2, 3}});
auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {1, 2, 2, 3}});
p.add_instruction(migraphx::op::add{}, l0, l1);
auto prog = migraphx::parse_tf("add_test.pb", false);
auto prog = optimize_tf("add_test.pb", false);
EXPECT(p == prog);
}
@@ -28,7 +43,38 @@ TEST_CASE(add_bcast_test)
auto l2 = p.add_instruction(migraphx::op::multibroadcast{s0.lens()}, l0);
auto l3 = p.add_instruction(migraphx::op::multibroadcast{s0.lens()}, l1);
p.add_instruction(migraphx::op::add{}, l2, l3);
auto prog = migraphx::parse_tf("add_bcast_test.pb", false);
auto prog = optimize_tf("add_bcast_test.pb", false);
EXPECT(p == prog);
}
TEST_CASE(assert_less_equal_test)
{
migraphx::program p;
migraphx::shape s0{migraphx::shape::float_type, {2, 3}};
auto l0 = p.add_parameter("0", s0);
auto l1 = p.add_parameter("1", s0);
migraphx::literal l{migraphx::shape{migraphx::shape::int32_type, {2}}, {0, 1}};
auto l2 = p.add_literal(l);
p.add_instruction(migraphx::op::add{}, l0, l1);
auto l3 = p.add_instruction(migraphx::op::identity{}, l0, l1);
p.add_instruction(migraphx::op::identity{}, l3, l2);
auto prog = optimize_tf("assert_less_equal_test.pb", false);
EXPECT(p == prog);
}
TEST_CASE(batchmatmul_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 2, 8, 4}});
auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {1, 2, 4, 8}});
auto trans_l0 = p.add_instruction(migraphx::op::transpose{{0, 1, 3, 2}}, l0);
auto trans_l1 = p.add_instruction(migraphx::op::transpose{{0, 1, 3, 2}}, l1);
p.add_instruction(migraphx::op::dot{}, trans_l0, trans_l1);
auto prog = optimize_tf("batchmatmul_test.pb", false);
EXPECT(p == prog);
}
@@ -51,7 +97,7 @@ TEST_CASE(batchnorm_test)
auto l4 = p.add_parameter("4", s0);
auto l1 = p.add_literal(migraphx::literal{s0, const_vals});
p.add_instruction(op, l0, l1, l2, l3, l4);
auto prog = migraphx::parse_tf("batchnorm_test.pb", true);
auto prog = optimize_tf("batchnorm_test.pb", true);
EXPECT(p == prog);
}
@@ -65,7 +111,17 @@ TEST_CASE(biasadd_test)
auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {500}});
auto l2 = p.add_instruction(migraphx::op::broadcast{axis, l0->get_shape().lens()}, l1);
p.add_instruction(migraphx::op::add{}, l0, l2);
auto prog = migraphx::parse_tf("biasadd_test.pb", true);
auto prog = optimize_tf("biasadd_test.pb", true);
EXPECT(p == prog);
}
TEST_CASE(cast_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
p.add_instruction(migraphx::op::convert{migraphx::shape::int32_type}, l0);
auto prog = optimize_tf("cast_test.pb", false);
EXPECT(p == prog);
}
@@ -83,7 +139,7 @@ TEST_CASE(concat_test)
p.add_literal(migraphx::shape{migraphx::shape::int32_type}, std::vector<int>{axis});
p.add_instruction(migraphx::op::concat{static_cast<std::size_t>(axis)}, l0, l1);
auto prog = migraphx::parse_tf("concat_test.pb", false);
auto prog = optimize_tf("concat_test.pb", false);
EXPECT(p == prog);
}
@@ -92,7 +148,7 @@ TEST_CASE(const_test)
{
migraphx::program p;
p.add_literal(migraphx::shape{migraphx::shape::float_type}, std::vector<float>{1.0f});
auto prog = migraphx::parse_tf("constant_test.pb", false);
auto prog = optimize_tf("constant_test.pb", false);
EXPECT(p == prog);
}
@@ -109,12 +165,12 @@ TEST_CASE(conv_test)
migraphx::op::convolution op;
op.padding_mode = migraphx::op::padding_mode_t::same;
op.padding = {1, 1};
op.stride = {1, 1};
op.dilation = {1, 1};
auto l2 = p.add_instruction(migraphx::op::transpose{{0, 3, 1, 2}}, l1);
auto l3 = p.add_instruction(migraphx::op::transpose{{1, 3, 0, 2}}, l2);
p.add_instruction(op, l0, l3);
auto prog = migraphx::parse_tf("conv_test.pb", true);
auto l2 = p.add_instruction(migraphx::op::transpose{{3, 2, 0, 1}}, l1);
p.add_instruction(op, l0, l2);
auto prog = optimize_tf("conv_test.pb", true);
EXPECT(p == prog);
}
@@ -131,15 +187,56 @@ TEST_CASE(depthwiseconv_test)
migraphx::op::convolution op;
op.padding_mode = migraphx::op::padding_mode_t::same;
op.padding = {1, 1};
op.stride = {1, 1};
op.dilation = {1, 1};
op.group = 3;
auto l2 = p.add_instruction(migraphx::op::transpose{{0, 3, 1, 2}}, l1);
auto l3 = p.add_instruction(migraphx::op::transpose{{1, 3, 0, 2}}, l2);
auto l3 = p.add_instruction(migraphx::op::transpose{{3, 2, 0, 1}}, l1);
auto l4 = p.add_instruction(migraphx::op::contiguous{}, l3);
auto l5 = p.add_instruction(migraphx::op::reshape{{3, 1, 3, 3}}, l4);
p.add_instruction(op, l0, l5);
auto prog = migraphx::parse_tf("depthwise_conv_test.pb", true);
auto prog = optimize_tf("depthwise_conv_test.pb", true);
EXPECT(p == prog);
}
TEST_CASE(expanddims_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 4}});
p.add_literal(0);
p.add_instruction(migraphx::op::reshape{{1, 2, 3, 4}}, l0);
auto prog = optimize_tf("expanddims_test.pb", false);
EXPECT(p == prog);
}
TEST_CASE(expanddims_test_neg_dims)
{
// this check makes sure the pb parses negative dim value correctly
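    // e.g. expanding a (2, 3, 4) input at axis -1 yields shape (2, 3, 4, 1)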
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 4}});
p.add_literal(-1);
p.add_instruction(migraphx::op::reshape{{2, 3, 4, 1}}, l0);
auto prog = optimize_tf("expanddims_neg_test.pb", false);
EXPECT(p == prog);
}
TEST_CASE(gather_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 4}});
auto l1 =
p.add_literal(migraphx::literal{migraphx::shape{migraphx::shape::int32_type, {2}}, {1, 1}});
p.add_literal(1);
int axis = 1;
p.add_instruction(migraphx::op::gather{axis}, l0, l1);
auto prog = optimize_tf("gather_test.pb", false);
EXPECT(p == prog);
}
@@ -149,7 +246,7 @@ TEST_CASE(identity_test)
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
p.add_instruction(migraphx::op::identity{}, l0);
auto prog = migraphx::parse_tf("identity_test.pb", false);
auto prog = optimize_tf("identity_test.pb", false);
EXPECT(p == prog);
}
@@ -164,7 +261,7 @@ TEST_CASE(matmul_test)
auto trans_l1 = p.add_instruction(migraphx::op::transpose{{1, 0}}, l1);
p.add_instruction(migraphx::op::dot{}, trans_l0, trans_l1);
auto prog = migraphx::parse_tf("matmul_test.pb", false);
auto prog = optimize_tf("matmul_test.pb", false);
EXPECT(p == prog);
}
@@ -176,12 +273,11 @@ TEST_CASE(mean_test)
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
p.add_literal(l);
p.add_literal(l);
migraphx::op::pooling op;
op.lengths = {16, 16};
auto l3 = p.add_instruction(op, l0);
p.add_instruction(migraphx::op::squeeze{{2, 3}}, l3);
migraphx::op::reduce_mean op{{2, 3}};
p.add_instruction(op, l0);
auto prog = migraphx::parse_tf("mean_test.pb", false);
auto l3 = p.add_instruction(op, l0);
p.add_instruction(migraphx::op::squeeze{{2, 3}}, l3);
auto prog = optimize_tf("mean_test.pb", false);
EXPECT(p == prog);
}
@@ -191,14 +287,11 @@ TEST_CASE(mean_test_nhwc)
migraphx::program p;
migraphx::literal l{migraphx::shape{migraphx::shape::int32_type, {2}}, {1, 2}};
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
p.add_literal(l);
p.add_literal(l);
migraphx::op::pooling op;
op.lengths = {16, 16};
auto l3 = p.add_instruction(op, l0);
p.add_instruction(migraphx::op::squeeze{{2, 3}}, l3);
p.add_instruction(op, l0);
auto prog = migraphx::parse_tf("mean_test_nhwc.pb", true);
auto l1 = p.add_instruction(migraphx::op::transpose{{0, 2, 3, 1}}, l0);
migraphx::op::reduce_mean op{{1, 2}};
auto l2 = p.add_instruction(op, l1);
p.add_instruction(migraphx::op::squeeze{{1, 2}}, l2);
auto prog = optimize_tf("mean_test_nhwc.pb", true);
EXPECT(p == prog);
}
@@ -210,7 +303,24 @@ TEST_CASE(mul_test)
auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {1, 1, 1, 16}});
p.add_instruction(migraphx::op::mul{}, l0, l1);
auto prog = migraphx::parse_tf("mul_test.pb", false);
auto prog = optimize_tf("mul_test.pb", false);
EXPECT(p == prog);
}
TEST_CASE(onehot_test)
{
migraphx::program p;
auto l0 = p.add_literal(
migraphx::literal{migraphx::shape{migraphx::shape::int32_type, {5}}, {1, 1, 1, 1, 1}});
p.add_literal(2);
p.add_literal(1.0f);
p.add_literal(0.0f);
auto l1 = p.add_literal(
migraphx::literal{migraphx::shape{migraphx::shape::float_type, {2, 2}}, {1, 0, 0, 1}});
int axis = 0;
p.add_instruction(migraphx::op::gather{axis}, l1, l0);
auto prog = optimize_tf("onehot_test.pb", false);
EXPECT(p == prog);
}
@@ -232,7 +342,7 @@ TEST_CASE(pack_test)
return p.add_instruction(migraphx::op::unsqueeze{{axis}}, arg);
});
p.add_instruction(migraphx::op::concat{static_cast<size_t>(axis)}, unsqueezed_args);
auto prog = migraphx::parse_tf("pack_test.pb", false);
auto prog = optimize_tf("pack_test.pb", false);
EXPECT(p == prog);
}
@@ -240,12 +350,15 @@ TEST_CASE(pack_test)
TEST_CASE(pack_test_nhwc)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 2, 1, 1}});
auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {1, 2, 1, 1}});
auto l2 = p.add_parameter("2", migraphx::shape{migraphx::shape::float_type, {1, 2, 1, 1}});
std::vector<migraphx::instruction_ref> args{l0, l1, l2};
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 2, 1, 1}});
auto lt0 = p.add_instruction(migraphx::op::transpose{{0, 2, 3, 1}}, l0);
auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {1, 2, 1, 1}});
auto lt1 = p.add_instruction(migraphx::op::transpose{{0, 2, 3, 1}}, l1);
auto l2 = p.add_parameter("2", migraphx::shape{migraphx::shape::float_type, {1, 2, 1, 1}});
auto lt2 = p.add_instruction(migraphx::op::transpose{{0, 2, 3, 1}}, l2);
std::vector<migraphx::instruction_ref> args{lt0, lt1, lt2};
std::vector<migraphx::instruction_ref> unsqueezed_args;
int64_t nchw_axis = 1;
int64_t nchw_axis = 3;
std::transform(args.begin(),
args.end(),
@@ -254,7 +367,7 @@ TEST_CASE(pack_test_nhwc)
return p.add_instruction(migraphx::op::unsqueeze{{nchw_axis}}, arg);
});
p.add_instruction(migraphx::op::concat{static_cast<size_t>(nchw_axis)}, unsqueezed_args);
auto prog = migraphx::parse_tf("pack_test_nhwc.pb", true);
auto prog = optimize_tf("pack_test_nhwc.pb", true);
EXPECT(p == prog);
}
@@ -272,8 +385,18 @@ TEST_CASE(pooling_test)
avg_pool_op.lengths = {2, 2};
max_pool_op.lengths = {2, 2};
p.add_instruction(max_pool_op, l0);
p.add_instruction(avg_pool_op, l0);
auto prog = migraphx::parse_tf("pooling_test.pb", true);
auto prog = optimize_tf("pooling_test.pb", true);
EXPECT(p == prog);
}
TEST_CASE(pow_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 2, 2, 3}});
auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {1, 2, 2, 3}});
p.add_instruction(migraphx::op::pow{}, l0, l1);
auto prog = optimize_tf("pow_test.pb", false);
EXPECT(p == prog);
}
@@ -283,7 +406,7 @@ TEST_CASE(relu_test)
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
p.add_instruction(migraphx::op::relu{}, l0);
auto prog = migraphx::parse_tf("relu_test.pb", false);
auto prog = optimize_tf("relu_test.pb", false);
EXPECT(p == prog);
}
@@ -293,7 +416,7 @@ TEST_CASE(relu6_test)
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
p.add_instruction(migraphx::op::clip{6.0, 0.0}, l0);
auto prog = migraphx::parse_tf("relu6_test.pb", false);
auto prog = optimize_tf("relu6_test.pb", false);
EXPECT(p == prog);
}
@@ -306,7 +429,37 @@ TEST_CASE(reshape_test)
// in tf, the second arg is a literal that contains new dimensions
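    // migraphx::op::reshape stores the dims itself, so the literal is only
    // kept to mirror the instruction stream produced by the parser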
p.add_literal(migraphx::literal{s0, {1, 1, 1, 16}});
p.add_instruction(migraphx::op::reshape{{1, 1, 1, 16}}, l0);
auto prog = migraphx::parse_tf("reshape_test.pb", false);
auto prog = optimize_tf("reshape_test.pb", false);
EXPECT(p == prog);
}
TEST_CASE(rsqrt_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
p.add_instruction(migraphx::op::rsqrt{}, l0);
auto prog = optimize_tf("rsqrt_test.pb", false);
EXPECT(p == prog);
}
TEST_CASE(slice_test)
{
migraphx::program p;
std::size_t num_axes = 2;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {5, 10}});
migraphx::shape s0{migraphx::shape::int32_type, {num_axes}};
p.add_literal(migraphx::literal{s0, {1, 0}});
p.add_literal(migraphx::literal{s0, {2, -1}});
migraphx::op::slice op;
op.starts = {1, 0};
op.ends = {3, 10};
op.axes = std::vector<int64_t>(num_axes);
std::iota(op.axes.begin(), op.axes.end(), 0);
p.add_instruction(op, l0);
auto prog = optimize_tf("slice_test.pb", false);
EXPECT(p == prog);
}
@@ -314,12 +467,20 @@ TEST_CASE(reshape_test)
TEST_CASE(softmax_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3}});
auto dims = l0->get_shape().lens();
auto r = p.add_instruction(migraphx::op::reshape{{long(dims[0]), long(dims[1]), 1, 1}}, l0);
auto s = p.add_instruction(migraphx::op::softmax{}, r);
p.add_instruction(migraphx::op::reshape{{long(dims[0]), long(dims[1])}}, s);
auto prog = migraphx::parse_tf("softmax_test.pb", false);
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3}});
p.add_instruction(migraphx::op::softmax{1}, l0);
auto prog = optimize_tf("softmax_test.pb", false);
EXPECT(p == prog);
}
TEST_CASE(sqdiff_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 2, 2, 3}});
auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {1, 2, 2, 3}});
p.add_instruction(migraphx::op::sqdiff{}, l0, l1);
auto prog = optimize_tf("sqdiff_test.pb", false);
EXPECT(p == prog);
}
@@ -329,7 +490,17 @@ TEST_CASE(squeeze_test)
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 2, 3, 1}});
p.add_instruction(migraphx::op::squeeze{{0, 3}}, l0);
auto prog = migraphx::parse_tf("squeeze_test.pb", false);
auto prog = optimize_tf("squeeze_test.pb", false);
EXPECT(p == prog);
}
TEST_CASE(stopgradient_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
p.add_instruction(migraphx::op::identity{}, l0);
auto prog = optimize_tf("stopgradient_test.pb", false);
EXPECT(p == prog);
}
@@ -338,21 +509,74 @@ TEST_CASE(stridedslice_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 10, 1, 1}});
auto l1 = p.add_instruction(migraphx::op::transpose{{0, 2, 3, 1}}, l0);
std::size_t num_axes = 4;
migraphx::op::slice op;
op.starts = {0, 0, 0, 0};
op.ends = {1, 5, 1, 1};
op.ends = {1, 1, 1, 5};
op.axes = std::vector<int64_t>(num_axes);
std::iota(op.axes.begin(), op.axes.end(), 0);
auto l2 = p.add_instruction(op, l1);
auto shrink_axis = 1;
p.add_instruction(migraphx::op::squeeze{{shrink_axis}}, l2);
auto prog = optimize_tf("stridedslice_test.pb", true);
EXPECT(p == prog);
}
TEST_CASE(stridedslice_masks_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 10, 3, 3}});
std::size_t num_axes = 4;
migraphx::op::slice op;
op.starts = {0, 1, 1, 0};
op.ends = {1, 3, 3, 10};
op.axes = std::vector<int64_t>(num_axes);
std::iota(op.axes.begin(), op.axes.end(), 0);
// add literals for starts, ends, and strides in tf (NHWC format)
p.add_literal(migraphx::shape{migraphx::shape::int32_type, {4}}, std::vector<int>{0, 1, 1, 0});
p.add_literal(migraphx::shape{migraphx::shape::int32_type, {4}}, std::vector<int>{0, 0, 0, 0});
p.add_literal(migraphx::shape{migraphx::shape::int32_type, {4}}, std::vector<int>{1, 1, 1, 5});
p.add_literal(migraphx::shape{migraphx::shape::int32_type, {4}}, std::vector<int>{1, 1, 1, 1});
auto l1 = p.add_instruction(op, l0);
auto shrink_axis = 2;
p.add_instruction(migraphx::op::squeeze{{shrink_axis}}, l1);
auto prog = migraphx::parse_tf("stridedslice_test.pb", true);
auto l1 = p.add_instruction(migraphx::op::transpose{{0, 2, 3, 1}}, l0);
auto l2 = p.add_instruction(op, l1);
p.add_instruction(migraphx::op::transpose{{0, 3, 1, 2}}, l2);
auto prog = migraphx::parse_tf("stridedslice_masks_test.pb", true);
EXPECT(p == prog);
}
TEST_CASE(sub_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 2, 2, 3}});
auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {1, 2, 2, 3}});
p.add_instruction(migraphx::op::sub{}, l0, l1);
auto prog = migraphx::parse_tf("sub_test.pb", false);
EXPECT(p == prog);
}
TEST_CASE(tanh_test)
{
migraphx::program p;
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
    p.add_instruction(migraphx::op::tanh{}, l0);
    auto prog = optimize_tf("tanh_test.pb", false);
EXPECT(p == prog);
}
TEST_CASE(transpose_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
migraphx::shape s0{migraphx::shape::int32_type, {4}};
p.add_literal(migraphx::literal{s0, {0, 2, 3, 1}});
p.add_instruction(migraphx::op::transpose{{0, 2, 3, 1}}, l0);
auto prog = optimize_tf("transpose_test.pb", false);
EXPECT(p == prog);
}
#include <iostream>
#include <vector>
#include <numeric>
#include <migraphx/literal.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/cpu/target.hpp>
#include <migraphx/verify.hpp>
#include <migraphx/quantization.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/propagate_constant.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/onnx.hpp>
#include "test.hpp"
#include <migraphx/half.hpp>
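// quantize() rewrites the selected ops (all by default) to compute in half
// precision, inserting convert instructions around their inputs and outputs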
TEST_CASE(param_add)
{
auto create_program_float = [] {
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
auto p1 = p.add_parameter("x", s);
auto p2 = p.add_parameter("y", s);
p.add_instruction(migraphx::op::add{}, p1, p2);
return p;
};
auto create_program_half = [] {
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
auto p1 = p.add_parameter("x", s);
        auto hp1 = p.insert_instruction(
            std::next(p1), migraphx::op::convert{migraphx::shape::half_type}, p1);
        auto p2 = p.add_parameter("y", s);
        auto hp2 = p.insert_instruction(
            std::next(p2), migraphx::op::convert{migraphx::shape::half_type}, p2);
auto hs = p.add_instruction(migraphx::op::add{}, hp1, hp2);
p.add_instruction(migraphx::op::convert{migraphx::shape::float_type}, hs);
return p;
};
{
auto p1 = create_program_float();
auto p2 = create_program_half();
migraphx::quantize(p1);
EXPECT(p1 == p2);
}
{
auto p1 = create_program_float();
auto p2 = create_program_half();
migraphx::quantize(p1, {"add"});
EXPECT(p1 == p2);
}
}
TEST_CASE(param_add_sub)
{
auto create_program_float = [] {
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
auto p1 = p.add_parameter("x", s);
auto p2 = p.add_parameter("y", s);
auto sum = p.add_instruction(migraphx::op::add{}, p1, p2);
auto diff = p.add_instruction(migraphx::op::sub{}, sum, p2);
p.add_instruction(migraphx::op::add{}, diff, p1);
return p;
};
auto create_program_half_add = [] {
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
auto p1 = p.add_parameter("x", s);
auto hp1 = p.insert_instruction(
std::next(p1), migraphx::op::convert{migraphx::shape::half_type}, p1);
auto p2 = p.add_parameter("y", s);
auto hp2 = p.insert_instruction(
std::next(p2), migraphx::op::convert{migraphx::shape::half_type}, p2);
auto hsum = p.add_instruction(migraphx::op::add{}, hp1, hp2);
auto sum = p.add_instruction(migraphx::op::convert{migraphx::shape::float_type}, hsum);
auto diff = p.add_instruction(migraphx::op::sub{}, sum, p2);
        auto hdiff = p.add_instruction(
            migraphx::op::convert{migraphx::shape::half_type}, diff);
auto res = p.add_instruction(migraphx::op::add{}, hdiff, hp1);
p.add_instruction(migraphx::op::convert{migraphx::shape::float_type}, res);
return p;
};
auto create_program_half_sub = [] {
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
auto p1 = p.add_parameter("x", s);
auto p2 = p.add_parameter("y", s);
auto hp2 = p.insert_instruction(
std::next(p2), migraphx::op::convert{migraphx::shape::half_type}, p2);
auto sum = p.add_instruction(migraphx::op::add{}, p1, p2);
auto hsum = p.add_instruction(migraphx::op::convert{migraphx::shape::half_type}, sum);
auto hdiff = p.add_instruction(migraphx::op::sub{}, hsum, hp2);
auto diff = p.add_instruction(migraphx::op::convert{migraphx::shape::float_type}, hdiff);
p.add_instruction(migraphx::op::add{}, diff, p1);
return p;
};
auto create_program_half_all = [] {
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
auto p1 = p.add_parameter("x", s);
auto hp1 = p.insert_instruction(
std::next(p1), migraphx::op::convert{migraphx::shape::half_type}, p1);
auto p2 = p.add_parameter("y", s);
auto hp2 = p.insert_instruction(
std::next(p2), migraphx::op::convert{migraphx::shape::half_type}, p2);
auto hsum = p.add_instruction(migraphx::op::add{}, hp1, hp2);
auto hdiff = p.add_instruction(migraphx::op::sub{}, hsum, hp2);
auto hres = p.add_instruction(migraphx::op::add{}, hdiff, hp1);
p.add_instruction(migraphx::op::convert{migraphx::shape::float_type}, hres);
return p;
};
{
auto p1 = create_program_float();
auto p2 = create_program_half_add();
migraphx::quantize(p1, {"add"});
EXPECT(p1 == p2);
}
{
auto p1 = create_program_float();
auto p2 = create_program_half_sub();
migraphx::quantize(p1, {"sub"});
EXPECT(p1 == p2);
}
{
auto p1 = create_program_float();
auto p2 = create_program_half_all();
migraphx::quantize(p1);
migraphx::run_passes(p1, {migraphx::dead_code_elimination{}});
EXPECT(p1 == p2);
}
}
TEST_CASE(literal_add)
{
auto create_program_float = [] {
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 3}};
std::vector<float> data(2 * 3);
std::iota(data.begin(), data.end(), 1.0f);
auto l1 = p.add_literal(migraphx::literal(s, data));
auto l2 = p.add_literal(migraphx::literal(s, data));
p.add_instruction(migraphx::op::add{}, l1, l2);
return p;
};
auto create_program_half = [] {
migraphx::program p;
migraphx::shape s{migraphx::shape::half_type, {2, 3}};
std::vector<migraphx::half> data(2 * 3);
std::iota(data.begin(), data.end(), 1.0f);
auto l1 = p.add_literal(migraphx::literal(s, data));
auto l2 = p.add_literal(migraphx::literal(s, data));
auto hs = p.add_instruction(migraphx::op::add{}, l1, l2);
p.add_instruction(migraphx::op::convert{migraphx::shape::float_type}, hs);
return p;
};
{
auto p1 = create_program_float();
auto p2 = create_program_half();
migraphx::quantize(p1, {"all"});
migraphx::run_passes(p1,
{migraphx::propagate_constant{}, migraphx::dead_code_elimination{}});
migraphx::run_passes(p2,
{migraphx::propagate_constant{}, migraphx::dead_code_elimination{}});
EXPECT(p1 == p2);
}
{
auto p1 = create_program_float();
auto p2 = create_program_half();
migraphx::quantize(p1, {"add"});
migraphx::run_passes(p1,
{migraphx::propagate_constant{}, migraphx::dead_code_elimination{}});
migraphx::run_passes(p2,
{migraphx::propagate_constant{}, migraphx::dead_code_elimination{}});
EXPECT(p1 == p2);
}
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }