Commit 94e3a2e4 authored by Shucai Xiao

change size_t to int

parent 26bd92d8
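The diff below applies a mechanical `std::size_t` to `int` substitution across loop indices, dimension vectors, and accessors. As a standalone sketch of the pattern (illustrative only, not MIGraphX code): `std::vector::size()` still returns `std::size_t`, so a signed loop index now compares against an unsigned value, which compilers flag under `-Wsign-compare` unless a cast is added.

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    std::vector<int> input_dims = {2, 1, 3, 1};

    // Before: the index type matches what size() returns.
    for(std::size_t i = 0; i < input_dims.size(); i++)
        if(input_dims.at(i) == 1)
            std::cout << "axis " << i << " has extent 1\n";

    // After: a signed index. The explicit cast avoids the
    // signed/unsigned comparison warning that the uncast form
    // (i < input_dims.size(), as in the diff) would produce.
    for(int i = 0; i < static_cast<int>(input_dims.size()); i++)
        if(input_dims.at(i) == 1)
            std::cout << "axis " << i << " has extent 1\n";
}
```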
......@@ -23,7 +23,7 @@ struct parse_squeeze : op_parser<parse_squeeze>
if(op_axes.empty()) // no squeeze_dims provided, remove any dim that equals 1
{
for(size_t i = 0; i < input_dims.size(); i++)
for(int i = 0; i < input_dims.size(); i++)
{
if(input_dims.at(i) == 1)
{
......
......@@ -82,7 +82,7 @@ instruction_ref tf_parser::node_info::add_broadcastable_binary_op(const std::str
return add_common_op(*mm, make_op(op_name), {arg0, arg1});
}
int64_t tf_parser::parse_axis(const int64_t dim, const size_t num_dims) const
int64_t tf_parser::parse_axis(const int64_t dim, const int num_dims) const
{
int64_t new_dim = dim;
if(is_nhwc and num_dims >= 4)
......@@ -111,11 +111,11 @@ instruction_ref tf_parser::node_info::add_literal(literal l) const
return mm->add_literal(std::move(l));
}
std::vector<int64_t> get_axes_from_mask(const size_t num_axes, const uint32_t mask)
std::vector<int64_t> get_axes_from_mask(const int num_axes, const uint32_t mask)
{
uint32_t bitwise_compare = 1;
std::vector<int64_t> axes;
for(size_t i = 0; i < num_axes; i++)
for(int i = 0; i < num_axes; i++)
{
// the LSB corresponds to axis 0 when determining which axes to begin
if(((mask >> i) & bitwise_compare) == 1)
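For clarity, a self-contained sketch of the mask-decoding loop above (an illustrative reimplementation, not the MIGraphX source): each set bit in the mask selects an axis, with the least-significant bit mapping to axis 0, so a mask of 0b0101 yields axes {0, 2}.

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Collect the axes whose bits are set in the mask; bit i selects axis i.
std::vector<int64_t> axes_from_mask(int num_axes, uint32_t mask)
{
    std::vector<int64_t> axes;
    for(int i = 0; i < num_axes; i++)
        if(((mask >> i) & 1u) == 1)
            axes.push_back(i);
    return axes;
}

int main()
{
    for(auto axis : axes_from_mask(4, 0b0101u))
        std::cout << axis << " "; // prints: 0 2
}
```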
......@@ -165,9 +165,9 @@ static tf_parser::attribute_map get_attributes(const tensorflow::NodeDef& node)
return result;
}
static std::vector<size_t> parse_dims(const tensorflow::TensorShapeProto& s)
static std::vector<int> parse_dims(const tensorflow::TensorShapeProto& s)
{
std::vector<size_t> dims;
std::vector<int> dims;
auto input_dims = s.dim();
std::transform(input_dims.begin(),
input_dims.end(),
......@@ -178,7 +178,7 @@ static std::vector<size_t> parse_dims(const tensorflow::TensorShapeProto& s)
template <class T>
static std::vector<T> get_data_vals(const google::protobuf::RepeatedField<T>& data,
const size_t& shape_size)
const int& shape_size)
{
std::vector<T> data_vals(shape_size);
// check if shape has enough data values given existing fields
......@@ -193,7 +193,7 @@ static std::vector<T> get_data_vals(const google::protobuf::RepeatedField<T>& da
template <class T>
static literal
create_literal(shape::type_t shape_type, const std::vector<size_t>& dims, std::vector<T> data)
create_literal(shape::type_t shape_type, const std::vector<int>& dims, std::vector<T> data)
{
// assume if explicit value is mentioned in protobuf and dim size <= 1, treat as scalar
if(dims.empty() or (dims.size() == 1 and dims.front() == 1))
......@@ -245,7 +245,7 @@ void tf_parser::parse_graph(const tensorflow::GraphDef& graph)
const std::string& name = input.name();
attribute_map input_attrs = get_attributes(input);
shape::type_t shape_type = parse_type(input_attrs.at("dtype").type());
std::vector<size_t> dims = parse_dims(input_attrs.at("shape").shape());
std::vector<int> dims = parse_dims(input_attrs.at("shape").shape());
if(contains(map_input_dims, name))
{
......@@ -341,7 +341,7 @@ void tf_parser::parse_node(const std::string& name)
assert(!result.empty());
// First output has no ":" delimiter
instructions[name] = result.front();
for(size_t i = 1; i < result.size(); i++)
for(int i = 1; i < result.size(); i++)
{
instructions[name + ":" + std::to_string(i)] = result.at(i);
}
......@@ -423,8 +423,8 @@ shape::type_t tf_parser::parse_type(const tensorflow::DataType t) const
literal tf_parser::parse_tensor(const tensorflow::TensorProto& t) const
{
std::vector<size_t> dims = parse_dims(t.tensor_shape());
size_t shape_size = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<size_t>());
std::vector<int> dims = parse_dims(t.tensor_shape());
int shape_size = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<int>());
if(!t.tensor_content().empty()) // has raw data
{
const std::string& s = t.tensor_content();
......
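The `shape_size` computation above is the usual product-of-dimensions element count; the initial value `1` makes an empty `dims` (a scalar) come out as one element. A minimal check, with the caveat that an `int` accumulator overflows (undefined behavior) once the product exceeds `INT_MAX`, where the previous `std::size_t` version had far more headroom:

```cpp
#include <functional>
#include <numeric>
#include <vector>

int main()
{
    std::vector<int> dims = {2, 3, 4};
    // Product of all dimensions; the initial value 1 handles the
    // empty (scalar) case as a single element.
    int shape_size =
        std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<int>());
    return shape_size == 24 ? 0 : 1;
}
```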
......@@ -17,7 +17,7 @@ struct value_base_impl : cloneable<value_base_impl>
virtual const cpp_type* if_##vt() const { return nullptr; }
MIGRAPHX_VISIT_VALUE_TYPES(MIGRAPHX_VALUE_GENERATE_BASE_FUNCTIONS)
virtual std::vector<value>* if_array() { return nullptr; }
virtual std::unordered_map<std::string, std::size_t>* if_object() { return nullptr; }
virtual std::unordered_map<std::string, int>* if_object() { return nullptr; }
virtual value_base_impl* if_value() const { return nullptr; }
value_base_impl() = default;
value_base_impl(const value_base_impl&) = default;
......@@ -47,15 +47,15 @@ struct array_value_holder : value_base_impl::derive<array_value_holder>
struct object_value_holder : value_base_impl::derive<object_value_holder>
{
object_value_holder() {}
object_value_holder(std::vector<value> d, std::unordered_map<std::string, std::size_t> l)
object_value_holder(std::vector<value> d, std::unordered_map<std::string, int> l)
: data(std::move(d)), lookup(std::move(l))
{
}
virtual value::type_t get_type() override { return value::object_type; }
virtual std::vector<value>* if_array() override { return &data; }
virtual std::unordered_map<std::string, std::size_t>* if_object() override { return &lookup; }
virtual std::unordered_map<std::string, int>* if_object() override { return &lookup; }
std::vector<value> data;
std::unordered_map<std::string, std::size_t> lookup;
std::unordered_map<std::string, int> lookup;
};
value::value(const value& rhs) : x(rhs.x ? rhs.x->clone() : nullptr), key(rhs.key) {}
......@@ -85,8 +85,8 @@ void set_vector(std::shared_ptr<value_base_impl>& x,
}
else
{
std::unordered_map<std::string, std::size_t> lookup;
std::size_t i = 0;
std::unordered_map<std::string, int> lookup;
int i = 0;
for(auto&& e : v)
{
lookup[e.get_key()] = i;
......@@ -251,7 +251,7 @@ bool value::contains(const std::string& pkey) const
return false;
return true;
}
std::size_t value::size() const
int value::size() const
{
auto* a = if_array_impl(x);
if(a == nullptr)
......@@ -307,14 +307,14 @@ const value& value::back() const
assert(this->size() > 0);
return *std::prev(end());
}
value& value::at(std::size_t i)
value& value::at(int i)
{
auto* a = if_array_impl(x);
if(a == nullptr)
MIGRAPHX_THROW("Not an array");
return a->at(i);
}
const value& value::at(std::size_t i) const
const value& value::at(int i) const
{
auto* a = if_array_impl(x);
if(a == nullptr)
......@@ -339,12 +339,12 @@ const value& value::at(const std::string& pkey) const
MIGRAPHX_THROW("Key not found: " + pkey);
return *r;
}
value& value::operator[](std::size_t i)
value& value::operator[](int i)
{
assert(i < this->size());
return *(begin() + i);
}
const value& value::operator[](std::size_t i) const
const value& value::operator[](int i) const
{
assert(i < this->size());
return *(begin() + i);
......@@ -352,13 +352,13 @@ const value& value::operator[](std::size_t i) const
value& value::operator[](const std::string& pkey) { return *emplace(pkey, nullptr).first; }
void value::clear() { get_array_throw(x).clear(); }
void value::resize(std::size_t n)
void value::resize(int n)
{
if(not is_array())
MIGRAPHX_THROW("Expected an array.");
get_array_impl(x).resize(n);
}
void value::resize(std::size_t n, const value& v)
void value::resize(int n, const value& v)
{
if(not is_array())
MIGRAPHX_THROW("Expected an array.");
......
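The `object_value_holder` changes above keep the same design: object entries live in a `std::vector` (preserving insertion order) while an `std::unordered_map` records each key's index for O(1) lookup. A stripped-down standalone analogue of that pattern (hypothetical names, not the MIGraphX types):

```cpp
#include <cassert>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

// Insertion-ordered key/value store with O(1) lookup by key, mirroring
// the data/lookup pair in object_value_holder.
struct object_store
{
    std::vector<std::pair<std::string, int>> data;
    std::unordered_map<std::string, int> lookup;

    void insert(std::string key, int v)
    {
        lookup[key] = static_cast<int>(data.size());
        data.emplace_back(std::move(key), v);
    }
    int at(const std::string& key) const { return data.at(lookup.at(key)).second; }
};

int main()
{
    object_store s;
    s.insert("offset", 16);
    s.insert("stride", 4);
    assert(s.at("stride") == 4);
    assert(s.data.front().first == "offset"); // insertion order preserved
}
```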
......@@ -7,7 +7,7 @@
struct record_event
{
std::size_t event = 0;
int event = 0;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
......@@ -24,7 +24,7 @@ struct record_event
struct wait_event
{
std::size_t event = 0;
int event = 0;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
......@@ -41,7 +41,7 @@ struct wait_event
struct set_stream
{
std::size_t stream = 0;
int stream = 0;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
......@@ -58,14 +58,14 @@ struct set_stream
struct test_stream_model
{
std::size_t max_stream = 0;
std::unordered_map<migraphx::instruction_ref, std::size_t> ins2stream{};
std::size_t get_nstream() const { return max_stream + 1; }
std::size_t get_stream(migraphx::instruction_ref ins) const { return ins2stream.at(ins); }
std::size_t get_event_id(migraphx::instruction_ref ins) const
int max_stream = 0;
std::unordered_map<migraphx::instruction_ref, int> ins2stream{};
int get_nstream() const { return max_stream + 1; }
int get_stream(migraphx::instruction_ref ins) const { return ins2stream.at(ins); }
int get_event_id(migraphx::instruction_ref ins) const
{
auto v = ins->get_operator().to_value();
return v["event"].to<std::size_t>();
return v["event"].to<int>();
}
bool has_stream(migraphx::instruction_ref ins) const { return ins2stream.count(ins) > 0; }
bool is_record(migraphx::instruction_ref ins) const { return ins->name() == "record_event"; }
......@@ -76,8 +76,8 @@ struct program_model
{
migraphx::program p;
migraphx::module* mm = p.get_main_module();
std::unordered_map<migraphx::instruction_ref, std::size_t> ins2stream{};
std::size_t max_stream = 0;
std::unordered_map<migraphx::instruction_ref, int> ins2stream{};
int max_stream = 0;
template <class... Ts>
migraphx::instruction_ref add_literal(Ts... xs)
......@@ -92,7 +92,7 @@ struct program_model
}
template <class... Ts>
migraphx::instruction_ref add_instruction_stream(std::size_t n, Ts... xs)
migraphx::instruction_ref add_instruction_stream(int n, Ts... xs)
{
max_stream = std::max(max_stream, n);
auto ins = mm->add_instruction(xs...);
......@@ -107,7 +107,7 @@ struct program_model
}
template <class... Ts>
migraphx::instruction_ref add_return_stream(std::size_t n, Ts... xs)
migraphx::instruction_ref add_return_stream(int n, Ts... xs)
{
max_stream = std::max(max_stream, n);
auto ins = mm->add_return({xs...});
......
......@@ -148,8 +148,8 @@ TEST_CASE(scalar_shape)
TEST_CASE(strided_shape)
{
std::vector<std::size_t> lens = {2, 2};
std::vector<std::size_t> strides = {1, 2};
std::vector<int> lens = {2, 2};
std::vector<int> strides = {1, 2};
auto s = migraphx::shape(migraphx_shape_float_type, lens, strides);
EXPECT(s.lengths() == lens);
EXPECT(s.strides() == strides);
......@@ -169,9 +169,9 @@ TEST_CASE(set_loop_default_iter_num)
option.set_default_loop_iterations(15);
auto p = migraphx::parse_onnx("loop_default_test.onnx", option);
auto out_shapes = p.get_output_shapes();
std::vector<std::size_t> out_lens0 = {1};
std::vector<int> out_lens0 = {1};
EXPECT(out_shapes[0].lengths() == out_lens0);
std::vector<std::size_t> out_lens1 = {15, 1};
std::vector<int> out_lens1 = {15, 1};
EXPECT(out_shapes[1].lengths() == out_lens1);
}
......
......@@ -52,7 +52,7 @@ TEST_CASE(if_pl_test)
auto output = outputs[0];
auto lens = output.get_shape().lengths();
auto elem_num =
std::accumulate(lens.begin(), lens.end(), 1, std::multiplies<std::size_t>());
std::accumulate(lens.begin(), lens.end(), 1, std::multiplies<int>());
float* data_ptr = reinterpret_cast<float*>(output.data());
std::vector<float> ret(data_ptr, data_ptr + elem_num);
......@@ -101,14 +101,14 @@ TEST_CASE(loop_test)
auto output = outputs[0];
auto lens = output.get_shape().lengths();
auto elem_num =
std::accumulate(lens.begin(), lens.end(), 1, std::multiplies<std::size_t>());
std::accumulate(lens.begin(), lens.end(), 1, std::multiplies<int>());
float* data_ptr = reinterpret_cast<float*>(output.data());
std::vector<std::vector<float>> ret;
ret.push_back({data_ptr, data_ptr + elem_num});
output = outputs[1];
lens = output.get_shape().lengths();
elem_num = std::accumulate(lens.begin(), lens.end(), 1, std::multiplies<std::size_t>());
elem_num = std::accumulate(lens.begin(), lens.end(), 1, std::multiplies<int>());
data_ptr = reinterpret_cast<float*>(output.data());
ret.push_back({data_ptr, data_ptr + elem_num});
......
......@@ -12,7 +12,7 @@ TEST_CASE(load_tf)
TEST_CASE(load_tf_default_dim)
{
migraphx::tf_options tf_options;
size_t batch = 2;
int batch = 2;
tf_options.set_default_dim_value(batch);
tf_options.set_nhwc();
auto p = migraphx::parse_tf("conv_batch_test.pb", tf_options);
......@@ -24,7 +24,7 @@ TEST_CASE(load_tf_default_dim)
TEST_CASE(load_tf_param_shape)
{
migraphx::tf_options tf_options;
std::vector<size_t> new_shape{1, 3};
std::vector<int> new_shape{1, 3};
tf_options.set_input_parameter_shape("0", new_shape);
tf_options.set_input_parameter_shape("1", new_shape);
auto p = migraphx::parse_tf("add_test.pb", tf_options);
......
......@@ -6,7 +6,7 @@
#include <basic_ops.hpp>
#include <test.hpp>
void run_pass(migraphx::module& m, std::size_t align = 32)
void run_pass(migraphx::module& m, int align = 32)
{
migraphx::run_passes(
m, {migraphx::eliminate_allocation{"allocate", align}, migraphx::dead_code_elimination{}});
......
......@@ -11,7 +11,7 @@
struct concat
{
concat(std::size_t axis) { op.axis = axis; }
concat(int axis) { op.axis = axis; }
migraphx::op::concat op;
template <class Self, class F>
......@@ -105,7 +105,7 @@ struct simple_op
template <class... Ts>
migraphx::shape create_shape(Ts... xs)
{
return migraphx::shape{migraphx::shape::float_type, {std::size_t(xs)...}};
return migraphx::shape{migraphx::shape::float_type, {int(xs)...}};
}
using load = migraphx::op::load;
......@@ -120,7 +120,7 @@ TEST_CASE(simple)
auto m1 = m.add_instruction(simple_op{}, a1);
auto a2 = m.add_instruction(allocate{create_shape(1)});
auto m2 = m.add_instruction(simple_op{}, a2);
std::size_t axis = 0;
int axis = 0;
auto a3 = m.add_instruction(allocate{create_shape(2)});
m.add_instruction(concat(axis), m1, m2, a3);
return m;
......@@ -153,7 +153,7 @@ TEST_CASE(negative_axis1)
auto m1 = m.add_instruction(simple_op{}, a1);
auto a2 = m.add_instruction(allocate{create_shape(2, 2)});
auto m2 = m.add_instruction(simple_op{}, a2);
std::size_t axis = -1;
int axis = -1;
auto a3 = m.add_instruction(allocate{create_shape(4, 2)});
m.add_instruction(concat(axis), m1, m2, a3);
return m;
......@@ -176,7 +176,7 @@ TEST_CASE(negative_axis2)
auto m1 = m.add_instruction(simple_op{}, a1);
auto a2 = m.add_instruction(allocate{create_shape(2, 2)});
auto m2 = m.add_instruction(simple_op{}, a2);
std::size_t axis = -2;
int axis = -2;
auto a3 = m.add_instruction(allocate{create_shape(4, 2)});
m.add_instruction(concat(axis), m1, m2, a3);
return m;
......@@ -209,7 +209,7 @@ TEST_CASE(negative_axis3)
auto m1 = m.add_instruction(simple_op{}, a1);
auto a2 = m.add_instruction(allocate{create_shape(1, 2, 2)});
auto m2 = m.add_instruction(simple_op{}, a2);
std::size_t axis = -2;
int axis = -2;
auto a3 = m.add_instruction(allocate{create_shape(1, 4, 2)});
m.add_instruction(concat(axis), m1, m2, a3);
return m;
......@@ -242,7 +242,7 @@ TEST_CASE(reversed)
auto m1 = m.add_instruction(simple_op{}, a1);
auto a2 = m.add_instruction(allocate{create_shape(1)});
auto m2 = m.add_instruction(simple_op{}, a2);
std::size_t axis = 0;
int axis = 0;
auto a3 = m.add_instruction(allocate{create_shape(2)});
m.add_instruction(concat(axis), m2, m1, a3);
return m;
......@@ -273,7 +273,7 @@ TEST_CASE(nested)
auto m1 = m.add_instruction(simple_op{}, a1);
auto a2 = m.add_instruction(allocate{create_shape(1)});
auto m2 = m.add_instruction(simple_op{}, a2);
std::size_t axis = 0;
int axis = 0;
auto a3 = m.add_instruction(allocate{create_shape(2)});
return m.add_instruction(concat(axis), m1, m2, a3);
};
......@@ -281,7 +281,7 @@ TEST_CASE(nested)
migraphx::module m;
auto concat1 = concat_test_program(m);
auto concat2 = concat_test_program(m);
std::size_t axis = 0;
int axis = 0;
auto a1 = m.add_instruction(allocate{create_shape(4)});
m.add_instruction(concat(axis), concat1, concat2, a1);
return m;
......@@ -324,7 +324,7 @@ TEST_CASE(basic)
auto a3 =
m.add_instruction(allocate{migraphx::shape{migraphx::shape::float_type, {1, 5, 8, 8}}});
auto p3 = m.add_instruction(simple_op{}, a3);
std::size_t axis = 1;
int axis = 1;
auto a4 = m.add_instruction(
allocate{migraphx::shape{migraphx::shape::float_type, {1, 10, 8, 8}}});
m.add_instruction(concat(axis), m1, m2, p3, a4);
......@@ -367,7 +367,7 @@ TEST_CASE(wont_work)
auto a3 =
m.add_instruction(allocate{migraphx::shape{migraphx::shape::float_type, {2, 5, 8, 8}}});
auto p3 = m.add_instruction(simple_op{}, a3);
std::size_t axis = 1;
int axis = 1;
auto a4 = m.add_instruction(
allocate{migraphx::shape{migraphx::shape::float_type, {2, 10, 8, 8}}});
m.add_instruction(concat(axis), m1, m2, p3, a4);
......@@ -384,7 +384,7 @@ TEST_CASE(wont_work)
auto a3 =
m.add_instruction(allocate{migraphx::shape{migraphx::shape::float_type, {2, 5, 8, 8}}});
auto p3 = m.add_instruction(simple_op{}, a3);
std::size_t axis = 1;
int axis = 1;
auto a4 = m.add_instruction(
allocate{migraphx::shape{migraphx::shape::float_type, {2, 10, 8, 8}}});
m.add_instruction(concat(axis), m1, m2, p3, a4);
......
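One behavioral difference in the hunks above: with the old `std::size_t axis = -1;`, the initializer wrapped modulo 2^64 to `SIZE_MAX`, and the negative value was presumably recovered only when later converted back to a signed attribute type; with `int` the variable holds -1 directly. A standalone illustration of the wrap:

```cpp
#include <cstddef>
#include <iostream>

int main()
{
    std::size_t unsigned_axis = -1; // wraps to SIZE_MAX (18446744073709551615 on 64-bit)
    int signed_axis = -1;           // stays -1
    std::cout << unsigned_axis << "\n" << signed_axis << "\n";
}
```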
......@@ -17,9 +17,9 @@ void run_pass(migraphx::module& m)
}
migraphx::instruction_ref
create_im2col(migraphx::instruction_ref& l_img, size_t channels, migraphx::module& m)
create_im2col(migraphx::instruction_ref& l_img, int channels, migraphx::module& m)
{
size_t f[2] = {1, 1};
int f[2] = {1, 1};
std::vector<int32_t> weights(channels * f[0] * f[1]);
migraphx::shape s_weights{migraphx::shape::int32_type, {1, channels, f[0], f[1]}};
auto l_weights = m.add_literal(migraphx::literal{s_weights, weights});
......@@ -28,7 +28,7 @@ create_im2col(migraphx::instruction_ref& l_img, size_t channels, migraphx::modul
migraphx::instruction_ref
create_conv(migraphx::instruction_ref& l_img,
size_t channels,
int channels,
migraphx::module& m,
migraphx::op::padding_mode_t padding_mode = migraphx::op::padding_mode_t::default_)
{
......@@ -43,8 +43,8 @@ create_conv(migraphx::instruction_ref& l_img,
TEST_CASE(rewrite_pad)
{
migraphx::module m;
size_t img_dim[2] = {2, 2};
size_t channels = 1;
int img_dim[2] = {2, 2};
int channels = 1;
std::vector<int32_t> input(channels * img_dim[0] * img_dim[1]);
std::iota(input.begin(), input.end(), 0);
......@@ -69,9 +69,9 @@ TEST_CASE(rewrite_pad)
auto om1 = l1->get_operator().to_value();
auto om2 = l2->get_operator().to_value();
EXPECT(op0["padding"].to_vector<std::size_t>() == std::vector<std::size_t>{1, 1, 1, 1});
EXPECT(om1["padding"].to_vector<std::size_t>() == std::vector<std::size_t>{1, 1, 1, 1});
EXPECT(om2["padding"].to_vector<std::size_t>() == std::vector<std::size_t>{1, 1, 1, 1});
EXPECT(op0["padding"].to_vector<int>() == std::vector<int>{1, 1, 1, 1});
EXPECT(om1["padding"].to_vector<int>() == std::vector<int>{1, 1, 1, 1});
EXPECT(om2["padding"].to_vector<int>() == std::vector<int>{1, 1, 1, 1});
EXPECT(std::none_of(
m.begin(), m.end(), [](const migraphx::instruction& ins) { return ins.name() == "pad"; }));
......@@ -81,8 +81,8 @@ TEST_CASE(rewrite_pad_im2col_asymmetric)
{
migraphx::module m;
size_t img_dim[2] = {2, 2};
size_t channels = 1;
int img_dim[2] = {2, 2};
int channels = 1;
std::vector<int32_t> input(channels * img_dim[0] * img_dim[1]);
std::iota(input.begin(), input.end(), 0);
......@@ -98,7 +98,7 @@ TEST_CASE(rewrite_pad_im2col_asymmetric)
EXPECT(l0->get_shape() == s0);
auto op0 = l0->get_operator().to_value();
EXPECT(op0["padding"].to_vector<std::size_t>() == std::vector<std::size_t>{0, 0, 2, 2});
EXPECT(op0["padding"].to_vector<int>() == std::vector<int>{0, 0, 2, 2});
run_pass(m);
EXPECT(std::none_of(
......
......@@ -13,10 +13,10 @@ TEST_CASE(gpu_context)
EXPECT(v.size() == 2);
EXPECT(v.contains("events"));
EXPECT(v.at("events").without_key().to<std::size_t>() == 0);
EXPECT(v.at("events").without_key().to<int>() == 0);
EXPECT(v.contains("streams"));
EXPECT(v.at("streams").without_key().to<std::size_t>() == 3);
EXPECT(v.at("streams").without_key().to<int>() == 3);
migraphx::gpu::context g_ctx;
g_ctx.from_value(v);
......
......@@ -2,25 +2,25 @@
#include <migraphx/gpu/pack_args.hpp>
template <class T>
std::size_t packed_sizes()
int packed_sizes()
{
return sizeof(T);
}
template <class T, class U, class... Ts>
std::size_t packed_sizes()
int packed_sizes()
{
return sizeof(T) + packed_sizes<U, Ts...>();
}
template <class... Ts>
std::size_t sizes()
int sizes()
{
return migraphx::gpu::pack_args({Ts{}...}).size();
}
template <class... Ts>
std::size_t padding()
int padding()
{
EXPECT(sizes<Ts...>() >= packed_sizes<Ts...>());
return sizes<Ts...>() - packed_sizes<Ts...>();
......
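The `packed_sizes` recursion above sums `sizeof` over a parameter pack, so `padding<Ts...>()` measures how many alignment bytes `pack_args` inserts beyond the raw field sizes. A consolidated sketch of the post-change version with a worked number (a C++17 fold, `(sizeof(Ts) + ... + 0)`, would be equivalent):

```cpp
#include <iostream>

// Base case: a single type contributes its own size.
template <class T>
int packed_sizes()
{
    return sizeof(T);
}

// Recursive case: peel off one type and recurse on the rest.
template <class T, class U, class... Ts>
int packed_sizes()
{
    return sizeof(T) + packed_sizes<U, Ts...>();
}

int main()
{
    // On a typical LP64 target: 1 + 4 + 8 = 13 bytes before padding.
    std::cout << packed_sizes<char, int, double>() << "\n";
}
```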
......@@ -285,7 +285,7 @@ inline std::ostream& operator<<(std::ostream& os, const color& c)
#ifndef _WIN32
static const bool use_color = isatty(STDOUT_FILENO) != 0;
if(use_color)
return os << "\033[" << static_cast<std::size_t>(c) << "m";
return os << "\033[" << static_cast<int>(c) << "m";
#endif
return os;
}
......@@ -615,7 +615,7 @@ struct driver
[](const std::string& name) -> std::vector<std::string> { return {name}; };
std::vector<argument> arguments = {};
std::vector<std::string> failed = {};
std::size_t ran = 0;
int ran = 0;
bool quiet = false;
};
......
......@@ -16,9 +16,9 @@ void run_pass(migraphx::module& m)
}
migraphx::instruction_ref
create_im2col(migraphx::instruction_ref& l_img, size_t channels, migraphx::module& m)
create_im2col(migraphx::instruction_ref& l_img, int channels, migraphx::module& m)
{
size_t f[2] = {1, 1};
int f[2] = {1, 1};
std::vector<int32_t> weights(channels * f[0] * f[1]);
migraphx::shape s_weights{migraphx::shape::int32_type, {1, channels, f[0], f[1]}};
auto l_weights = m.add_literal(migraphx::literal{s_weights, weights});
......@@ -28,7 +28,7 @@ create_im2col(migraphx::instruction_ref& l_img, size_t channels, migraphx::modul
migraphx::instruction_ref
create_conv(migraphx::instruction_ref& l_img,
size_t channels,
int channels,
migraphx::module& m,
migraphx::op::padding_mode_t padding_mode = migraphx::op::padding_mode_t::default_)
{
......@@ -44,8 +44,8 @@ create_conv(migraphx::instruction_ref& l_img,
TEST_CASE(rewrite_pad)
{
migraphx::module m;
size_t img_dim[2] = {2, 2};
size_t channels = 1;
int img_dim[2] = {2, 2};
int channels = 1;
std::vector<int32_t> input(channels * img_dim[0] * img_dim[1]);
std::iota(input.begin(), input.end(), 0);
......@@ -68,8 +68,8 @@ TEST_CASE(rewrite_pad_symmetric)
{
migraphx::module m;
size_t img_dim[2] = {2, 2};
size_t channels = 1;
int img_dim[2] = {2, 2};
int channels = 1;
std::vector<int32_t> input(channels * img_dim[0] * img_dim[1]);
std::iota(input.begin(), input.end(), 0);
......
......@@ -46,15 +46,15 @@ bool no_allocate(const migraphx::module& m)
return std::none_of(m.begin(), m.end(), [](auto&& ins) { return ins.name() == "allocate"; });
}
bool is_overlap(std::pair<std::size_t, std::size_t> x, std::pair<std::size_t, std::size_t> y)
bool is_overlap(std::pair<int, int> x, std::pair<int, int> y)
{
return std::max(x.first, y.first) < std::min(x.second, y.second);
}
std::pair<std::size_t, std::size_t> get_load_interval(migraphx::instruction_ref a)
std::pair<int, int> get_load_interval(migraphx::instruction_ref a)
{
auto v = a->get_operator().to_value();
auto offset = v.at("offset").to<std::size_t>();
auto offset = v.at("offset").to<int>();
auto s = migraphx::from_value<migraphx::shape>(v.at("shape"));
return {offset, offset + s.bytes()};
}
......
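The `is_overlap` predicate above is the standard check for half-open intervals: `[a, b)` and `[c, d)` intersect exactly when `max(a, c) < min(b, d)`. A self-contained sanity check:

```cpp
#include <algorithm>
#include <cassert>
#include <utility>

// Half-open byte ranges [first, second) overlap iff the larger start
// lies strictly below the smaller end.
bool is_overlap(std::pair<int, int> x, std::pair<int, int> y)
{
    return std::max(x.first, y.first) < std::min(x.second, y.second);
}

int main()
{
    assert(is_overlap({0, 8}, {4, 12}));  // share [4, 8)
    assert(!is_overlap({0, 8}, {8, 16})); // touch but do not overlap
}
```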
......@@ -75,7 +75,7 @@ TEST_CASE(gather_test_1)
EXPECT(m1 == m2);
}
migraphx::module create_padded_op(const std::vector<size_t>& pad_vals)
migraphx::module create_padded_op(const std::vector<int>& pad_vals)
{
migraphx::module m;
migraphx::shape s{migraphx::shape::float_type, {2, 3, 4, 5}};
......
......@@ -33,11 +33,11 @@ migraphx::program optimize_onnx(const std::string& name, bool eliminate_deadcode
TEST_CASE(rnn_test_bidirectional)
{
std::size_t sl = 5; // sequence len
std::size_t bs = 3; // batch size
std::size_t hs = 20; // hidden size
std::size_t is = 10; // input size
std::size_t nd = 2; // num directions
int sl = 5; // sequence len
int bs = 3; // batch size
int hs = 20; // hidden size
int is = 10; // input size
int nd = 2; // num directions
float clip = 0.0f;
migraphx::shape seq_shape{migraphx::shape::float_type, {sl, bs, is}};
migraphx::shape w_shape{migraphx::shape::float_type, {nd, hs, is}};
......@@ -79,11 +79,11 @@ TEST_CASE(rnn_test_bidirectional)
TEST_CASE(rnn_test_one_direction)
{
std::size_t sl = 5; // sequence len
std::size_t bs = 3; // batch size
std::size_t hs = 20; // hidden size
std::size_t is = 10; // input size
std::size_t nd = 1; // num directions
int sl = 5; // sequence len
int bs = 3; // batch size
int hs = 20; // hidden size
int is = 10; // input size
int nd = 1; // num directions
float clip = 0.0f;
migraphx::shape seq_shape{migraphx::shape::float_type, {sl, bs, is}};
migraphx::shape w_shape{migraphx::shape::float_type, {nd, hs, is}};
......@@ -220,11 +220,11 @@ TEST_CASE(rnn_test_one_direction)
TEST_CASE(gru_test)
{
std::size_t sl = 5; // sequence len
std::size_t bs = 3; // batch size
std::size_t hs = 20; // hidden size
std::size_t is = 10; // input size
std::size_t nd = 2; // num directions
int sl = 5; // sequence len
int bs = 3; // batch size
int hs = 20; // hidden size
int is = 10; // input size
int nd = 2; // num directions
float clip = 0.0f;
// forward
{
......@@ -352,11 +352,11 @@ TEST_CASE(gru_test)
TEST_CASE(gru_test_args)
{
std::size_t sl = 5; // sequence len
std::size_t bs = 3; // batch size
std::size_t hs = 20; // hidden size
std::size_t is = 10; // input size
std::size_t nd = 2; // num directions
int sl = 5; // sequence len
int bs = 3; // batch size
int hs = 20; // hidden size
int is = 10; // input size
int nd = 2; // num directions
float clip = 0.0f;
// 3 arguments
......@@ -474,11 +474,11 @@ TEST_CASE(gru_test_args)
TEST_CASE(gru_test_actv_funcs)
{
std::size_t sl = 5; // sequence len
std::size_t bs = 3; // batch size
std::size_t hs = 20; // hidden size
std::size_t is = 10; // input size
std::size_t nd = 2; // num directions
int sl = 5; // sequence len
int bs = 3; // batch size
int hs = 20; // hidden size
int is = 10; // input size
int nd = 2; // num directions
float clip = 0.0f;
// bidirection, 0 actv function
{
......@@ -733,11 +733,11 @@ TEST_CASE(gru_test_actv_funcs)
TEST_CASE(lstm_forward)
{
std::size_t sl = 5; // sequence len
std::size_t bs = 3; // batch size
std::size_t hs = 20; // hidden size
std::size_t is = 10; // input size
std::size_t nd = 1; // num directions
int sl = 5; // sequence len
int bs = 3; // batch size
int hs = 20; // hidden size
int is = 10; // input size
int nd = 1; // num directions
float clip = 0.0f;
int input_forget = 1;
migraphx::shape seq_shape{migraphx::shape::float_type, {sl, bs, is}};
......@@ -1072,11 +1072,11 @@ TEST_CASE(lstm_forward)
// activation functions
TEST_CASE(lstm_forward_actv_func)
{
std::size_t sl = 5; // sequence len
std::size_t bs = 3; // batch size
std::size_t hs = 20; // hidden size
std::size_t is = 10; // input size
std::size_t nd = 1; // num directions
int sl = 5; // sequence len
int bs = 3; // batch size
int hs = 20; // hidden size
int is = 10; // input size
int nd = 1; // num directions
float clip = 0.0f;
int input_forget = 1;
migraphx::shape seq_shape{migraphx::shape::float_type, {sl, bs, is}};
......@@ -1196,11 +1196,11 @@ TEST_CASE(lstm_forward_actv_func)
TEST_CASE(lstm_reverse)
{
std::size_t sl = 5; // sequence len
std::size_t bs = 3; // batch size
std::size_t hs = 20; // hidden size
std::size_t is = 10; // input size
std::size_t nd = 1; // num directions
int sl = 5; // sequence len
int bs = 3; // batch size
int hs = 20; // hidden size
int is = 10; // input size
int nd = 1; // num directions
float clip = 0.0f;
int input_forget = 1;
migraphx::shape seq_shape{migraphx::shape::float_type, {sl, bs, is}};
......@@ -1321,11 +1321,11 @@ TEST_CASE(lstm_reverse)
TEST_CASE(lstm_bidirectional)
{
std::size_t sl = 5; // sequence len
std::size_t bs = 3; // batch size
std::size_t hs = 20; // hidden size
std::size_t is = 10; // input size
std::size_t nd = 2; // num directions
int sl = 5; // sequence len
int bs = 3; // batch size
int hs = 20; // hidden size
int is = 10; // input size
int nd = 2; // num directions
float clip = 0.0f;
int input_forget = 1;
migraphx::shape seq_shape{migraphx::shape::float_type, {sl, bs, is}};
......@@ -1573,11 +1573,11 @@ TEST_CASE(lstm_bidirectional)
TEST_CASE(lstm_bi_actv_funcs)
{
std::size_t sl = 5; // sequence len
std::size_t bs = 3; // batch size
std::size_t hs = 20; // hidden size
std::size_t is = 10; // input size
std::size_t nd = 2; // num directions
int sl = 5; // sequence len
int bs = 3; // batch size
int hs = 20; // hidden size
int is = 10; // input size
int nd = 2; // num directions
float clip = 0.0f;
int input_forget = 1;
migraphx::shape seq_shape{migraphx::shape::float_type, {sl, bs, is}};
......
......@@ -499,7 +499,7 @@ TEST_CASE(constant_fill_input_as_shape_test)
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_literal(migraphx::literal{{migraphx::shape::int32_type, {2}}, {2, 3}});
std::vector<std::size_t> dims(l0->get_shape().elements());
std::vector<int> dims(l0->get_shape().elements());
migraphx::literal ls = l0->get_literal();
ls.visit([&](auto s) { dims.assign(s.begin(), s.end()); });
migraphx::shape s{migraphx::shape::float_type, dims};
......@@ -997,7 +997,7 @@ TEST_CASE(spacetodepth_simple_test)
TEST_CASE(spacetodepth_invalid_blocksize)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("spacetodepth_invalid_blocksize_test.onnx"); }));
EXPECT(test::throws([&] { migraphx::parse_onnx("spacetodepth_invalid_blockintest.onnx"); }));
}
TEST_CASE(spacetodepth_nondivisibility_test)
......@@ -1053,7 +1053,7 @@ TEST_CASE(dequantizelinear_zero_point_test)
migraphx::program make_dequantizelinear_axis_prog()
{
migraphx::program p;
std::vector<size_t> input_lens{1, 1, 5, 1};
std::vector<int> input_lens{1, 1, 5, 1};
int axis = 2;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("0", {migraphx::shape::int8_type, input_lens});
......@@ -1426,7 +1426,7 @@ TEST_CASE(gemm_ex_brcst_test)
auto l0 = mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 6}});
auto l1 = mm->add_parameter("2", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 7}});
auto l2 = mm->add_parameter("3", migraphx::shape{migraphx::shape::float_type, {1, 1, 6, 1}});
std::vector<std::size_t> out_lens{1, 1, 6, 7};
std::vector<int> out_lens{1, 1, 6, 7};
auto alpha = 0.5f;
auto beta = 0.8f;
auto a_l = mm->add_literal(alpha);
......@@ -1459,7 +1459,7 @@ TEST_CASE(gemm_half_test)
t_a = mm->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::half_type}}), t_a);
t_a = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 1, 3, 2}}}), t_a);
std::vector<std::size_t> lens = {1, 1, 6, 7};
std::vector<int> lens = {1, 1, 6, 7};
auto dot = migraphx::add_apply_alpha_beta(*mm, {t_a, l1}, migraphx::make_op("dot"), 1.0f, 0.0f);
l2 = mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", lens}}), l2);
l2 = mm->add_instruction(
......@@ -1585,7 +1585,7 @@ TEST_CASE(hardsigmoid_default_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<std::size_t> input_lens{1, 3, 4, 5};
std::vector<int> input_lens{1, 3, 4, 5};
auto input_type = migraphx::shape::float_type;
migraphx::shape s{input_type, input_lens};
auto x = mm->add_parameter("x", s);
......@@ -1618,7 +1618,7 @@ TEST_CASE(hardsigmoid_double_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<std::size_t> input_lens{1, 3, 4, 5};
std::vector<int> input_lens{1, 3, 4, 5};
auto input_type = migraphx::shape::double_type;
migraphx::shape s{input_type, input_lens};
auto x = mm->add_parameter("x", s);
......@@ -1651,7 +1651,7 @@ TEST_CASE(hardsigmoid_half_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<std::size_t> input_lens{1, 3, 4, 5};
std::vector<int> input_lens{1, 3, 4, 5};
auto input_type = migraphx::shape::half_type;
migraphx::shape s{input_type, input_lens};
auto x = mm->add_parameter("x", s);
......@@ -1684,7 +1684,7 @@ TEST_CASE(hardswish_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<std::size_t> input_lens{2, 5};
std::vector<int> input_lens{2, 5};
auto input_type = migraphx::shape::float_type;
migraphx::shape s{input_type, input_lens};
auto x = mm->add_parameter("x", s);
......@@ -2051,7 +2051,7 @@ TEST_CASE(initializer_not_an_input)
TEST_CASE(instance_norm_test)
{
std::vector<size_t> dims{1, 2, 3, 3};
std::vector<int> dims{1, 2, 3, 3};
migraphx::shape s1{migraphx::shape::float_type, dims};
migraphx::shape s2{migraphx::shape::float_type, {2}};
......@@ -2511,7 +2511,7 @@ TEST_CASE(mean_single_input_test)
TEST_CASE(mean_test)
{
const std::size_t num_data = 3;
const int num_data = 3;
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::half_type, {1, 2, 3}};
......@@ -2553,7 +2553,7 @@ TEST_CASE(multinomial_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
size_t sample_size = 10;
int sample_size = 10;
float seed = 0.0f;
auto input = mm->add_parameter("input", migraphx::shape{migraphx::shape::float_type, {1, 10}});
......@@ -2596,7 +2596,7 @@ TEST_CASE(multinomial_int64_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
size_t sample_size = 10;
int sample_size = 10;
float seed = 1.0f;
migraphx::shape::type_t dtype = migraphx::shape::type_t::int64_type;
......@@ -3019,7 +3019,7 @@ TEST_CASE(quantizelinear_zero_point_test)
migraphx::program make_quantizelinear_axis_prog()
{
migraphx::program p;
std::vector<size_t> input_lens{1, 1, 5, 1};
std::vector<int> input_lens{1, 1, 5, 1};
int axis = 2;
auto* mm = p.get_main_module();
......@@ -3586,7 +3586,7 @@ TEST_CASE(resize_downsample_linear_test)
EXPECT(p == prog);
}
TEST_CASE(resize_outsize_test)
TEST_CASE(resize_outintest)
{
migraphx::program p;
auto* mm = p.get_main_module();
......@@ -3608,7 +3608,7 @@ TEST_CASE(resize_outsize_test)
auto r = mm->add_instruction(migraphx::make_op("gather", {{"axis", 0}}), lrsp, li);
mm->add_return({r});
auto prog = migraphx::parse_onnx("resize_outsize_test.onnx");
auto prog = migraphx::parse_onnx("resize_outintest.onnx");
EXPECT(p == prog);
}
......@@ -3974,7 +3974,7 @@ TEST_CASE(selu_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<std::size_t> lens = {2, 3};
std::vector<int> lens = {2, 3};
migraphx::shape s{migraphx::shape::double_type, lens};
auto x = mm->add_parameter("x", s);
......@@ -4203,7 +4203,7 @@ TEST_CASE(softplus_test)
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<std::size_t> input_lens{5};
std::vector<int> input_lens{5};
auto input_type = migraphx::shape::float_type;
auto x = mm->add_parameter("x", migraphx::shape{input_type, input_lens});
......@@ -4223,7 +4223,7 @@ TEST_CASE(softplus_nd_test)
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<std::size_t> input_lens{3, 4, 5};
std::vector<int> input_lens{3, 4, 5};
auto input_type = migraphx::shape::half_type;
auto x = mm->add_parameter("x", migraphx::shape{input_type, input_lens});
......@@ -4243,7 +4243,7 @@ TEST_CASE(softsign_test)
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<std::size_t> input_lens{5};
std::vector<int> input_lens{5};
auto input_type = migraphx::shape::float_type;
auto x = mm->add_parameter("x", migraphx::shape{input_type, input_lens});
......@@ -4263,7 +4263,7 @@ TEST_CASE(softsign_nd_test)
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<std::size_t> input_lens{3, 4, 5};
std::vector<int> input_lens{3, 4, 5};
auto input_type = migraphx::shape::half_type;
auto x = mm->add_parameter("x", migraphx::shape{input_type, input_lens});
......
......@@ -439,7 +439,7 @@ TEST_CASE(mean_test)
});
migraphx::parameter_map pp;
for(std::size_t i = 0; i < num_data; ++i)
for(int i = 0; i < num_data; ++i)
pp[std::to_string(i)] = migraphx::argument(s, data[i].data());
auto result = p.eval(pp).back();
......
......@@ -60,7 +60,7 @@ void throws_shape(const migraphx::shape&, Ts...)
TEST_CASE(batch_norm_inference_shape)
{
const size_t channels = 3;
const int channels = 3;
migraphx::shape s{migraphx::shape::float_type, {4, channels, 3, 3}};
migraphx::shape vars{migraphx::shape::float_type, {channels}};
expect_shape(s, migraphx::make_op("batch_norm_inference"), s, vars, vars, vars, vars);
......@@ -71,7 +71,7 @@ TEST_CASE(batch_norm_inference_shape)
TEST_CASE(broadcast)
{
{
std::vector<std::size_t> lens{1, 1};
std::vector<int> lens{1, 1};
migraphx::shape input{migraphx::shape::float_type, {1}, {0}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {1, 1}, {0, 0}},
migraphx::make_op("broadcast", {{"axis", 0}, {"out_lens", lens}}),
......@@ -79,19 +79,19 @@ TEST_CASE(broadcast)
}
{
std::vector<std::size_t> lens{1, 1};
std::vector<int> lens{1, 1};
migraphx::shape input{migraphx::shape::float_type, {2}};
throws_shape(migraphx::op::broadcast{1, lens}, input);
}
{
std::vector<std::size_t> lens{2, 2};
std::vector<int> lens{2, 2};
migraphx::shape input{migraphx::shape::float_type, {1, 2}};
throws_shape(migraphx::op::broadcast{1, lens}, input);
}
{
std::vector<std::size_t> lens{3, 2, 4, 3};
std::vector<int> lens{3, 2, 4, 3};
migraphx::shape input{migraphx::shape::float_type, {4, 3}};
expect_shape(migraphx::shape{migraphx::shape::float_type, {3, 2, 4, 3}, {0, 0, 3, 1}},
migraphx::make_op("broadcast", {{"axis", 2}, {"out_lens", lens}}),
......@@ -99,7 +99,7 @@ TEST_CASE(broadcast)
}
{
std::vector<std::size_t> lens{3, 2, 4, 3};
std::vector<int> lens{3, 2, 4, 3};
migraphx::shape input{migraphx::shape::float_type, {4, 4}};
throws_shape(migraphx::make_op("broadcast", {{"axis", 2}, {"out_lens", lens}}), input);
}
......@@ -366,11 +366,11 @@ TEST_CASE(get_tuple_elem_test)
TEST_CASE(gru)
{
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
int batch_size = 2;
int seq_len = 2;
int hidden_size = 4;
int input_size = 3;
int num_dirct = 1;
float clip = 0.0f;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
......@@ -399,11 +399,11 @@ TEST_CASE(gru)
}
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
int batch_size = 2;
int seq_len = 2;
int hidden_size = 4;
int input_size = 3;
int num_dirct = 1;
float clip = 0.0f;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
......@@ -432,11 +432,11 @@ TEST_CASE(gru)
}
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 2;
int batch_size = 2;
int seq_len = 2;
int hidden_size = 4;
int input_size = 3;
int num_dirct = 2;
float clip = 0.0f;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
......@@ -465,11 +465,11 @@ TEST_CASE(gru)
}
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
int batch_size = 2;
int seq_len = 2;
int hidden_size = 4;
int input_size = 3;
int num_dirct = 1;
float clip = 0.0f;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
......@@ -496,11 +496,11 @@ TEST_CASE(gru)
}
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
int batch_size = 2;
int seq_len = 2;
int hidden_size = 4;
int input_size = 3;
int num_dirct = 1;
float clip = 0.0f;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
......@@ -527,11 +527,11 @@ TEST_CASE(gru)
}
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 2;
int batch_size = 2;
int seq_len = 2;
int hidden_size = 4;
int input_size = 3;
int num_dirct = 2;
float clip = 0.0f;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
......@@ -610,11 +610,11 @@ TEST_CASE(logsoftmax) { test_softmax_variations<migraphx::op::logsoftmax>(); }
TEST_CASE(lstm)
{
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
int batch_size = 2;
int seq_len = 2;
int hidden_size = 4;
int input_size = 3;
int num_dirct = 1;
float clip = 0.0f;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
......@@ -639,11 +639,11 @@ TEST_CASE(lstm)
}
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
int batch_size = 2;
int seq_len = 2;
int hidden_size = 4;
int input_size = 3;
int num_dirct = 1;
float clip = 0.0f;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
......@@ -672,11 +672,11 @@ TEST_CASE(lstm)
}
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 2;
int batch_size = 2;
int seq_len = 2;
int hidden_size = 4;
int input_size = 3;
int num_dirct = 2;
float clip = 0.0f;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
......@@ -705,11 +705,11 @@ TEST_CASE(lstm)
}
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
int batch_size = 2;
int seq_len = 2;
int hidden_size = 4;
int input_size = 3;
int num_dirct = 1;
float clip = 0.0f;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
......@@ -736,11 +736,11 @@ TEST_CASE(lstm)
}
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
int batch_size = 2;
int seq_len = 2;
int hidden_size = 4;
int input_size = 3;
int num_dirct = 1;
float clip = 0.0f;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
......@@ -767,11 +767,11 @@ TEST_CASE(lstm)
}
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 2;
int batch_size = 2;
int seq_len = 2;
int hidden_size = 4;
int input_size = 3;
int num_dirct = 2;
float clip = 0.0f;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
......@@ -901,71 +901,71 @@ TEST_CASE(matmul)
TEST_CASE(multibroadcast)
{
{
std::vector<std::size_t> lens{4, 2, 5, 3};
std::vector<int> lens{4, 2, 5, 3};
migraphx::shape input{migraphx::shape::float_type, {2, 1, 3}};
expect_shape(migraphx::shape{migraphx::shape::float_type, lens, {0, 3, 0, 1}},
migraphx::make_op("multibroadcast", {{"out_lens", lens}}),
input);
}
{
std::vector<std::size_t> lens{4, 2, 5, 3};
std::vector<int> lens{4, 2, 5, 3};
migraphx::shape input{migraphx::shape::float_type, {2, 1, 1}};
expect_shape(migraphx::shape{migraphx::shape::float_type, lens, {0, 1, 0, 0}},
migraphx::make_op("multibroadcast", {{"out_lens", lens}}),
input);
}
{
std::vector<std::size_t> lens{4, 2, 5, 3};
std::vector<int> lens{4, 2, 5, 3};
migraphx::shape input{migraphx::shape::float_type, {5, 1}};
expect_shape(migraphx::shape{migraphx::shape::float_type, lens, {0, 0, 1, 0}},
migraphx::make_op("multibroadcast", {{"out_lens", lens}}),
input);
}
{
std::vector<std::size_t> lens{4, 2, 5, 3};
std::vector<int> lens{4, 2, 5, 3};
migraphx::shape input{migraphx::shape::float_type, {4, 1, 1, 1}};
expect_shape(migraphx::shape{migraphx::shape::float_type, lens, {1, 0, 0, 0}},
migraphx::make_op("multibroadcast", {{"out_lens", lens}}),
input);
}
{
std::vector<std::size_t> lens{4, 2, 5, 3};
std::vector<int> lens{4, 2, 5, 3};
migraphx::shape input{migraphx::shape::float_type, {3}};
expect_shape(migraphx::shape{migraphx::shape::float_type, lens, {0, 0, 0, 1}},
migraphx::make_op("multibroadcast", {{"out_lens", lens}}),
input);
}
{
std::vector<std::size_t> lens{4, 4, 1, 3};
std::vector<int> lens{4, 4, 1, 3};
migraphx::shape input{migraphx::shape::float_type, {4, 1, 3}};
expect_shape(migraphx::shape{migraphx::shape::float_type, lens, {0, 3, 3, 1}},
migraphx::make_op("multibroadcast", {{"out_lens", lens}}),
input);
}
{
std::vector<std::size_t> lens{4, 1, 1, 3};
std::vector<int> lens{4, 1, 1, 3};
migraphx::shape input{migraphx::shape::float_type, {4, 1, 1, 1}};
expect_shape(migraphx::shape{migraphx::shape::float_type, lens, {1, 1, 1, 0}},
migraphx::make_op("multibroadcast", {{"out_lens", lens}}),
input);
}
{
std::vector<std::size_t> lens{4, 1, 3};
std::vector<int> lens{4, 1, 3};
migraphx::shape input{migraphx::shape::float_type, {4, 1, 1, 1}};
throws_shape(migraphx::make_op("multibroadcast", {{"out_lens", lens}}), input);
}
{
std::vector<std::size_t> lens{4, 1, 3};
std::vector<int> lens{4, 1, 3};
migraphx::shape input{migraphx::shape::float_type, {}};
throws_shape(migraphx::make_op("multibroadcast", {{"out_lens", lens}}), input);
}
{
std::vector<std::size_t> lens{2, 3, 4, 5};
std::vector<int> lens{2, 3, 4, 5};
migraphx::shape input{migraphx::shape::float_type, {3, 4}};
throws_shape(migraphx::make_op("multibroadcast", {{"out_lens", lens}}), input);
}
{
std::vector<std::size_t> lens{2, 3, 4, 5};
std::vector<int> lens{2, 3, 4, 5};
migraphx::shape input{migraphx::shape::float_type, {2, 3, 4}};
throws_shape(migraphx::make_op("multibroadcast", {{"out_lens", lens}}), input);
}
......@@ -1118,7 +1118,7 @@ TEST_CASE(reshape_shape)
for(auto&& new_shape :
std::vector<std::vector<int64_t>>{{8, 3, 1, 1}, {1, 3, 4, 2}, {1, 3, 4, 2}})
{
std::vector<std::size_t> lens(new_shape.size());
std::vector<int> lens(new_shape.size());
std::copy(new_shape.begin(), new_shape.end(), lens.begin());
migraphx::shape output{migraphx::shape::float_type, lens};
expect_shape(output, migraphx::make_op("reshape", {{"dims", new_shape}}), input);
......@@ -1150,11 +1150,11 @@ TEST_CASE(reshape_shape)
TEST_CASE(rnn)
{
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
int batch_size = 2;
int seq_len = 2;
int hidden_size = 4;
int input_size = 3;
int num_dirct = 1;
float clip = 0.0f;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
......@@ -1181,11 +1181,11 @@ TEST_CASE(rnn)
}
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
int batch_size = 2;
int seq_len = 2;
int hidden_size = 4;
int input_size = 3;
int num_dirct = 1;
float clip = 0.0f;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
......@@ -1212,11 +1212,11 @@ TEST_CASE(rnn)
}
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 2;
int batch_size = 2;
int seq_len = 2;
int hidden_size = 4;
int input_size = 3;
int num_dirct = 2;
float clip = 0.0f;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
......@@ -1243,11 +1243,11 @@ TEST_CASE(rnn)
}
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
int batch_size = 2;
int seq_len = 2;
int hidden_size = 4;
int input_size = 3;
int num_dirct = 1;
float clip = 0.0f;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
......@@ -1272,11 +1272,11 @@ TEST_CASE(rnn)
}
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 1;
int batch_size = 2;
int seq_len = 2;
int hidden_size = 4;
int input_size = 3;
int num_dirct = 1;
float clip = 0.0f;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
......@@ -1301,11 +1301,11 @@ TEST_CASE(rnn)
}
{
std::size_t batch_size = 2;
std::size_t seq_len = 2;
std::size_t hidden_size = 4;
std::size_t input_size = 3;
std::size_t num_dirct = 2;
int batch_size = 2;
int seq_len = 2;
int hidden_size = 4;
int input_size = 3;
int num_dirct = 2;
float clip = 0.0f;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
......