"src/targets/vscode:/vscode.git/clone" did not exist on "fd911fabdfb17110ca83ccd66211165ebb74e7c2"
Commit 94e3a2e4 authored by Shucai Xiao

change size_t to int

parent 26bd92d8
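
This commit swaps std::size_t for int in sizes, indices, and loop counters across the JSON and msgpack serializers, module::size(), and several ONNX operator parsers (see the hunks below). As a minimal, hypothetical sketch (names and values are illustrative, not taken from this repository), the following standalone program shows the two effects such a change has to account for: narrowing an unsigned container size into int, and the signed/unsigned comparisons compilers flag with -Wsign-compare.

#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    std::vector<char> buffer(16, 'x');

    // std::vector::size() returns std::size_t (unsigned). Storing it in an int
    // narrows the value; this is only safe while size() fits in INT_MAX.
    int size = static_cast<int>(buffer.size());

    // Comparing a signed loop index against the unsigned size() triggers
    // -Wsign-compare; casting the bound keeps both sides signed.
    for(int i = 0; i < static_cast<int>(buffer.size()); ++i)
        buffer[i] = 'y';

    std::cout << "buffer holds " << size << " bytes\n";
    return 0;
}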
@@ -20,7 +20,7 @@ static void inline_submodule(module& m, instruction_ref ins, bool cond)
 {
 auto val = out->get_operator().to_value();
 assert(val.contains("index"));
-auto index = val.at("index").to<std::size_t>();
+auto index = val.at("index").to<int>();
 m.replace_instruction(out, mod_outputs.at(index));
 }
 }

@@ -133,7 +133,7 @@ std::string to_json_string(const value& val)
 return j.dump();
 }
-migraphx::value from_json_string(const char* str, std::size_t size)
+migraphx::value from_json_string(const char* str, int size)
 {
 json j = json::parse(str, str + size);
 return j.get<value>();

@@ -16,7 +16,7 @@ program load_buffer(const std::vector<char>& buffer, const file_options& options
 {
 return load_buffer(buffer.data(), buffer.size(), options);
 }
-program load_buffer(const char* buffer, std::size_t size, const file_options& options)
+program load_buffer(const char* buffer, int size, const file_options& options)
 {
 program p;
 if(options.format == "msgpack")

@@ -476,7 +476,7 @@ std::unordered_map<std::string, shape> module::get_parameter_shapes() const
 bool module::has_instruction(instruction_ref ins) const { return impl->contains(ins); }
-std::size_t module::size() const { return impl->instructions.size(); }
+int module::size() const { return impl->instructions.size(); }
 instruction_ref module::begin() const { return impl->instructions.begin(); }
 instruction_ref module::end() const { return impl->instructions.end(); }

@@ -47,7 +47,7 @@ MSGPACK_API_VERSION_NAMESPACE(MSGPACK_DEFAULT_API_NS)
 }
 case msgpack::type::BIN:
 {
-v = migraphx::value::binary{o.via.bin.ptr, o.via.bin.size};
+v = migraphx::value::binary{o.via.bin.ptr, static_cast<int>(o.via.bin.size)};
 break;
 }
 case msgpack::type::ARRAY:

@@ -150,7 +150,7 @@ inline namespace MIGRAPHX_INLINE_NS {
 struct vector_stream
 {
 std::vector<char> buffer{};
-vector_stream& write(const char* b, std::size_t n)
+vector_stream& write(const char* b, int n)
 {
 buffer.insert(buffer.end(), b, b + n);
 return *this;

@@ -163,7 +163,7 @@ std::vector<char> to_msgpack(const value& v)
 msgpack::pack(vs, v);
 return vs.buffer;
 }
-value from_msgpack(const char* buffer, std::size_t size)
+value from_msgpack(const char* buffer, int size)
 {
 msgpack::object_handle oh = msgpack::unpack(buffer, size);
 return oh.get().as<value>();

@@ -120,8 +120,8 @@ auto tune_attribute(const std::vector<int64_t>& vec,
 auto tune_pad_attribute(const value& val)
 {
-std::vector<size_t> vec_attrs = val.to_vector<size_t>();
-std::vector<size_t> result(vec_attrs.begin(), vec_attrs.end());
+std::vector<int> vec_attrs = val.to_vector<int>();
+std::vector<int> result(vec_attrs.begin(), vec_attrs.end());
 std::copy(vec_attrs.begin(), vec_attrs.end(), std::back_inserter(result));
 return result;

@@ -13,7 +13,7 @@ void check_arg_empty(const argument& arg, const std::string& msg)
 }
 }
-void check_attr_sizes(size_t kdims, size_t attr_size, const std::string& error_msg)
+void check_attr_sizes(int kdims, int attr_size, const std::string& error_msg)
 {
 if(kdims != attr_size)
 {

@@ -5,7 +5,7 @@ namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace onnx {
-void recalc_conv_attributes(value& v, size_t kdims)
+void recalc_conv_attributes(value& v, int kdims)
 {
 if(not(v["padding"].size() == kdims or v["padding"].size() == kdims * 2))
 {

@@ -10,7 +10,7 @@ inline namespace MIGRAPHX_INLINE_NS {
 namespace onnx {
 void check_arg_empty(const argument& arg, const std::string& msg);
-void check_attr_sizes(size_t kdims, size_t attr_size, const std::string& error_msg);
+void check_attr_sizes(int kdims, int attr_size, const std::string& error_msg);
 } // namespace onnx
 } // namespace MIGRAPHX_INLINE_NS

@@ -8,7 +8,7 @@ namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace onnx {
-void recalc_conv_attributes(value& v, size_t kdims);
+void recalc_conv_attributes(value& v, int kdims);
 } // namespace onnx
 } // namespace MIGRAPHX_INLINE_NS

@@ -62,7 +62,7 @@ struct parse_constant_fill : op_parser<parse_constant_fill>
 migraphx::argument in = args[0]->eval();
 check_arg_empty(in, "ConstantFill: dynamic shape is not supported");
-std::vector<std::size_t> dims;
+std::vector<int> dims;
 in.visit([&](auto input) { dims.assign(input.begin(), input.end()); });
 migraphx::shape s(type, dims);
 std::vector<float> values(s.elements(), value);

@@ -76,7 +76,7 @@ struct parse_constant_fill : op_parser<parse_constant_fill>
 }
 literal ls = parser.parse_value(info.attributes.at("shape"));
-std::vector<std::size_t> dims;
+std::vector<int> dims;
 ls.visit([&](auto s) { dims.assign(s.begin(), s.end()); });
 migraphx::shape s{type, dims};
 std::vector<float> values(s.elements(), value);

@@ -52,7 +52,7 @@ struct parse_constant_of_shape : op_parser<parse_constant_of_shape>
 migraphx::argument in = args[0]->eval();
 check_arg_empty(in, "ConstantOfShape: dynamic shape is not supported");
-std::vector<std::size_t> dims;
+std::vector<int> dims;
 in.visit([&](auto input) { dims.assign(input.begin(), input.end()); });
 s = migraphx::shape{type, dims};
 }

@@ -49,7 +49,7 @@ struct parse_deconvolution : op_parser<parse_deconvolution>
 if(not asym_padding)
 {
-size_t pad_ndims = padding.size() / 2;
+int pad_ndims = padding.size() / 2;
 check_attr_sizes(kdims, pad_ndims, "PARSE_CONV_TRANSPOSE: inconsistent paddings");
 values["padding"].clear();
 std::transform(padding.begin(),

@@ -119,7 +119,7 @@ struct parse_deconvolution : op_parser<parse_deconvolution>
 if(contains(info.attributes, "output_padding"))
 {
-size_t non_kdims = dims.size() * 2 - kdims;
+int non_kdims = dims.size() * 2 - kdims;
 std::vector<int64_t> output_padding(non_kdims, 0);
 copy(info.attributes["output_padding"].ints(), std::back_inserter(output_padding));
 check_attr_sizes(kdims,

@@ -44,7 +44,7 @@ struct parse_gather_elements : op_parser<parse_gather_elements>
 // to the gather operator
 arg_data = info.add_instruction(make_op("reshape", {{"dims", {data_elem_num}}}), arg_data);
-std::size_t elem_num = ind_s.elements();
+int elem_num = ind_s.elements();
 std::vector<int> ind_index(elem_num);
 std::iota(ind_index.begin(), ind_index.end(), 0);

@@ -20,11 +20,11 @@ struct parse_gru : op_parser<parse_gru>
 std::vector<instruction_ref> args) const
 {
 migraphx::shape input_shape = args[0]->get_shape();
-std::size_t hidden_size = args[2]->get_shape().lens()[2];
+int hidden_size = args[2]->get_shape().lens()[2];
 if(contains(info.attributes, "hidden_size"))
 {
-std::size_t hidden_size_att =
+int hidden_size_att =
 parser.parse_value(info.attributes.at("hidden_size")).at<int>();
 if(hidden_size != hidden_size_att)
 {

@@ -55,7 +55,7 @@ struct parse_if : op_parser<parse_if>
 const auto& vec_shapes = out_s.sub_shapes();
 std::vector<instruction_ref> out_inss;
-for(std::size_t i = 0; i < vec_shapes.size(); ++i)
+for(int i = 0; i < vec_shapes.size(); ++i)
 {
 auto ret = info.add_instruction(make_op("get_tuple_elem", {{"index", i}}), if_ret);
 out_inss.push_back(ret);

@@ -57,7 +57,7 @@ struct parse_loop : op_parser<parse_loop>
 const auto& vec_shapes = out_s.sub_shapes();
 std::vector<instruction_ref> out_inss;
-for(std::size_t i = 0; i < vec_shapes.size(); ++i)
+for(int i = 0; i < vec_shapes.size(); ++i)
 {
 auto r = info.add_instruction(make_op("get_tuple_elem", {{"index", i}}), ret);
 out_inss.push_back(r);

@@ -103,11 +103,11 @@ struct parse_lstm : op_parser<parse_lstm>
 std::vector<instruction_ref> args) const
 {
 migraphx::shape input_shape = args[0]->get_shape();
-std::size_t hidden_size = args[2]->get_shape().lens()[2];
+int hidden_size = args[2]->get_shape().lens()[2];
 if(contains(info.attributes, "hidden_size"))
 {
-std::size_t hidden_size_att =
+int hidden_size_att =
 parser.parse_value(info.attributes.at("hidden_size")).at<int>();
 if(hidden_size != hidden_size_att)
 {

@@ -15,7 +15,7 @@ instruction_ref parse_prefix_scan_oper(const std::string& op_name,
 {
 migraphx::argument in = args[1]->eval();
 check_arg_empty(in, "PARSE_PREFIX_SCAN: axis - dynamic shape not supported");
-std::vector<std::size_t> axis_in;
+std::vector<int> axis_in;
 in.visit([&](auto input) { axis_in.assign(input.begin(), input.end()); });
 int64_t axis = axis_in[0];

@@ -34,7 +34,7 @@ struct parse_range : op_parser<parse_range>
 auto limit_val = limit.front();
 auto delta_val = delta.front();
-size_t num_elements = static_cast<size_t>(
+int num_elements = static_cast<int>(
 ceil(static_cast<double>(limit_val - start_val) / static_cast<double>(delta_val)));
 assert(num_elements > 0);