Commit 94e3a2e4 authored by Shucai Xiao

change size_t to int

parent 26bd92d8
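For reference, the sketch below (illustrative only, not part of this commit) shows the two behaviors a size_t-to-int change like this interacts with: signed/unsigned comparison against container sizes, and what -1 becomes when stored in each type.

```cpp
// Illustrative only; not MIGraphX code. Compile with e.g. g++ -Wall.
#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    std::vector<int> dims = {2, 3, 4};

    // Indexing with an int against dims.size() (a std::size_t) compiles,
    // but compilers typically emit a -Wsign-compare warning because the
    // signed index is converted to unsigned for the comparison.
    for(int i = 0; i < dims.size(); i++)
        std::cout << dims[i] << " ";
    std::cout << "\n";

    // Storing -1 in a size_t wraps to the maximum representable value,
    // while an int keeps it as -1; this is the behavior the reshape
    // comment further down refers to.
    std::size_t as_unsigned = -1; // SIZE_MAX
    int as_signed           = -1; // -1
    std::cout << as_unsigned << " vs " << as_signed << "\n";
    return 0;
}
```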
@@ -18,7 +18,7 @@ namespace op {
struct multibroadcast
{
std::vector<std::size_t> output_lens;
std::vector<int> output_lens;
template <class Self, class F>
static auto reflect(Self& self, F f)
@@ -55,7 +55,7 @@ struct multibroadcast
}
}
std::vector<size_t> bcast_strides(output_lens.size(), 0);
std::vector<int> bcast_strides(output_lens.size(), 0);
for(std::ptrdiff_t i = input.lens().size() - 1; i >= 0; i--)
{
if(output_lens[i + offset] == input.lens()[i])
......
@@ -60,7 +60,7 @@ struct nonmaxsuppression
std::sort(y.begin(), y.end());
}
std::array<float, 2>& operator[](std::size_t i) { return i == 0 ? x : y; }
std::array<float, 2>& operator[](int i) { return i == 0 ? x : y; }
float area() const
{
@@ -71,7 +71,7 @@ struct nonmaxsuppression
};
template <class T>
box batch_box(const T* boxes, std::size_t bidx) const
box batch_box(const T* boxes, int bidx) const
{
box result{};
const T* start = boxes + 4 * bidx;
@@ -134,13 +134,13 @@ struct nonmaxsuppression
result.visit([&](auto out) { std::fill(out.begin(), out.end(), 0); });
std::size_t max_output_boxes_per_class = 0;
int max_output_boxes_per_class = 0;
float iou_threshold = 0.0f;
float score_threshold = 0.0f;
if(args.size() > 2)
{
max_output_boxes_per_class = args.at(2).at<std::size_t>();
max_output_boxes_per_class = args.at(2).at<int>();
}
// max_output_boxes_per_class is 0, no output
if(max_output_boxes_per_class == 0)
@@ -174,7 +174,7 @@ struct nonmaxsuppression
auto bidx = idx[0];
auto cidx = idx[1];
std::size_t score_offset = (bidx * class_num + cidx) * box_num;
int score_offset = (bidx * class_num + cidx) * box_num;
const float* batch_boxes = boxes + bidx * box_num * 4;
std::priority_queue<std::pair<float, int64_t>> sorted_boxes;
auto insert_to_sorted_boxes =
......
@@ -38,10 +38,10 @@ struct pad
{
check_shapes{inputs, *this}.has(1);
auto&& idims = inputs.front().lens();
std::vector<std::size_t> rdims(idims.begin(), idims.end());
std::size_t num_dims = rdims.size();
std::vector<int> rdims(idims.begin(), idims.end());
int num_dims = rdims.size();
for(std::size_t i = 0; i < num_dims; i++)
for(int i = 0; i < num_dims; i++)
{
rdims[i] += pads[i] + pads[i + num_dims];
}
@@ -50,7 +50,7 @@ struct pad
return s;
}
std::size_t pad_ndims() const
int pad_ndims() const
{
assert(pads.size() % 2 == 0);
return pads.size() / 2;
@@ -58,7 +58,7 @@ struct pad
bool symmetric() const
{
std::size_t num_dims = pads.size() / 2;
int num_dims = pads.size() / 2;
return std::equal(
pads.begin(), pads.begin() + num_dims, pads.begin() + num_dims, pads.end());
}
......
@@ -57,7 +57,7 @@ struct pooling
const shape& input = inputs.at(0);
auto input_lens = input.lens();
size_t kdims = input_lens.size() - 2;
int kdims = input_lens.size() - 2;
auto input_size = inputs[0].lens().size();
auto padding_size = padding.size();
if(not(input_size == padding_size / 2 + 2 or input_size == padding_size + 2))
@@ -67,7 +67,7 @@ struct pooling
std::vector<int> output_lens(input_lens.begin(), input_lens.begin() + 2);
for(size_t i = 0; i < kdims; i++)
for(int i = 0; i < kdims; i++)
{
std::ptrdiff_t dim_size;
auto padding_factor = 2 * padding[i];
@@ -83,7 +83,7 @@ struct pooling
return inputs[0].with_lens(output_lens);
}
size_t kdims() const
int kdims() const
{
check_attribute_size();
return stride.size();
......
@@ -45,8 +45,8 @@ struct quant_dot
to_string_range(a.lens()) + "} x {" + to_string_range(b.lens()) + "}");
}
std::size_t dim_0 = a.lens().size() - 2;
std::size_t dim_1 = a.lens().size() - 1;
int dim_0 = a.lens().size() - 2;
int dim_1 = a.lens().size() - 1;
if(a.lens()[dim_1] != b.lens()[dim_0])
{
MIGRAPHX_THROW("QUANT_DOT: inner dimensions do not match: {" +
......
@@ -31,28 +31,28 @@ struct reshape
{
check_shapes{inputs, *this}.has(1).standard();
auto&& idims = inputs.front().lens();
std::vector<std::size_t> rdims(dims.begin(), dims.end());
std::vector<int> rdims(dims.begin(), dims.end());
auto n_neg_dims = std::count(dims.begin(), dims.end(), -1);
if(n_neg_dims > 1)
MIGRAPHX_THROW("Reshape: Dimensions for reshape can only have one -1 dim");
for(std::size_t i = 0; i < dims.size(); i++)
for(int i = 0; i < dims.size(); i++)
{
if(dims[i] == 0)
rdims[i] = idims[i];
// since rdims uses the size_t type, -1 becomes the max value
// of size_t, which makes later computation incorrect
// since rdims uses the int type, -1 becomes the max value
// of int, which makes later computation incorrect
if(dims[i] == -1)
rdims[i] = 1;
}
if(n_neg_dims > 0)
{
size_t missing_dim =
int missing_dim =
inputs.front().elements() /
std::accumulate(rdims.begin(), rdims.end(), 1, std::multiplies<int64_t>());
for(std::size_t i = 0; i < rdims.size(); i++)
for(int i = 0; i < rdims.size(); i++)
{
if(dims[i] == -1)
rdims[i] = missing_dim;
......
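A minimal standalone sketch of the -1 inference in the reshape hunk above (illustrative only; infer_reshape is a hypothetical helper, not the MIGraphX implementation): the -1 entry is first replaced by 1 so the product of the known dimensions is valid, and the missing dimension is then recovered by dividing the total element count by that product.

```cpp
// Illustrative sketch of inferring a single -1 dimension.
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

// Hypothetical helper: 'dims' is the requested shape (may contain one -1),
// 'elements' is the total element count of the input.
std::vector<int> infer_reshape(const std::vector<int>& dims, int elements)
{
    std::vector<int> rdims = dims;
    for(std::size_t i = 0; i < rdims.size(); i++)
        if(rdims[i] == -1)
            rdims[i] = 1; // placeholder so the product below stays valid
    int known =
        std::accumulate(rdims.begin(), rdims.end(), 1, std::multiplies<int64_t>());
    for(std::size_t i = 0; i < rdims.size(); i++)
        if(dims[i] == -1)
            rdims[i] = elements / known; // fill in the inferred dimension
    return rdims;
}

int main()
{
    // A {2, 3, 4} input has 24 elements; reshaping to {4, -1} infers {4, 6}.
    for(int d : infer_reshape({4, -1}, 24))
        std::cout << d << " ";
    std::cout << "\n";
    return 0;
}
```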
@@ -18,7 +18,7 @@ namespace op {
struct scalar
{
std::vector<std::size_t> scalar_bcast_lens;
std::vector<int> scalar_bcast_lens;
template <class Self, class F>
static auto reflect(Self& self, F f)
@@ -32,7 +32,7 @@ struct scalar
{
check_shapes{inputs, *this}.has(1).only_dims(1).nelements(1);
auto t = inputs.at(0).type();
std::vector<std::size_t> strides(scalar_bcast_lens.size(), 0);
std::vector<int> strides(scalar_bcast_lens.size(), 0);
return {t, scalar_bcast_lens, strides};
}
......
@@ -46,8 +46,8 @@ struct squeeze
{
MIGRAPHX_THROW("squeeze axis dimension should be equal to 1");
}
std::vector<std::size_t> new_lens;
std::vector<std::size_t> new_strides;
std::vector<int> new_lens;
std::vector<int> new_strides;
if(axes.empty())
{
for(auto i : range(old_lens.size()))
......
@@ -100,7 +100,7 @@ struct topk
auto* out_ind = res_ind.cast<int64_t>();
par_for(comp_s.elements(), [&](auto i) {
auto idx = comp_s.multi(i);
std::vector<std::size_t> indices(k);
std::vector<int> indices(k);
std::iota(indices.begin(), indices.end(), 0);
auto comp = [&](auto i1, auto i2) {
@@ -114,7 +114,7 @@ struct topk
};
auto hp = this->make_heap(indices, comp);
for(std::size_t ii = indices.size(); ii < axis_dim; ++ii)
for(int ii = indices.size(); ii < axis_dim; ++ii)
{
hp.try_push(ii);
}
......
@@ -43,9 +43,9 @@ struct transpose
{
MIGRAPHX_THROW("TRANSPOSE: Invalid permutation");
}
std::vector<size_t> output_lens(input_lens.size());
std::vector<size_t> output_strides(input_lens.size());
for(std::size_t i = 0; i < output_lens.size(); i++)
std::vector<int> output_lens(input_lens.size());
std::vector<int> output_strides(input_lens.size());
for(int i = 0; i < output_lens.size(); i++)
{
output_lens[i] = input_lens[dims[i]];
output_strides[i] = input_strides[dims[i]];
......
@@ -50,11 +50,11 @@ struct unsqueeze
MIGRAPHX_THROW("UNSQUEEZE: Input must be a scalar");
}
std::size_t new_size = old_lens.size() + axes.size();
int new_size = old_lens.size() + axes.size();
std::vector<std::size_t> new_lens(new_size);
std::size_t p = 0;
for(std::size_t i = 0; i < new_size; i++)
std::vector<int> new_lens(new_size);
int p = 0;
for(int i = 0; i < new_size; i++)
{
if(std::find(axes.begin(), axes.end(), i) != axes.end())
{
......
@@ -13,10 +13,10 @@ template <class... Ts>
auto par_dfor(Ts... xs)
{
return [=](auto f) {
using array_type = std::array<std::size_t, sizeof...(Ts)>;
array_type lens = {{static_cast<std::size_t>(xs)...}};
auto n = std::accumulate(lens.begin(), lens.end(), 1, std::multiplies<std::size_t>{});
const std::size_t min_grain = 8;
using array_type = std::array<int, sizeof...(Ts)>;
array_type lens = {{static_cast<int>(xs)...}};
auto n = std::accumulate(lens.begin(), lens.end(), 1, std::multiplies<int>{});
const int min_grain = 8;
if(n > 2 * min_grain)
{
array_type strides;
@@ -24,16 +24,16 @@ auto par_dfor(Ts... xs)
std::partial_sum(lens.rbegin(),
lens.rend() - 1,
strides.rbegin() + 1,
std::multiplies<std::size_t>());
std::multiplies<int>());
auto size =
std::accumulate(lens.begin(), lens.end(), 1, std::multiplies<std::size_t>());
par_for(size, min_grain, [&](std::size_t i) {
std::accumulate(lens.begin(), lens.end(), 1, std::multiplies<int>());
par_for(size, min_grain, [&](int i) {
array_type indices;
std::transform(strides.begin(),
strides.end(),
lens.begin(),
indices.begin(),
[&](size_t stride, size_t len) { return (i / stride) % len; });
[&](int stride, int len) { return (i / stride) % len; });
migraphx::unpack(f, indices);
});
}
......
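A minimal standalone sketch of the index arithmetic used in par_dfor above (illustrative only, not this commit's code): row-major strides are built with std::partial_sum over the reversed lengths, and a flat index is mapped back to a multi-dimensional index with (i / stride) % len.

```cpp
// Illustrative only. Assumes a 3-D iteration space of lengths {2, 3, 4}.
#include <algorithm>
#include <array>
#include <functional>
#include <iostream>
#include <numeric>

int main()
{
    std::array<int, 3> lens    = {2, 3, 4};
    std::array<int, 3> strides = {};
    strides.back()             = 1;
    // Row-major strides from the reversed lengths: {12, 4, 1}.
    std::partial_sum(
        lens.rbegin(), lens.rend() - 1, strides.rbegin() + 1, std::multiplies<int>());

    int i = 17; // flat index into the 2*3*4 = 24 element space
    std::array<int, 3> idx = {};
    std::transform(strides.begin(),
                   strides.end(),
                   lens.begin(),
                   idx.begin(),
                   [&](int stride, int len) { return (i / stride) % len; });
    // 17 == 1*12 + 1*4 + 1*1, so idx == {1, 1, 1}.
    std::cout << idx[0] << ", " << idx[1] << ", " << idx[2] << "\n";
    return 0;
}
```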
@@ -13,7 +13,7 @@ inline Vector reorder_dims(const Vector& dims, const std::vector<int64_t>& permu
{
Vector result(dims.size());
assert(dims.size() == permutation.size());
for(std::size_t i = 0; i < dims.size(); i++)
for(int i = 0; i < dims.size(); i++)
{
result[i] = dims[permutation[i]];
}
......
@@ -53,7 +53,7 @@ struct program
std::vector<argument> eval(parameter_map params) const;
std::size_t size() const;
int size() const;
std::vector<shape> get_output_shapes() const;
@@ -68,7 +68,7 @@ struct program
void finalize();
void
perf_report(std::ostream& os, std::size_t n, parameter_map params, std::size_t batch = 1) const;
perf_report(std::ostream& os, int n, parameter_map params, int batch = 1) const;
void mark(const parameter_map& params, marker&& m);
......
@@ -19,8 +19,8 @@ struct module;
struct capture_arguments_pass
{
std::vector<std::string> ins_names = {"dot", "convolution"};
std::function<void(std::size_t, std::vector<argument>)> f{};
std::size_t* param_index = nullptr;
std::function<void(int, std::vector<argument>)> f{};
int* param_index = nullptr;
std::string name() const { return "capture_arguments"; }
void apply(module& m) const;
};
......
@@ -48,7 +48,7 @@ struct raw_data : raw_data_base
* @param n The index to read from
*/
template <class Visitor>
void visit_at(Visitor v, std::size_t n = 0) const
void visit_at(Visitor v, int n = 0) const
{
auto&& derived = static_cast<const Derived&>(*this);
if(derived.empty())
@@ -96,7 +96,7 @@ struct raw_data : raw_data_base
* @return The element as `T`
*/
template <class T>
T at(std::size_t n = 0) const
T at(int n = 0) const
{
T result;
this->visit_at([&](auto x) { result = x; }, n);
......
@@ -24,7 +24,7 @@ auto with_char(F f)
inline std::string
replace_string(std::string subject, const std::string& search, const std::string& replace)
{
size_t pos = 0;
int pos = 0;
while((pos = subject.find(search, pos)) != std::string::npos)
{
subject.replace(pos, search.length(), replace);
......
@@ -11,9 +11,9 @@ inline namespace MIGRAPHX_INLINE_NS {
struct tf_options
{
bool is_nhwc = false;
unsigned int batch_size = 1;
int batch_size = 1;
/// Explicitly specify the dims of an input
std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims = {};
std::unordered_map<std::string, std::vector<int>> map_input_dims = {};
std::vector<std::string> output_node_names = {};
};
......
@@ -157,10 +157,10 @@ struct value
{
}
template <class T>
binary(T* data, std::size_t s) : base(data, data + s)
binary(T* data, int s) : base(data, data + s)
{
}
explicit binary(std::size_t s) : base(s) {}
explicit binary(int s) : base(s) {}
};
value() = default;
@@ -263,7 +263,7 @@ struct value
value* find(const std::string& pkey);
const value* find(const std::string& pkey) const;
bool contains(const std::string& pkey) const;
std::size_t size() const;
int size() const;
bool empty() const;
const value* data() const;
value* data();
@@ -276,17 +276,17 @@ struct value
const value& front() const;
value& back();
const value& back() const;
value& at(std::size_t i);
const value& at(std::size_t i) const;
value& at(int i);
const value& at(int i) const;
value& at(const std::string& pkey);
const value& at(const std::string& pkey) const;
value& operator[](std::size_t i);
const value& operator[](std::size_t i) const;
value& operator[](int i);
const value& operator[](int i) const;
value& operator[](const std::string& pkey);
void clear();
void resize(std::size_t n);
void resize(std::size_t n, const value& v);
void resize(int n);
void resize(int n, const value& v);
std::pair<value*, bool> insert(const value& v);
value* insert(const value* pos, const value& v);
......
@@ -115,7 +115,7 @@ T range_product(R1&& r1, R2&& r2, T state, Reducer r, Product p)
}
template <class R1, class R2, class Compare>
std::size_t mismatch_idx(R1&& r1, R2&& r2, Compare compare)
int mismatch_idx(R1&& r1, R2&& r2, Compare compare)
{
auto p = std::mismatch(r1.begin(), r1.end(), r2.begin(), compare);
return std::distance(r1.begin(), p.first);
@@ -138,7 +138,7 @@ double max_diff(R1&& r1, R2&& r2)
}
template <class R1, class R2, class T>
std::size_t mismatch_diff(R1&& r1, R2&& r2, T diff)
int mismatch_diff(R1&& r1, R2&& r2, T diff)
{
return mismatch_idx(r1, r2, [&](auto x, auto y) {
auto d = abs_diff(x, y);
@@ -149,7 +149,7 @@ std::size_t mismatch_diff(R1&& r1, R2&& r2, T diff)
template <class R1, class R2>
double rms_range(const R1& r1, const R2& r2)
{
std::size_t n = range_distance(r1);
int n = range_distance(r1);
if(n == range_distance(r2))
{
double square_difference = range_product(r1, r2, 0.0, sum_fn{}, square_diff);
......