Commit 17b9cfb9 authored by Scott Thornton's avatar Scott Thornton
Browse files

Formatting

parent 5b68317c
......@@ -285,10 +285,10 @@ struct transpose
int output_alias(const std::vector<shape>&) const { return 0; }
};
// The contiguous operator takes a non-standard input tensor and returns
// the same tensor but in standard form. For example, if input tensor A which has lens = (4,5)
// The contiguous operator takes a non-standard input tensor and returns
// the same tensor but in standard form. For example, if input tensor A which has lens = (4,5)
// is first transposed, i.e. lens = (5,4), this tensor's data layout remained the same
// during the transpose operation; only its shape lengths and strides were changed.
// during the transpose operation; only its shape lengths and strides were changed.
// This leaves the tensor in a non-standard form. The contiguous operator copies the
// underlying data such that resulting tensor is returned to a standard form.
struct contiguous
......@@ -716,13 +716,14 @@ struct flatten
int output_alias(const std::vector<shape>&) const { return 0; }
};
// The broadcast operator performs the numpy-style broadcasting of an axis of a given tensor. This is achieved
// primarily by setting the stride of the broadcasted axis to zero. Linear indices are computed from multi-indices
// by computing the inner product on the multi-index with the strides. For example, if we have a tensor A(2,3) it has
// lengths of (2,3) and strides of (3,1). If we want to compute the linear offset that corresponds to the element
// on the 2nd row (i = 1) and 3rd column (j = 2), we compute the following inner product (1,2) dot (3, 1) =
// 1*3 + 2*1 = 5. It is obvious from there that we can negate the effects of a given axis by setting the
// stride of that axis to zero.
// The broadcast operator performs the numpy-style broadcasting of an axis of a given tensor. This
// is achieved primarily by setting the stride of the broadcasted axis to zero. Linear indices are
// computed from multi-indices by computing the inner product on the multi-index with the strides.
// For example, if we have a tensor A(2,3) it has lengths of (2,3) and strides of (3,1). If we want
// to compute the linear offset that corresponds to the element on the 2nd row (i = 1) and 3rd
// column (j = 2), we compute the following inner product (1,2) dot (3, 1) = 1*3 + 2*1 = 5. It is
// obvious from there that we can negate the effects of a given axis by setting the stride of that
// axis to zero.
struct broadcast
{
uint64_t axis = 0;
......
......@@ -43,7 +43,7 @@ struct onnx_parser
using op_func = std::function<instruction_ref(attribute_map, std::vector<instruction_ref>)>;
node_map nodes;
std::unordered_map<std::string, instruction_ref> instructions;
program prog = program();
program prog = program();
bool is_pytorch = false;
std::unordered_map<std::string, op_func> ops;
......@@ -140,7 +140,7 @@ struct onnx_parser
// Copy the larger vector to output_lens
std::vector<std::size_t> output_lens = *s1;
auto offset = s1->size() - s0->size();
auto offset = s1->size() - s0->size();
std::transform(s0->begin(),
s0->end(),
s1->begin() + offset,
......@@ -182,17 +182,17 @@ struct onnx_parser
op::convolution op;
if(contains(attributes, "pads"))
{
if (contains(attributes, "auto_pad"))
if(contains(attributes, "auto_pad"))
{
MIGRAPH_THROW("auto_pad and padding cannot be specified simultaneously");
}
std::vector<std::size_t> padding(4);
copy(attributes["pads"].ints(), padding.begin());
if (padding.size() != 4)
if(padding.size() != 4)
{
MIGRAPH_THROW("padding should have 4 values");
}
if (padding[0] != padding[2] || padding[1] != padding[3])
if(padding[0] != padding[2] || padding[1] != padding[3])
{
MIGRAPH_THROW("migraphx does not support asymetric padding");
}
......@@ -207,15 +207,15 @@ struct onnx_parser
{
copy(attributes["dilations"].ints(), op.dilation.begin());
}
if (contains(attributes, "auto_pad"))
if(contains(attributes, "auto_pad"))
{
auto s = attributes["auto_pad"].s();
if (contains(attributes, "pads") and to_upper(s) != "NOTSET")
if(contains(attributes, "pads") and to_upper(s) != "NOTSET")
{
MIGRAPH_THROW("auto_pad and padding cannot be specified simultaneously");
}
if (s.find("SAME") >= 0)
if(s.find("SAME") >= 0)
{
op.padding_mode = op::convolution::same;
}
......@@ -244,11 +244,11 @@ struct onnx_parser
{
std::vector<std::size_t> padding(4);
copy(attributes["pads"].ints(), padding.begin());
if (padding.size() != 4)
if(padding.size() != 4)
{
MIGRAPH_THROW("padding should have 4 values");
}
if (padding[0] != padding[2] || padding[1] != padding[3])
if(padding[0] != padding[2] || padding[1] != padding[3])
{
MIGRAPH_THROW("migraphx does not support asymetric padding");
}
......@@ -263,10 +263,10 @@ struct onnx_parser
{
copy(attributes["kernel_shape"].ints(), op.lengths.begin());
}
if (contains(attributes, "auto_pad"))
if(contains(attributes, "auto_pad"))
{
auto s = attributes["auto_pad"].s();
if (to_upper(s) != "NOTSET")
if(to_upper(s) != "NOTSET")
{
MIGRAPH_THROW("auto_pad is not supported for pooling");
}
......@@ -482,9 +482,8 @@ struct onnx_parser
if(model.ParseFromIstream(&is))
{
auto str_toupper = [](std::string s) {
std::transform(s.begin(), s.end(), s.begin(),
[](unsigned char c){ return std::toupper(c);
});
std::transform(
s.begin(), s.end(), s.begin(), [](unsigned char c) { return std::toupper(c); });
return s;
};
auto producer_name = str_toupper(model.producer_name());
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment