"data/vscode:/vscode.git/clone" did not exist on "1185f0b1a09abe59756f73ac5ed16eaf5c9c381f"
Commit 17b9cfb9 authored by Scott Thornton

Formatting

parent 5b68317c
@@ -285,10 +285,10 @@ struct transpose
    int output_alias(const std::vector<shape>&) const { return 0; }
};
// The contiguous operator takes a non-standard input tensor and returns the same tensor in
// standard form. For example, if an input tensor A with lens = (4,5) is transposed, i.e.
// lens = (5,4), its data layout remains the same during the transpose operation; only its
// shape lengths and strides are changed. This leaves the tensor in a non-standard form. The
// contiguous operator copies the underlying data such that the resulting tensor is returned
// to a standard form.
struct contiguous
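To make the comment concrete, here is a minimal standalone sketch (plain C++ with invented names such as standard_strides, not the migraphx API) of how a transpose changes only lens and strides while a contiguous copy rewrites the data into standard row-major order:

#include <cstddef>
#include <iostream>
#include <numeric>
#include <vector>

// Standard (row-major) strides for given lens: strides[i] = product of lens[i+1..end).
std::vector<std::size_t> standard_strides(const std::vector<std::size_t>& lens)
{
    std::vector<std::size_t> strides(lens.size(), 1);
    for(std::size_t i = lens.size() - 1; i > 0; i--)
        strides[i - 1] = strides[i] * lens[i];
    return strides;
}

int main()
{
    // A has lens = (4,5) and standard strides (5,1); fill it with 0..19.
    std::vector<float> data(4 * 5);
    std::iota(data.begin(), data.end(), 0.0f);

    // Transposing swaps lens and strides but does not move the data:
    // lens = (5,4), strides = (1,5) -- a non-standard shape.
    std::vector<std::size_t> t_lens    = {5, 4};
    std::vector<std::size_t> t_strides = {1, 5};

    // A contiguous copy walks the transposed view in logical order and writes
    // the elements out densely, restoring the standard strides (4,1).
    auto out_strides = standard_strides(t_lens);
    std::vector<float> out(5 * 4);
    for(std::size_t i = 0; i < t_lens[0]; i++)
        for(std::size_t j = 0; j < t_lens[1]; j++)
            out[i * out_strides[0] + j * out_strides[1]] =
                data[i * t_strides[0] + j * t_strides[1]];

    // Element (1,2) of the transposed tensor is A(2,1) = 11.
    std::cout << out[1 * out_strides[0] + 2 * out_strides[1]] << "\n"; // prints 11
}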
@@ -716,13 +716,14 @@ struct flatten
    int output_alias(const std::vector<shape>&) const { return 0; }
};
// The broadcast operator performs numpy-style broadcasting of an axis of a given tensor. This
// is achieved primarily by setting the stride of the broadcasted axis to zero. Linear indices
// are computed from multi-indices by taking the inner product of the multi-index with the
// strides. For example, a tensor A(2,3) has lengths of (2,3) and strides of (3,1). To compute
// the linear offset that corresponds to the element in the 2nd row (i = 1) and 3rd column
// (j = 2), we compute the inner product (1,2) dot (3,1) = 1*3 + 2*1 = 5. It follows that we
// can negate the effect of a given axis by setting the stride of that axis to zero.
struct broadcast
{
    uint64_t axis = 0;
...
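A small self-contained sketch (plain C++; the helper name offset is invented for illustration) of the inner-product offset computation and the stride-zero trick described in the comment above:

#include <cstddef>
#include <iostream>
#include <vector>

// Linear offset as the inner product of a multi-index with the strides.
std::size_t offset(const std::vector<std::size_t>& idx, const std::vector<std::size_t>& strides)
{
    std::size_t off = 0;
    for(std::size_t k = 0; k < idx.size(); k++)
        off += idx[k] * strides[k];
    return off;
}

int main()
{
    // A(2,3): lengths (2,3), strides (3,1); element (1,2) lives at 1*3 + 2*1 = 5.
    std::cout << offset({1, 2}, {3, 1}) << "\n"; // prints 5

    // Broadcast a length-3 vector across the rows of a (2,3) tensor by giving it
    // lengths (2,3) and strides (0,1): axis 0 no longer contributes to the offset,
    // so both rows read the same three underlying elements.
    std::vector<float> b = {10, 20, 30};
    for(std::size_t i = 0; i < 2; i++)
    {
        for(std::size_t j = 0; j < 3; j++)
            std::cout << b[offset({i, j}, {0, 1})] << " ";
        std::cout << "\n"; // prints "10 20 30" twice
    }
}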
@@ -43,7 +43,7 @@ struct onnx_parser
    using op_func = std::function<instruction_ref(attribute_map, std::vector<instruction_ref>)>;
    node_map nodes;
    std::unordered_map<std::string, instruction_ref> instructions;
    program prog = program();
    bool is_pytorch = false;
    std::unordered_map<std::string, op_func> ops;
@@ -140,7 +140,7 @@ struct onnx_parser
        // Copy the larger vector to output_lens
        std::vector<std::size_t> output_lens = *s1;
        auto offset = s1->size() - s0->size();
        std::transform(s0->begin(),
                       s0->end(),
                       s1->begin() + offset,
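The hunk is cut off before the transform's destination and binary operation. Under the usual numpy broadcasting rule (align the shapes at their trailing dimensions; each output length is the max of a compatible pair), a standalone sketch of the whole computation might look like the following; the function name broadcast_lens and the error check are assumptions for illustration, not the elided code:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <vector>

// Numpy-style broadcast of two shapes: align them at their trailing dimensions,
// then take the elementwise max, requiring each pair to match or contain a 1.
std::vector<std::size_t> broadcast_lens(std::vector<std::size_t> s0, std::vector<std::size_t> s1)
{
    if(s0.size() > s1.size())
        s0.swap(s1); // make s1 the longer shape
    // Copy the larger vector to output_lens, then overwrite its trailing part
    std::vector<std::size_t> output_lens = s1;
    auto offset = s1.size() - s0.size();
    std::transform(s0.begin(),
                   s0.end(),
                   s1.begin() + offset,
                   output_lens.begin() + offset,
                   [](std::size_t a, std::size_t b) {
                       if(a != b and a != 1 and b != 1)
                           throw std::runtime_error("incompatible broadcast shapes");
                       return std::max(a, b);
                   });
    return output_lens;
}

int main()
{
    for(auto l : broadcast_lens({4, 5}, {3, 1, 5}))
        std::cout << l << " "; // prints: 3 4 5
    std::cout << "\n";
}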
@@ -182,17 +182,17 @@ struct onnx_parser
        op::convolution op;
        if(contains(attributes, "pads"))
        {
            if(contains(attributes, "auto_pad"))
            {
                MIGRAPH_THROW("auto_pad and padding cannot be specified simultaneously");
            }
            std::vector<std::size_t> padding(4);
            if(attributes["pads"].ints().size() != 4)
            {
                MIGRAPH_THROW("padding should have 4 values");
            }
            copy(attributes["pads"].ints(), padding.begin());
            if(padding[0] != padding[2] || padding[1] != padding[3])
            {
                MIGRAPH_THROW("migraphx does not support asymmetric padding");
            }
@@ -207,15 +207,15 @@ struct onnx_parser
        {
            copy(attributes["dilations"].ints(), op.dilation.begin());
        }
        if(contains(attributes, "auto_pad"))
        {
            auto s = attributes["auto_pad"].s();
            if(contains(attributes, "pads") and to_upper(s) != "NOTSET")
            {
                MIGRAPH_THROW("auto_pad and padding cannot be specified simultaneously");
            }
if (s.find("SAME") >= 0) if(s.find("SAME") >= 0)
{ {
op.padding_mode = op::convolution::same; op.padding_mode = op::convolution::same;
} }
@@ -244,11 +244,11 @@ struct onnx_parser
        {
            std::vector<std::size_t> padding(4);
            if(attributes["pads"].ints().size() != 4)
            {
                MIGRAPH_THROW("padding should have 4 values");
            }
            copy(attributes["pads"].ints(), padding.begin());
            if(padding[0] != padding[2] || padding[1] != padding[3])
            {
                MIGRAPH_THROW("migraphx does not support asymmetric padding");
            }
@@ -263,10 +263,10 @@ struct onnx_parser
        {
            copy(attributes["kernel_shape"].ints(), op.lengths.begin());
        }
        if(contains(attributes, "auto_pad"))
        {
            auto s = attributes["auto_pad"].s();
            if(to_upper(s) != "NOTSET")
            {
                MIGRAPH_THROW("auto_pad is not supported for pooling");
            }
@@ -482,9 +482,8 @@ struct onnx_parser
        if(model.ParseFromIstream(&is))
        {
            auto str_toupper = [](std::string s) {
                std::transform(
                    s.begin(), s.end(), s.begin(), [](unsigned char c) { return std::toupper(c); });
                return s;
            };
            auto producer_name = str_toupper(model.producer_name());
...