"vscode:/vscode.git/clone" did not exist on "dd1da8aa07b75efb02a3e4753550152a3482ec2f"
Unverified Commit 9852aaef authored by bpickrel, committed by GitHub

Clang format ver10 (#1106)

Update the base version of clang-format from 5.0 to 10.0
parent af0148ce
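The source hunks below are mechanical re-formatting from the version bump. As a quick reference, here is a minimal sketch of the visible differences, distilled from the hunks themselves (the identifiers are illustrative, not from the codebase; the authoritative rules live in the repository's .clang-format file): clang-format-10 breaks before the brace of a `typedef enum`, attaches the brace of a braced `case` label, drops the spaces clang-format-5.0 put inside lambda init-captures, and moves the first argument of an overlong call onto its own line (see the `transform_if` and `m.def` hunks).

```cpp
// Sketch only: the names (status_t, classify) are illustrative.
#include <string>
#include <utility>

typedef enum // clang-format-5.0 kept the brace up here: "typedef enum {"
{
    status_success   = 0,
    status_bad_param = 1
} status_t;

inline int classify(status_t t, std::string s)
{
    // 5.0 wrote init-captures with inner spaces, e.g. "[ =, s = std::move(s) ]";
    // 10 removes them.
    auto is_empty = [s = std::move(s)] { return s.empty(); };
    switch(t)
    {
    case status_success: { // 5.0 placed this "{" alone on the next line
        return is_empty() ? 0 : 1;
    }
    default: return 2;
    }
}
```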
@@ -4,7 +4,7 @@
 # are installed, and if so, uses the installed version to format
 # the staged changes.
-base=clang-format-5.0
+base=clang-format-10
 format=""
 yapf_base=yapf
 yapf_format=""
@@ -133,7 +133,7 @@ jobs:
 -o -iname '*.cpp.in' \
 -o -iname '*.cl' \
 | grep -v 'build/' \
-| xargs -n 1 -P 1 -I{} -t sh -c 'clang-format-5.0 -style=file {} | diff - {}'
+| xargs -n 1 -P 1 -I{} -t sh -c 'clang-format-10 -style=file {} | diff - {}'
 find . -iname '*.py' \
 | grep -v 'build/' \
 | xargs -n 1 -P 1 -I{} -t sh -c 'yapf {} | diff - {}'
@@ -12,7 +12,7 @@ RUN sh -c 'echo deb [arch=amd64 trusted=yes] http://repo.radeon.com/rocm/apt/4.5
 RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \
 apt-utils \
 build-essential \
-clang-format-5.0 \
+clang-format-10 \
 cmake \
 curl \
 doxygen \
@@ -205,7 +205,7 @@ Depending on your setup `sudo` may be required for the pip install.
 All the code is formatted using clang-format. To format a file, use:
 ```
-clang-format-5.0 -style=file -i <path-to-source-file>
+clang-format-10 -style=file -i <path-to-source-file>
 ```
 Also, githooks can be installed to format the code per-commit:
@@ -12,7 +12,7 @@ RUN sh -c 'echo deb [arch=amd64 trusted=yes] http://repo.radeon.com/rocm/apt/4.5
 RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \
 apt-utils \
 build-essential \
-clang-format-5.0 \
+clang-format-10 \
 cmake \
 curl \
 doxygen \
@@ -25,7 +25,8 @@ extern "C" {
 #endif
 // return code, more to be added later
-typedef enum {
+typedef enum
+{
 migraphx_status_success = 0,
 migraphx_status_bad_param = 1,
 migraphx_status_unknown_target = 3,
@@ -35,7 +36,8 @@
 #define MIGRAPHX_SHAPE_GENERATE_ENUM_TYPES(x, t) migraphx_shape_##x,
 /// An enum to represent the different data type inputs
-typedef enum {
+typedef enum
+{
 migraphx_shape_tuple_type,
 MIGRAPHX_SHAPE_VISIT_TYPES(MIGRAPHX_SHAPE_GENERATE_ENUM_TYPES)
 } migraphx_shape_datatype_t;
@@ -101,8 +101,8 @@ template <class M>
 auto bind_match(M m, std::string name)
 {
 return make_function_matcher(
-[ =, name = std::move(name) ](matcher_context & ctx, instruction_ref ins)
-->optional<instruction_ref> {
+[=, name = std::move(name)](matcher_context& ctx,
+instruction_ref ins) -> optional<instruction_ref> {
 auto result = m.match(ctx, ins);
 if(result)
 {
@@ -536,7 +536,7 @@ auto skip_output(Ms... ms)
 inline auto name(std::string s)
 {
 return make_basic_pred_matcher(
-[ =, s = std::move(s) ](instruction_ref ins) { return ins->name() == s; });
+[=, s = std::move(s)](instruction_ref ins) { return ins->name() == s; });
 }
 inline auto name_contains(const std::string& name)
@@ -547,7 +547,7 @@ inline auto name_contains(const std::string& name)
 inline auto name(std::unordered_set<std::string> names)
 {
-return make_basic_pred_matcher([ =, names = std::move(names) ](instruction_ref ins) {
+return make_basic_pred_matcher([=, names = std::move(names)](instruction_ref ins) {
 return names.count(ins->name()) > 0;
 });
 }
@@ -97,7 +97,6 @@ struct deconvolution
 shape win_shape{output_shape.type(), win_size};
 par_dfor(in_n, wei_c)([&](int o, int k) {
 shape_for_each(win_shape, [&](auto idx_win) {
 const int w = idx_win[0];
@@ -140,9 +139,7 @@ struct deconvolution
 weights(idx_wei.begin(), idx_wei.end());
 }
 });
 });
 });
 return result;
 }
@@ -181,7 +181,8 @@ struct nonmaxsuppression
 make_function_output_iterator([&](const auto& x) { sorted_boxes.push(x); });
 int64_t box_idx = 0;
-transform_if(scores.begin() + score_offset,
+transform_if(
+scores.begin() + score_offset,
 scores.begin() + score_offset + box_num,
 insert_to_sorted_boxes,
 [&](auto sc) {
@@ -253,7 +253,6 @@ struct roialign
 max_pool{});
 output(n, c, ph, pw) = output_val;
 });
 });
 });
@@ -64,7 +64,6 @@ struct unary : op_name<Derived>
 input.end(),
 output.begin(),
 static_cast<const Derived&>(*this).apply());
 });
 });
 return result;
@@ -41,7 +41,6 @@ auto par_dfor(Ts... xs)
 {
 dfor(xs...)(f);
 }
 };
 }
@@ -35,7 +35,7 @@ struct shape
 m(int64_type, int64_t) \
 m(uint32_type, uint32_t) \
 m(uint64_type, uint64_t)
-// clang-format on
+// clang-format on
 #define MIGRAPHX_SHAPE_GENERATE_ENUM_TYPES(x, t) x,
 enum type_t
@@ -188,8 +188,7 @@ struct shape
 {
 switch(t)
 {
-case tuple_type:
-{
+case tuple_type: {
 tv();
 return;
 }
@@ -131,7 +131,8 @@ inline std::string interpolate_string(const std::string& input,
 std::string start = "${",
 std::string end = "}")
 {
-return interpolate_string(input,
+return interpolate_string(
+input,
 [&](auto start_it, auto last_it) {
 auto key = trim({start_it, last_it});
 auto it = vars.find(key);
@@ -315,8 +315,7 @@ struct value
 {
 switch(this->get_type())
 {
-case null_type:
-{
+case null_type: {
 std::nullptr_t null{};
 if(this->key.empty())
 v(null);
@@ -325,8 +324,7 @@ struct value
 return;
 }
 #define MIGRAPHX_VALUE_GENERATE_CASE(vt, cpp_type) \
-case vt##_type: \
-{ \
+case vt##_type: { \
 if(this->key.empty()) \
 v(this->get_##vt()); \
 else \
@@ -346,15 +344,13 @@ struct value
 {
 switch(this->get_type())
 {
-case null_type:
-{
+case null_type: {
 std::nullptr_t null{};
 v(null);
 return;
 }
 #define MIGRAPHX_VALUE_GENERATE_CASE_VALUE(vt, cpp_type) \
-case vt##_type: \
-{ \
+case vt##_type: { \
 v(this->get_##vt()); \
 return; \
 }
@@ -14,44 +14,36 @@ MSGPACK_API_VERSION_NAMESPACE(MSGPACK_DEFAULT_API_NS)
 {
 switch(o.type)
 {
-case msgpack::type::NIL:
-{
+case msgpack::type::NIL: {
 v = nullptr;
 break;
 }
-case msgpack::type::BOOLEAN:
-{
+case msgpack::type::BOOLEAN: {
 v = o.as<bool>();
 break;
 }
-case msgpack::type::POSITIVE_INTEGER:
-{
+case msgpack::type::POSITIVE_INTEGER: {
 v = o.as<std::uint64_t>();
 break;
 }
-case msgpack::type::NEGATIVE_INTEGER:
-{
+case msgpack::type::NEGATIVE_INTEGER: {
 v = o.as<std::int64_t>();
 break;
 }
 case msgpack::type::FLOAT32:
-case msgpack::type::FLOAT64:
-{
+case msgpack::type::FLOAT64: {
 v = o.as<double>();
 break;
 }
-case msgpack::type::STR:
-{
+case msgpack::type::STR: {
 v = o.as<std::string>();
 break;
 }
-case msgpack::type::BIN:
-{
+case msgpack::type::BIN: {
 v = migraphx::value::binary{o.via.bin.ptr, o.via.bin.size};
 break;
 }
-case msgpack::type::ARRAY:
-{
+case msgpack::type::ARRAY: {
 migraphx::value r = migraphx::value::array{};
 std::for_each(
 o.via.array.ptr,
@@ -60,8 +52,7 @@ MSGPACK_API_VERSION_NAMESPACE(MSGPACK_DEFAULT_API_NS)
 v = r;
 break;
 }
-case msgpack::type::MAP:
-{
+case msgpack::type::MAP: {
 migraphx::value r = migraphx::value::object{};
 std::for_each(o.via.map.ptr,
 o.via.map.ptr + o.via.map.size,
@@ -71,7 +62,8 @@ MSGPACK_API_VERSION_NAMESPACE(MSGPACK_DEFAULT_API_NS)
 v = r;
 break;
 }
-case msgpack::type::EXT: { MIGRAPHX_THROW("msgpack EXT type not supported.");
+case msgpack::type::EXT: {
+MIGRAPHX_THROW("msgpack EXT type not supported.");
 }
 }
 return o;
@@ -382,8 +382,7 @@ literal onnx_parser::parse_tensor(const onnx::TensorProto& t) const
 case onnx::TensorProto::INT64: return create_literal(shape::int64_type, dims, t.int64_data());
 case onnx::TensorProto::UINT64:
 return create_literal(shape::uint64_type, dims, t.uint64_data());
-case onnx::TensorProto::FLOAT16:
-{
+case onnx::TensorProto::FLOAT16: {
 std::vector<uint16_t> data_uint16(t.int32_data().begin(), t.int32_data().end());
 std::vector<half> data_half;
 std::transform(data_uint16.begin(),
@@ -453,7 +452,8 @@ shape::type_t get_type(int dtype)
 case 11: return shape::double_type;
 case 12: return shape::uint32_type;
 case 13: return shape::uint64_type;
-default: { MIGRAPHX_THROW("Prototensor data type " + std::to_string(dtype) + " not supported");
+default: {
+MIGRAPHX_THROW("Prototensor data type " + std::to_string(dtype) + " not supported");
 }
 }
 }
@@ -809,7 +809,8 @@ void generic_get_unused_modules(Map& m, const std::vector<T*>& mods, OutputIterator out)
 std::transform(mods.begin(), mods.end(), std::inserter(used, used.end()), [](auto&& mod) {
 return mod->name();
 });
-transform_if(m.begin(),
+transform_if(
+m.begin(),
 m.end(),
 out,
 [&](auto&& pp) { return not contains(used, pp.first); },
@@ -303,15 +303,15 @@ MIGRAPHX_PYBIND11_MODULE(migraphx, m)
 .def("name", &migraphx::operation::name);
-m.def("parse_tf",
+m.def(
+"parse_tf",
 [](const std::string& filename,
 bool is_nhwc,
 unsigned int batch_size,
 std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims,
 std::vector<std::string> output_names) {
 return migraphx::parse_tf(
-filename,
-migraphx::tf_options{is_nhwc, batch_size, map_input_dims, output_names});
+filename, migraphx::tf_options{is_nhwc, batch_size, map_input_dims, output_names});
 },
 "Parse tf protobuf (default format is nhwc)",
 py::arg("filename"),
@@ -320,7 +320,8 @@ MIGRAPHX_PYBIND11_MODULE(migraphx, m)
 py::arg("map_input_dims") = std::unordered_map<std::string, std::vector<std::size_t>>(),
 py::arg("output_names") = std::vector<std::string>());
-m.def("parse_onnx",
+m.def(
+"parse_onnx",
 [](const std::string& filename,
 unsigned int default_dim_value,
 std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims,
@@ -343,7 +344,8 @@ MIGRAPHX_PYBIND11_MODULE(migraphx, m)
 py::arg("print_program_on_error") = false,
 py::arg("max_loop_iterations") = 10);
-m.def("parse_onnx_buffer",
+m.def(
+"parse_onnx_buffer",
 [](const std::string& onnx_buffer,
 unsigned int default_dim_value,
 std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims,
@@ -363,7 +365,8 @@ MIGRAPHX_PYBIND11_MODULE(migraphx, m)
 py::arg("skip_unknown_operators") = false,
 py::arg("print_program_on_error") = false);
-m.def("load",
+m.def(
+"load",
 [](const std::string& name, const std::string& format) {
 migraphx::file_options options;
 options.format = format;
@@ -373,7 +376,8 @@ MIGRAPHX_PYBIND11_MODULE(migraphx, m)
 py::arg("filename"),
 py::arg("format") = "msgpack");
-m.def("save",
+m.def(
+"save",
 [](const migraphx::program& p, const std::string& name, const std::string& format) {
 migraphx::file_options options;
 options.format = format;
@@ -39,9 +39,7 @@ bool reduce_dim(std::vector<shape>& shapes, std::size_t n)
 std::size_t reduce_dim_all(std::vector<shape>& shapes, std::size_t n)
 {
-while(reduce_dim(shapes, n) and n < shapes.size())
-{
-}
+while(reduce_dim(shapes, n) and n < shapes.size()) {}
 return n + 1;
 }