"vscode:/vscode.git/clone" did not exist on "24cde76a152fbffde30fa2be0d08dcbad490530e"
Unverified commit 9852aaef, authored by bpickrel and committed by GitHub

Clang format ver10 (#1106)

Update the base version of clang-format from 5.0 to 10.0
parent af0148ce
@@ -335,7 +335,6 @@ struct find_concat_op
             }
             auto y = p.insert_instruction(ins, op, concats);
             return {y};
-
         };
         std::vector<instruction_ref> args;
......
@@ -316,7 +316,6 @@ struct find_nested_concat
                 else
                     args.push_back(i);
             }
-
         })(ins->inputs());
         p.replace_instruction(ins, ins->get_operator(), args);
     }
......
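For context, find_nested_concat flattens concat-of-concat chains: the recursive lambda above walks ins->inputs(), splicing the inputs of nested concat instructions into one argument list, and replace_instruction then rebuilds a single flat concat. A minimal standalone sketch of the same flattening idea (the Node type and the "concat" string check are hypothetical stand-ins, not MIGraphX API):

    #include <string>
    #include <vector>

    struct Node
    {
        std::string op;
        std::vector<Node> inputs;
    };

    // Recursively splice the inputs of nested "concat" nodes into one flat
    // list, mirroring the self-calling lambda in find_nested_concat.
    void flatten_concat_args(const Node& n, std::vector<Node>& args)
    {
        for(const auto& i : n.inputs)
        {
            if(i.op == "concat")
                flatten_concat_args(i, args); // descend into nested concats
            else
                args.push_back(i); // keep other inputs as-is
        }
    }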
@@ -213,7 +213,6 @@ template <std::size_t N, class... Xs>
 bool is_vectorizable(const Xs&... xs)
 {
     return all_of({xs...}, [](const auto& s) {
-
         if(s.standard() and (s.lens().back() % N) == 0)
             return true;
         if(s.broadcasted())
......
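The fragment above tests every shape argument with all_of: a shape qualifies when it is standard (densely packed) and its innermost dimension divides evenly by the vector width N; the broadcast branch is cut off in this hunk. A rough standalone illustration of the divisibility check, using a hypothetical Shape struct in place of migraphx::shape:

    #include <algorithm>
    #include <cstddef>
    #include <initializer_list>
    #include <vector>

    struct Shape
    {
        std::vector<std::size_t> lens; // dimensions, innermost last
        bool standard;                 // densely packed, row-major
    };

    // A shape is vectorizable by width N when it is packed and its innermost
    // dimension is a multiple of N.
    template <std::size_t N>
    bool is_vectorizable(std::initializer_list<Shape> shapes)
    {
        return std::all_of(shapes.begin(), shapes.end(), [](const Shape& s) {
            return s.standard and (s.lens.back() % N) == 0;
        });
    }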
@@ -44,12 +44,13 @@ __device__ void block_scan(index idx, Op op, T init, ForStride fs, Input input,
 template <index_int N, class Op, class T, class Input, class Output>
 __device__ void block_scan(index idx, Op op, T init, index_int n, Input input, Output output)
 {
-    block_scan<N>(idx,
-                  op,
-                  init,
-                  [&](auto f) -> decltype(f(index_int{})) { return idx.local_stride(n, f); },
-                  input,
-                  output);
+    block_scan<N>(
+        idx,
+        op,
+        init,
+        [&](auto f) -> decltype(f(index_int{})) { return idx.local_stride(n, f); },
+        input,
+        output);
 }
 } // namespace device
......
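The reformatted call makes the layering easier to see: the count-based overload of block_scan simply forwards to the ForStride-based one, passing a lambda that wraps idx.local_stride(n, f) so the scan visits each thread's strided slice of the n elements. As a cut-down mental model, here is a sequential inclusive scan with the same (op, init, n, input, output) shape; it is plain CPU code, not the parallel device kernel:

    #include <cstddef>

    // Sequential stand-in for block_scan: an inclusive scan where input(j)
    // yields element j and output(j, x) receives its running prefix x.
    template <class Op, class T, class Input, class Output>
    void inclusive_scan(Op op, T init, std::size_t n, Input input, Output output)
    {
        T acc = init;
        for(std::size_t j = 0; j < n; ++j)
        {
            acc = op(acc, input(j)); // element j is included in its own prefix
            output(j, acc);
        }
    }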
@@ -14,28 +14,23 @@ constexpr void visit_tensor_size(index_int n, F f)
 {
     switch(n)
     {
-    case 1:
-    {
+    case 1: {
         f(std::integral_constant<index_int, 1>{});
         break;
     }
-    case 2:
-    {
+    case 2: {
         f(std::integral_constant<index_int, 2>{});
         break;
     }
-    case 3:
-    {
+    case 3: {
         f(std::integral_constant<index_int, 3>{});
         break;
     }
-    case 4:
-    {
+    case 4: {
         f(std::integral_constant<index_int, 4>{});
         break;
     }
-    case 5:
-    {
+    case 5: {
         f(std::integral_constant<index_int, 5>{});
         break;
     }
......
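visit_tensor_size is the usual switch-based bridge from a runtime rank n to a compile-time constant: each case hands the visitor a std::integral_constant, so the callback can use the rank as a template argument. A minimal sketch of the pattern (two cases instead of five):

    #include <cstdint>
    #include <type_traits>

    using index_int = std::uint32_t;

    // Dispatch a runtime value to a compile-time integral_constant, as
    // visit_tensor_size does for tensor ranks 1..5.
    template <class F>
    constexpr void visit_rank(index_int n, F f)
    {
        switch(n)
        {
        case 1: {
            f(std::integral_constant<index_int, 1>{});
            break;
        }
        case 2: {
            f(std::integral_constant<index_int, 2>{});
            break;
        }
        }
    }

    // Usage: visit_rank(2, [](auto rank) { ... }); the callback receives the
    // rank as an integral_constant usable in constant expressions.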
@@ -25,22 +25,23 @@ argument nonzero(hipStream_t stream, const argument& result, const argument& arg
         // fill all output to 0 first
         idx.local_stride(out_elem_num, [&](auto j) { ptr[j] = 0; });
-        block_scan<block_size>(idx,
-                               sum{},
-                               0,
-                               elem_num,
-                               [&](auto j) { return (float_equal(in_ptr[j], 0)) ? 0 : 1; },
-                               [&](auto j, auto x) {
-                                   auto out_loc = x - 1;
-                                   if(float_equal(in_ptr[j], 0))
-                                       return;
-                                   auto index = si.multi(j);
-                                   for(size_t k = 0; k < index.size(); ++k)
-                                   {
-                                       ptr[k * elem_num + out_loc] = index[k];
-                                   }
-                               });
+        block_scan<block_size>(
+            idx,
+            sum{},
+            0,
+            elem_num,
+            [&](auto j) { return (float_equal(in_ptr[j], 0)) ? 0 : 1; },
+            [&](auto j, auto x) {
+                auto out_loc = x - 1;
+                if(float_equal(in_ptr[j], 0))
+                    return;
+                auto index = si.multi(j);
+                for(size_t k = 0; k < index.size(); ++k)
+                {
+                    ptr[k * elem_num + out_loc] = index[k];
+                }
+            });
     });
 });
......
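The nonzero kernel turns a prefix scan into stream compaction: every element contributes an indicator of 0 or 1, the inclusive scan value x counts the nonzeros seen so far, and x - 1 is therefore the output slot for the current element's multi-index (stored dimension-major via ptr[k * elem_num + out_loc]). A sequential sketch of the same idea for a 1-D input (a hypothetical helper, not the HIP kernel):

    #include <cstddef>
    #include <vector>

    // Collect indices of nonzero elements using an inclusive scan over 0/1
    // indicators, mirroring the block_scan call in nonzero.
    std::vector<std::size_t> nonzero_indices(const std::vector<float>& in)
    {
        std::vector<std::size_t> out(in.size()); // worst case: all nonzero
        std::size_t x = 0;                       // running inclusive-scan value
        for(std::size_t j = 0; j < in.size(); ++j)
        {
            x += (in[j] == 0.0f) ? 0 : 1; // the indicator lambda
            if(in[j] == 0.0f)
                continue;   // zeros write nothing
            out[x - 1] = j; // out_loc = x - 1
        }
        out.resize(x);
        return out;
    }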
@@ -24,12 +24,13 @@ void prefix_scan_sum(hipStream_t stream, const argument& result, const argument&
             k[axis] = j;
             return k;
         };
-        block_scan<block_size>(idx,
-                               sum{},
-                               0,
-                               n,
-                               [&](auto j) { return input[compute_idx(j)]; },
-                               [&](auto j, auto x) { output[compute_idx(j)] = x; });
+        block_scan<block_size>(
+            idx,
+            sum{},
+            0,
+            n,
+            [&](auto j) { return input[compute_idx(j)]; },
+            [&](auto j, auto x) { output[compute_idx(j)] = x; });
     });
 });
 }
......
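prefix_scan_sum applies the same block_scan along a single axis: compute_idx places the scan position j at k[axis], so the input and output lambdas step through one line of the tensor while the surrounding launch iterates the remaining axes. A hedged 2-D sketch of that axis indexing (row-major layout assumed; none of this is the MIGraphX launch machinery):

    #include <cstddef>
    #include <vector>

    // Prefix-sum along `axis` of a row-major rows x cols matrix: each line
    // parallel to the axis is scanned independently.
    void prefix_scan_sum_2d(
        std::vector<float>& data, std::size_t rows, std::size_t cols, int axis)
    {
        std::size_t lines = (axis == 0) ? cols : rows; // lines to scan
        std::size_t n     = (axis == 0) ? rows : cols; // scan length
        for(std::size_t line = 0; line < lines; ++line)
        {
            float acc = 0.0f;
            for(std::size_t j = 0; j < n; ++j)
            {
                // compute_idx: put j on the scanned axis, `line` on the other
                std::size_t idx = (axis == 0) ? j * cols + line : line * cols + j;
                acc += data[idx];
                data[idx] = acc;
            }
        }
    }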
@@ -109,10 +109,9 @@ argument register_on_gpu(const argument& arg)
 {
     auto arg_shared = arg.share();
     auto p = share(register_on_gpu(arg_shared.data(), arg_shared.get_shape().bytes()));
-    return {arg_shared.get_shape(),
-            [ p, a = std::move(arg_shared) ]() mutable {return get_device_ptr(p.get());
-    }
-    }; // namespace gpu
+    return {arg_shared.get_shape(), [p, a = std::move(arg_shared)]() mutable {
+        return get_device_ptr(p.get());
+    }}; // namespace gpu
 }
 } // namespace MIGRAPHX_INLINE_NS
 argument to_gpu(const argument& arg, bool host)
......
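The reformatted return statement exposes the two-part constructor used here: a shape plus a data callback. The lambda captures the shared registration handle p and the shared argument a so both stay alive as long as the result does, and produces the device pointer lazily. A simplified sketch of the shape-plus-callback ownership pattern (the Argument type below is a hypothetical stand-in, not migraphx::argument):

    #include <cstddef>
    #include <functional>
    #include <memory>
    #include <vector>

    // A pared-down argument: a size plus a callback that yields the data
    // pointer on demand, keeping captured resources alive in the closure.
    struct Argument
    {
        std::size_t bytes;
        std::function<char*()> data;
    };

    Argument wrap_buffer(std::shared_ptr<std::vector<char>> buf)
    {
        // Capturing the shared_ptr extends the buffer's lifetime to that of
        // the Argument, as the gpu lambda does with p and the shared arg a.
        return {buf->size(), [buf]() { return buf->data(); }};
    }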
@@ -48,7 +48,7 @@ MIGRAPHX_INTEGRAL_CONSTANT_BINARY_OP(>=)
 MIGRAPHX_INTEGRAL_CONSTANT_BINARY_OP(==)
 MIGRAPHX_INTEGRAL_CONSTANT_BINARY_OP(!=)
 MIGRAPHX_INTEGRAL_CONSTANT_BINARY_OP(&)
-MIGRAPHX_INTEGRAL_CONSTANT_BINARY_OP (^)
+MIGRAPHX_INTEGRAL_CONSTANT_BINARY_OP(^)
 MIGRAPHX_INTEGRAL_CONSTANT_BINARY_OP(|)
 MIGRAPHX_INTEGRAL_CONSTANT_BINARY_OP(&&)
 MIGRAPHX_INTEGRAL_CONSTANT_BINARY_OP(||)
......
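The only change here is whitespace: clang-format 5 treated the ^ in MIGRAPHX_INTEGRAL_CONSTANT_BINARY_OP (^) as a binary operator and inserted a space before the parenthesis, while version 10 parses it as a macro argument. The macro definition is not part of this diff; as a flavor-only guess at the pattern, such lists usually stamp out one operator overload per invocation:

    #include <type_traits>

    // Hypothetical reduction of the pattern: each invocation defines the
    // given operator for integral_constant operands, returning a new
    // integral_constant that carries the result in its type.
    #define INTEGRAL_CONSTANT_BINARY_OP(op)                              \
        template <class T, T x, class U, U y>                            \
        constexpr auto operator op(std::integral_constant<T, x>,         \
                                   std::integral_constant<U, y>)         \
        {                                                                \
            return std::integral_constant<decltype(x op y), (x op y)>{}; \
        }

    INTEGRAL_CONSTANT_BINARY_OP(^)
    INTEGRAL_CONSTANT_BINARY_OP(|)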
@@ -432,7 +432,6 @@ struct miopen_apply
                 reshapes[2],
                 reshapes[3],
                 output);
-
         });
     }
@@ -492,7 +491,6 @@ struct miopen_apply
     void add_roialign()
     {
         apply_map.emplace("roialign", [=](instruction_ref ins) {
-
            auto s = ins->get_shape();
            auto op_val = ins->get_operator().to_value();
            auto output = insert_allocation(ins, s);
......
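miopen_apply routes lowering through a map from operator name to callback: each registered lambda reads the op's attributes via to_value(), allocates an output buffer with insert_allocation, and swaps the instruction for a GPU implementation. A stripped-down sketch of that dispatch-map registration (hypothetical types replace instruction_ref and the real lowering calls):

    #include <functional>
    #include <string>
    #include <unordered_map>

    struct Instruction
    {
        // stand-in for the real instruction node
    };

    // A name -> lowering-callback map in the style of miopen_apply's
    // apply_map.
    struct Apply
    {
        std::unordered_map<std::string, std::function<void(Instruction&)>> apply_map;

        void add_roialign()
        {
            // Register a lambda that rewrites "roialign" instructions; the
            // real code captures by copy ([=]) to reach insert_allocation.
            apply_map.emplace("roialign",
                              [this](Instruction& ins) { lower_roialign(ins); });
        }

        void lower_roialign(Instruction&)
        {
            // allocate output, replace with the GPU roialign op
        }
    };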
@@ -499,8 +499,7 @@ literal tf_parser::parse_tensor(const tensorflow::TensorProto& t) const
         return create_literal(shape::int64_type, dims, get_data_vals(t.int64_val(), shape_size));
     case tensorflow::DataType::DT_BOOL:
         return create_literal(shape::int32_type, dims, get_data_vals(t.bool_val(), shape_size));
-    case tensorflow::DataType::DT_HALF:
-    {
+    case tensorflow::DataType::DT_HALF: {
         std::vector<int> data_int32 = get_data_vals(t.half_val(), shape_size);
         std::vector<uint16_t> data_uint16(data_int32.begin(), data_int32.end());
         std::vector<half> data_half;
......
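The DT_HALF case reflects TensorFlow's storage quirk: half-precision payloads arrive as int32 values, so the parser narrows each one to its low 16 bits and then reinterprets those bits as a half. A hedged sketch completing the conversion (the half struct and memcpy bit-cast are stand-ins for the types MIGraphX actually uses):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct half
    {
        std::uint16_t bits; // raw IEEE half-precision representation
    };

    // TensorFlow ships DT_HALF data as int32; keep the low 16 bits and
    // reinterpret them as half-precision values.
    std::vector<half> parse_half_vals(const std::vector<int>& data_int32)
    {
        std::vector<std::uint16_t> data_uint16(data_int32.begin(), data_int32.end());
        std::vector<half> data_half;
        data_half.reserve(data_uint16.size());
        for(std::uint16_t raw : data_uint16)
        {
            half h{};
            std::memcpy(&h, &raw, sizeof(raw)); // bit-level reinterpretation
            data_half.push_back(h);
        }
        return data_half;
    }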
@@ -197,7 +197,7 @@ struct lhs_expression
 TEST_LHS_REOPERATOR(%)
 TEST_LHS_REOPERATOR(&)
 TEST_LHS_REOPERATOR(|)
-TEST_LHS_REOPERATOR (^)
+TEST_LHS_REOPERATOR(^)
 };
 template <class F>
......
@@ -25,7 +25,8 @@ extern "C" {
 #endif
 // return code, more to be added later
-typedef enum {
+typedef enum
+{
     migraphx_status_success = 0,
     migraphx_status_bad_param = 1,
     migraphx_status_unknown_target = 3,
@@ -35,7 +36,8 @@ typedef enum
 #define MIGRAPHX_SHAPE_GENERATE_ENUM_TYPES(x, t) migraphx_shape_##x,
 /// An enum to represent the different data type inputs
-typedef enum {
+typedef enum
+{
     migraphx_shape_tuple_type,
     MIGRAPHX_SHAPE_VISIT_TYPES(MIGRAPHX_SHAPE_GENERATE_ENUM_TYPES)
 } migraphx_shape_datatype_t;
......
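Only the enum brace placement changes in these C API hunks; the content is the status codes and the X-macro-generated migraphx_shape_datatype_t. Typical use of the status enum, written against only the values visible above:

    #include <cstdio>

    // Status values as declared in migraphx.h (reduced to those shown in
    // the diff).
    typedef enum
    {
        migraphx_status_success        = 0,
        migraphx_status_bad_param      = 1,
        migraphx_status_unknown_target = 3,
    } migraphx_status;

    // C-API convention: every entry point returns a status that the caller
    // checks before touching any outputs.
    bool check(migraphx_status s)
    {
        if(s != migraphx_status_success)
        {
            std::fprintf(stderr, "migraphx call failed: %d\n", static_cast<int>(s));
            return false;
        }
        return true;
    }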
@@ -7,10 +7,10 @@ fi
 if type -p python3.8 > /dev/null ; then
     PYTHON=python3.8
 fi
-ls -1 $DIR/include/ | xargs -n 1 -P $(nproc) -I{} -t bash -c "$PYTHON $DIR/te.py $DIR/include/{} | clang-format-5.0 -style=file > $SRC_DIR/include/migraphx/{}"
+ls -1 $DIR/include/ | xargs -n 1 -P $(nproc) -I{} -t bash -c "$PYTHON $DIR/te.py $DIR/include/{} | clang-format-10 -style=file > $SRC_DIR/include/migraphx/{}"
 function api {
-    $PYTHON $DIR/api.py $SRC_DIR/api/migraphx.py $1 | clang-format-5.0 -style=file > $2
+    $PYTHON $DIR/api.py $SRC_DIR/api/migraphx.py $1 | clang-format-10 -style=file > $2
 }
 api $DIR/api/migraphx.h $SRC_DIR/api/include/migraphx/migraphx.h
......