Unverified Commit 8443ecd1 authored by Shucai Xiao, committed by GitHub

Normalize ops (#667)



* add a pass to normalize ops

* clang format

* add unit tests

* clang format

* code backup

* clang format

* code backup

* clang format

* add support for slice in the normalize_op function

* clang format

* add operation method api for whether we need to call normalize_op

* clang format

* fix review comments

* clang format

* rename a function name

* clang format

* change compute_shape to normalize_compute_shape for corresponding operators

* clang format

* remove unnecessary code

* fix various issues

* clang format

* add attributes to operators having axis attributes

* clang format

* fixed jenkins build error

* clang format

* fix a bug related to slice

* clang format

* code backup

* clang format

* code backup

* clang format

* rename a file

* fix cppcheck error

* some code refinement

* clang format

* change attributes to enum

* clang format

* refine the enum

* clang format

* remove unnecessary code

* add unit tests for more code coverage and fixed a bug

* clang format

* remove unnecessary changes

* change normalize_axes to normalize

* clang format

* revert back the changes in broadcast.hpp

* rename normalize_axes to normalize

* fix review comments

* clang format

* Add flag to enable cpu backend

* Make buffers shared

* Enable optimizations

* Formatting

* Try to avoid ambiguous assign in value class

* fixed a build error

* clang format

* add the normalize_ops pass to the ref target

* refactor program to module to normalize_ops pass
Co-authored-by: Paul <pfultz2@yahoo.com>
Co-authored-by: mvermeulen <5479696+mvermeulen@users.noreply.github.com>
parent f8b56a66
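
At a glance, the pass canonicalizes operator attributes such as negative axes into a nonnegative form once, up front, so individual backends and kernels no longer repeat that bookkeeping. A minimal sketch of the core axis rule, assuming `ndim` is the rank of the operator's first input (the real pass is driven per-operator by its `normalize_axes` attributes):

```cpp
#include <cstdint>
#include <stdexcept>

// Hedged sketch: fold a possibly negative axis into [0, ndim), matching the
// convention the tests below expect (gather axis -3 on a rank-3 input -> 0).
int64_t normalize_axis(int64_t axis, int64_t ndim)
{
    if(axis < -ndim or axis >= ndim)
        throw std::runtime_error("normalize_axis: axis out of range");
    return (axis < 0) ? axis + ndim : axis;
}
```
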
@@ -785,7 +785,10 @@ struct cpu_softmax : auto_register_op<cpu_softmax<Op>>
     }
     std::string name() const { return "cpu::" + op.name(); }
-    shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
+    shape compute_shape(const std::vector<shape>& inputs) const
+    {
+        return op.normalize_compute_shape(inputs);
+    }
     argument compute(context&, const shape& output_shape, std::vector<argument> args) const
     {
         argument result{output_shape};
......
@@ -23,6 +23,7 @@
 #include <migraphx/cpu/lowering.hpp>
 #include <migraphx/pass.hpp>
 #include <migraphx/generate.hpp>
+#include <migraphx/normalize_ops.hpp>

 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
@@ -32,7 +33,8 @@ std::string target::name() const { return "cpu"; }

 std::vector<pass> target::get_passes(migraphx::context&, const compile_options&) const
 {
-    return {decompose{},
+    return {normalize_ops{},
+            decompose{},
             dead_code_elimination{},
             simplify_reshapes{},
             eliminate_identity{},
......
@@ -9,7 +9,7 @@ namespace gpu {

 shape hip_argmax::compute_shape(const std::vector<shape>& inputs) const
 {
     check_shapes{inputs, *this}.has(2).standard();
-    return op.compute_shape({inputs.at(0)});
+    return op.normalize_compute_shape({inputs.at(0)});
 }

 argument hip_argmax::compute(context& ctx, const shape&, const std::vector<argument>& args) const
......
@@ -9,7 +9,7 @@ namespace gpu {

 shape hip_argmin::compute_shape(const std::vector<shape>& inputs) const
 {
     check_shapes{inputs, *this}.has(2).standard();
-    return op.compute_shape({inputs.at(0)});
+    return op.normalize_compute_shape({inputs.at(0)});
 }

 argument hip_argmin::compute(context& ctx, const shape&, const std::vector<argument>& args) const
......
@@ -9,7 +9,7 @@ namespace gpu {

 shape hip_concat::compute_shape(std::vector<shape> inputs) const
 {
     inputs.pop_back();
-    return op.compute_shape(inputs);
+    return op.normalize_compute_shape(inputs);
 }

 argument hip_concat::compute(context& ctx,
......
@@ -10,13 +10,12 @@ inline namespace MIGRAPHX_INLINE_NS {
 namespace gpu {
 namespace device {

-argument gather(hipStream_t stream, argument result, argument arg1, argument arg2, int axis)
+argument gather(hipStream_t stream, argument result, argument arg1, argument arg2, int64_t axis)
 {
-    auto axis_index = (axis < 0) ? (axis + arg1.get_shape().lens().size()) : axis;
     const auto& input_shape = arg1.get_shape();
     auto lens               = input_shape.lens();
-    auto axis_dim_size      = lens[axis_index];
-    lens[axis_index]        = arg2.get_shape().elements();
+    auto axis_dim_size      = lens[axis];
+    lens[axis]              = arg2.get_shape().elements();
     shape out_comp_shape{result.get_shape().type(), lens};
     std::size_t nelements = result.get_shape().elements();
@@ -26,11 +25,11 @@ argument gather(hipStream_t stream, argument result, argument arg1, argument arg
             const auto* indices_ptr = device_cast(indices.data());
             auto* output_ptr        = device_cast(output.data());
             gs_launch(stream, nelements, 256)([=](auto i) __device__ {
                 auto idx      = out_comp.multi(i);
-                auto in_index = indices_ptr[idx[axis_index]];
+                auto in_index = indices_ptr[idx[axis]];
                 in_index      = (in_index < 0) ? in_index + axis_dim_size : in_index;
-                idx[axis_index] = in_index;
+                idx[axis]       = in_index;
                 output_ptr[i] = input[idx];
             });
         });
     });
......
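
Note what the kernel keeps: the *axis* is now folded at compile time by the pass (hence the deleted `axis_index` branch and the widened `int64_t` parameter), but the runtime *index values* inside the indices tensor may still be negative, so the in-kernel fold by `axis_dim_size` stays. A small host-side restatement of that remaining rule:

```cpp
#include <cstdint>

// Hedged sketch of the fold the kernel still performs on each gathered index;
// axis_dim_size is the length of the (already normalized) gather axis.
int64_t fold_gather_index(int64_t in_index, int64_t axis_dim_size)
{
    return (in_index < 0) ? in_index + axis_dim_size : in_index;
}
```
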
@@ -9,7 +9,7 @@ namespace gpu {

 shape hip_gather::compute_shape(std::vector<shape> inputs) const
 {
     inputs.pop_back();
-    return op.compute_shape(inputs);
+    return op.normalize_compute_shape(inputs);
 }

 argument hip_gather::compute(context& ctx, const shape&, const std::vector<argument>& args) const
......
@@ -10,7 +10,7 @@ inline namespace MIGRAPHX_INLINE_NS {
 namespace gpu {
 namespace device {

-argument gather(hipStream_t stream, argument result, argument arg1, argument arg2, int axis);
+argument gather(hipStream_t stream, argument result, argument arg1, argument arg2, int64_t axis);

 } // namespace device
 } // namespace gpu
......
@@ -34,7 +34,7 @@ struct reduce_op : oper<Derived>
         std::vector<shape> in_shapes{inputs};
         in_shapes.pop_back();
         check_shapes{in_shapes, *this}.standard();
-        return op.compute_shape(in_shapes);
+        return op.normalize_compute_shape(in_shapes);
     }

     argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
......
@@ -12,7 +12,7 @@ namespace gpu {

 shape hip_logsoftmax::compute_shape(const std::vector<shape>& inputs) const
 {
     check_shapes{inputs, *this}.has(2).standard();
-    return op.compute_shape({inputs.at(0)});
+    return op.normalize_compute_shape({inputs.at(0)});
 }

 argument
......
@@ -9,7 +9,7 @@ namespace gpu {

 shape hip_softmax::compute_shape(const std::vector<shape>& inputs) const
 {
     check_shapes{inputs, *this}.has(2).standard();
-    return op.compute_shape({inputs.at(0)});
+    return op.normalize_compute_shape({inputs.at(0)});
 }

 argument hip_softmax::compute(context& ctx, const shape&, const std::vector<argument>& args) const
......
@@ -9,6 +9,7 @@
 #include <migraphx/eliminate_identity.hpp>
 #include <migraphx/eliminate_pad.hpp>
 #include <migraphx/memory_coloring.hpp>
+#include <migraphx/normalize_ops.hpp>
 #include <migraphx/propagate_constant.hpp>
 #include <migraphx/register_target.hpp>
 #include <migraphx/remap.hpp>
@@ -43,6 +44,7 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
     // clang-format off
     return
     {
+        normalize_ops{},
         decompose{},
         dead_code_elimination{},
         simplify_reshapes{},
......
@@ -784,7 +784,10 @@ struct ref_softmax : auto_register_op<ref_softmax<Op>>
     }
     std::string name() const { return "ref::" + op.name(); }
-    shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
+    shape compute_shape(const std::vector<shape>& inputs) const
+    {
+        return op.normalize_compute_shape(inputs);
+    }
     argument compute(context&, const shape& output_shape, std::vector<argument> args) const
     {
         argument result{output_shape};
......
@@ -7,6 +7,7 @@
 #include <migraphx/rewrite_rnn.hpp>
 #include <migraphx/dead_code_elimination.hpp>
 #include <migraphx/generate.hpp>
+#include <migraphx/normalize_ops.hpp>

 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
@@ -16,7 +17,8 @@ std::string target::name() const { return "ref"; }

 std::vector<pass> target::get_passes(migraphx::context&, const compile_options&) const
 {
-    return {rewrite_rnn{},
+    return {normalize_ops{},
+            rewrite_rnn{},
             dead_code_elimination{},
             auto_contiguous{},
             dead_code_elimination{},
......
@@ -167,6 +167,13 @@ value& value::operator=(std::nullptr_t)
     return *this;
 }

+value& value::operator=(const std::initializer_list<value>& i)
+{
+    value rhs = i;
+    std::swap(rhs.x, x);
+    return *this;
+}
+
 bool value::is_array() const { return x ? x->get_type() == array_type : false; }

 const std::vector<value>& value::get_array() const
 {
......
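
The new assignment operator is plain copy-and-swap: build a complete `value` from the initializer list, then swap its internals into `*this`, which keeps the assignment strongly exception-safe and sidesteps the ambiguous-assign issue mentioned in the commit log. A self-contained illustration of the idiom (the `node` type here is illustrative, not the `migraphx::value` internals):

```cpp
#include <initializer_list>
#include <utility>
#include <vector>

struct node
{
    std::vector<int> items;

    node() = default;
    node(std::initializer_list<int> i) : items(i) {}

    // Copy-and-swap: construct a fully formed temporary first, then take its
    // state; if construction throws, *this is left untouched.
    node& operator=(const std::initializer_list<int>& i)
    {
        node rhs = i;
        std::swap(rhs.items, items);
        return *this;
    }
};

int main()
{
    node n;
    n = {1, 2, 3}; // resolves to the initializer_list overload
}
```
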
@@ -4,6 +4,8 @@
 #include <migraphx/op/concat.hpp>
 #include <migraphx/op/load.hpp>
 #include <migraphx/op/identity.hpp>
+#include <migraphx/op/normalize_attribute.hpp>
+#include <migraphx/normalize_attributes.hpp>
 #include <basic_ops.hpp>
 #include <test.hpp>
@@ -18,11 +20,18 @@ struct concat
         return migraphx::reflect(self.op, f);
     }

+    migraphx::value attributes() const
+    {
+        migraphx::value normalize;
+        normalize["axis"] = migraphx::value::array{migraphx::op::normalize_attribute::include_min};
+        return {{"normalize_axes", normalize}};
+    }
+
     std::string name() const { return "eliminate_concat::concat"; }
-    migraphx::shape compute_shape(std::vector<migraphx::shape> inputs) const
+    migraphx::shape normalize_compute_shape(std::vector<migraphx::shape> inputs) const
     {
         inputs.pop_back();
-        return op.compute_shape(std::move(inputs));
+        return op.normalize_compute_shape(std::move(inputs));
     }
     migraphx::argument compute(migraphx::context&,
                                const migraphx::shape& output_shape,
......
#include <migraphx/normalize_ops.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/functional.hpp>
#include <migraphx/op/normalize_attribute.hpp>
#include <basic_ops.hpp>
#include <test.hpp>
struct normalize_test_op
{
std::vector<int64_t> axes = {};
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return migraphx::pack(f(self.axes, "axes"));
}
migraphx::value attributes() const
{
migraphx::value normalize;
normalize["axes"] = migraphx::value::array{migraphx::op::normalize_attribute::clip_max,
migraphx::op::normalize_attribute::clip_min};
return {{"normalize_axes", normalize}};
}
std::string name() const { return "normalize_ops_test::test_op"; }
migraphx::shape normalize_compute_shape(std::vector<migraphx::shape> inputs) const
{
return inputs[0];
}
migraphx::argument compute(migraphx::context&,
const migraphx::shape& output_shape,
const std::vector<migraphx::argument>&) const
{
return {output_shape};
}
};
void run_pass(migraphx::program& p)
{
migraphx::run_passes(p, {migraphx::normalize_ops{}, migraphx::dead_code_elimination{}});
}
migraphx::program create_gather(int64_t axis)
{
migraphx::program p;
migraphx::shape sd{migraphx::shape::float_type, {2, 3, 4}};
migraphx::shape si{migraphx::shape::int64_type, {2, 3}};
auto di = p.add_parameter("data", sd);
auto ii = p.add_parameter("ind", si);
auto r = p.add_instruction(migraphx::make_op("gather", {{"axis", axis}}), di, ii);
p.add_return({r});
return p;
}
TEST_CASE(gather_test)
{
auto p1 = create_gather(-3);
auto p2 = create_gather(0);
run_pass(p1);
EXPECT(p1 == p2);
}
TEST_CASE(gather_test_1)
{
auto p1 = create_gather(1);
auto p2 = create_gather(1);
run_pass(p1);
EXPECT(p1 == p2);
}
migraphx::program create_reduce_mean(const std::vector<int64_t>& axes)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 3, 4, 5}};
auto si = p.add_parameter("data", s);
auto r = p.add_instruction(migraphx::make_op("reduce_mean", {{"axes", axes}}), si);
p.add_return({r});
return p;
}
TEST_CASE(reduce_mean_test)
{
migraphx::program p1 = create_reduce_mean({0, 1, -1});
migraphx::program p2 = create_reduce_mean({0, 1, 3});
run_pass(p1);
EXPECT(p1 == p2);
}
TEST_CASE(reduce_mean_test_1)
{
migraphx::program p1 = create_reduce_mean({0, 1, 2});
migraphx::program p2 = create_reduce_mean({0, 1, 2});
run_pass(p1);
EXPECT(p1 == p2);
}
migraphx::program create_slice(const std::vector<int64_t>& axes,
const std::vector<int64_t>& starts,
const std::vector<int64_t>& ends)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {2, 3, 4, 5}};
auto si = p.add_parameter("data", s);
auto r = p.add_instruction(
migraphx::make_op("slice", {{"axes", axes}, {"starts", starts}, {"ends", ends}}), si);
p.add_return({r});
return p;
}
TEST_CASE(slice_test)
{
migraphx::program p1 = create_slice({0, 1, -1}, {-5, 1, -3}, {2, 2, 8});
migraphx::program p2 = create_slice({0, 1, 3}, {0, 1, 2}, {2, 2, 5});
run_pass(p1);
EXPECT(p1 == p2);
}
TEST_CASE(slice_test_1)
{
migraphx::program p1 = create_slice({0, 1, 3}, {0, 1, -3}, {1, 2, 5});
migraphx::program p2 = create_slice({0, 1, 3}, {0, 1, 2}, {1, 2, 5});
run_pass(p1);
EXPECT(p1 == p2);
}
migraphx::program create_test_op(const std::vector<int64_t>& axes)
{
migraphx::program p;
migraphx::shape sd{migraphx::shape::float_type, {2, 3, 4}};
auto di = p.add_parameter("data", sd);
auto r = p.add_instruction(normalize_test_op{axes}, di);
p.add_return({r});
return p;
}
TEST_CASE(test_op)
{
std::vector<int64_t> axes1 = {-4, 5};
auto p1 = create_test_op(axes1);
std::vector<int64_t> axes2 = {1, 2};
auto p2 = create_test_op(axes2);
run_pass(p1);
EXPECT(p1 == p2);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
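
The slice cases above encode the full normalization story for input dims `{2, 3, 4, 5}`: axis `-1` becomes `3`, negative starts/ends are folded by the length of their axis, and out-of-range values are clamped (which is why ends `{2, 2, 8}` become `{2, 2, 5}`). A hedged sketch that reproduces those numbers, not the pass's actual implementation:

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>

// Fold a negative slice position by the axis length, then clamp to [0, dim].
int64_t normalize_slice_pos(int64_t pos, int64_t dim)
{
    if(pos < 0)
        pos += dim;
    return std::max<int64_t>(0, std::min(pos, dim));
}

int main()
{
    // Values taken from slice_test: dims {2, 3, 4, 5}, axes {0, 1, 3}.
    assert(normalize_slice_pos(-5, 2) == 0); // start -5 on axis 0 -> 0
    assert(normalize_slice_pos(-3, 5) == 2); // start -3 on axis 3 -> 2
    assert(normalize_slice_pos(8, 5) == 5);  // end 8 on axis 3 clamps to 5
}
```
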
@@ -9,6 +9,7 @@
 #include <utility>
 #include <migraphx/reflect.hpp>
 #include <migraphx/streamutils.hpp>
+#include <migraphx/normalize_attributes.hpp>
 #include <migraphx/argument.hpp>
 #include <migraphx/serialize.hpp>
 #include <migraphx/auto_any_cast.hpp>
@@ -58,6 +59,8 @@ struct operation
 /// Returns true if operation does not require a context to run compute
 bool is_context_free(const operation& x);
+/// Returns true if operation needs normalization before running compute
+bool need_normalization(const operation& x);
 /// Returns true if the operation has a finalize method
 bool has_finalize(const operation& x);
@@ -96,6 +99,14 @@ auto operator==(const T& x, const U& y) -> decltype(x.name() == y.name())
 } // namespace operation_operators

+template <class T>
+shape normalize_compute_shape_op(T&& x, std::vector<shape> inputs)
+{
+    dependent_type<operation, T> y = x;
+    normalize_attributes(y, inputs[0].lens());
+    return any_cast<T>(y).normalize_compute_shape(inputs);
+}
+
 template <class T>
 auto compute_op(rank<2>,
                 const T& x,
@@ -175,6 +186,20 @@ auto is_context_free_op(const T& x) -> decltype(is_context_free_op(
     return {};
 }

+template <class T>
+auto need_normalization_op(rank<1>, const T& x, const std::vector<shape>& inputs)
+    -> decltype(x.normalize_compute_shape(inputs), std::true_type{});
+
+template <class T>
+auto need_normalization_op(rank<0>, const T&, const std::vector<shape>&) -> std::false_type;
+
+template <class T>
+auto need_normalization_op(const T& x)
+    -> decltype(need_normalization_op(rank<1>{}, x, std::declval<std::vector<shape>>()))
+{
+    return {};
+}
+
 template <class T>
 std::ptrdiff_t output_alias_op(const T&, const std::vector<shape>&)
 {
@@ -245,6 +270,10 @@ void from_value_op(T& x, const value& v)
     virtual('name', returns = 'std::string', const = True),
     virtual(
         'is_context_free', returns = 'bool', const = True, default = 'detail::is_context_free_op'),
+    virtual('need_normalization',
+            returns = 'bool',
+            const = True,
+            default = 'detail::need_normalization_op'),
     virtual('has_finalize', returns = 'bool', const = True, default = 'detail::has_finalize_op'),
     virtual('output_alias',
             returns = 'std::ptrdiff_t',
@@ -256,7 +285,11 @@ void from_value_op(T& x, const value& v)
             output = 'const shape&',
             input = 'const std::vector<shape>&',
             default = 'detail::finalize_op'),
-    virtual('compute_shape', returns = 'shape', input = 'const std::vector<shape>&', const = True),
+    virtual('compute_shape',
+            returns = 'shape',
+            input = 'const std::vector<shape>&',
+            const = True,
+            default = 'detail::normalize_compute_shape_op'),
     virtual('compute',
             returns = 'argument',
             ctx = 'context&',
@@ -297,6 +330,14 @@ bool is_context_free(const T& x)
     return detail::is_context_free_op(x);
 }

+inline bool need_normalization(const operation& op) { return op.need_normalization(); }
+
+template <class T>
+bool need_normalization(const T& x)
+{
+    return detail::need_normalization_op(x);
+}
+
 inline bool has_finalize(const operation& op) { return op.has_finalize(); }

 template <class T>
......
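
The `need_normalization_op` overloads rely on the library's usual `rank<N>` dispatch: the `rank<1>` overload is preferred, but it only participates when `x.normalize_compute_shape(inputs)` is a well-formed expression; otherwise resolution falls back to `rank<0>` and yields `std::false_type`. A standalone sketch of the detection idiom (with a local stand-in for `migraphx::rank`):

```cpp
#include <type_traits>
#include <utility>
#include <vector>

// Stand-in for migraphx::rank: rank<1> derives from rank<0>, so the rank<1>
// overload is the better match whenever SFINAE leaves it viable.
template <int N>
struct rank : rank<N - 1>
{
};
template <>
struct rank<0>
{
};

struct shape
{
};

template <class T>
auto detect(rank<1>, const T& x, const std::vector<shape>& inputs)
    -> decltype(x.normalize_compute_shape(inputs), std::true_type{});

template <class T>
auto detect(rank<0>, const T&, const std::vector<shape>&) -> std::false_type;

struct plain_op
{
    shape compute_shape(const std::vector<shape>& in) const { return in.front(); }
};

struct normalized_op
{
    shape normalize_compute_shape(const std::vector<shape>& in) const { return in.front(); }
};

static_assert(!decltype(detect(rank<1>{},
                               std::declval<plain_op>(),
                               std::declval<std::vector<shape>>()))::value,
              "plain_op needs no normalization");
static_assert(decltype(detect(rank<1>{},
                              std::declval<normalized_op>(),
                              std::declval<std::vector<shape>>()))::value,
              "normalized_op opts in");

int main() {}
```
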