"vscode:/vscode.git/clone" did not exist on "d770a7dc9725fbd2262628d54b6fd680d916852a"
Commit 9a2c7339 authored by Khalique's avatar Khalique
Browse files

add clip op with tests

parent 849f7d92
#ifndef MIGRAPHX_GUARD_OPERATORS_CLIP_HPP
#define MIGRAPHX_GUARD_OPERATORS_CLIP_HPP

#include <array>
#include <migraphx/op/unary.hpp>
#include <migraphx/operation.hpp>
#include <migraphx/check_shapes.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/streamutils.hpp>
#include <migraphx/literal.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <cmath>
#include <utility>
#include <limits>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace op {

struct clip : unary
{
    float max_val = std::numeric_limits<float>::max();
    // std::numeric_limits<float>::min() is the smallest positive normal
    // value, not the most negative float, so it is the wrong default lower
    // bound; lowest() gives -FLT_MAX.
    float min_val = std::numeric_limits<float>::lowest();

    std::string name() const { return "clip"; }

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.max_val, "max"), f(self.min_val, "min"));
    }
};

} // namespace op
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
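For reference, op::clip computes an elementwise clamp of its input into [min_val, max_val]. A minimal standalone sketch of the semantics (illustration only, not part of this commit):

// Reference semantics of clip, applied to each element:
//   y = min(max(x, min_val), max_val)
#include <algorithm>
inline float clip_ref(float x, float min_val, float max_val)
{
    return std::min(std::max(x, min_val), max_val);
}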
@@ -11,6 +11,7 @@
#include <migraphx/op/batch_norm.hpp>
#include <migraphx/op/binary.hpp>
#include <migraphx/op/broadcast.hpp>
#include <migraphx/op/clip.hpp>
#include <migraphx/op/common.hpp>
#include <migraphx/op/concat.hpp>
#include <migraphx/op/contiguous.hpp>
......
@@ -63,6 +63,7 @@ struct onnx_parser
add_variadic_op("Max", op::max{});
add_variadic_op("Min", op::min{});
add_mem_op("Clip", &onnx_parser::parse_clip);
add_mem_op("LRN", &onnx_parser::parse_lrn);
add_mem_op("ImageScaler", &onnx_parser::parse_imagescaler);
add_mem_op("LeakyRelu", &onnx_parser::parse_leaky_relu);
@@ -225,6 +226,20 @@ struct onnx_parser
        });
    }

    instruction_ref
    parse_clip(const std::string&, const attribute_map& attributes, std::vector<instruction_ref> args)
    {
        op::clip op;
        if(contains(attributes, "max"))
        {
            op.max_val = parse_value(attributes.at("max")).at<float>();
        }
        if(contains(attributes, "min"))
        {
            op.min_val = parse_value(attributes.at("min")).at<float>();
        }
        return prog.add_instruction(op, std::move(args));
    }

    instruction_ref
    parse_softmax(const std::string&, const attribute_map&, std::vector<instruction_ref> args)
    {
......
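A note on parse_clip above: when the ONNX node omits "min" or "max", the corresponding field keeps its default from op::clip, so clipping is effectively disabled on that side. A hedged sketch with hypothetical attribute values:

// Hypothetical: parsing a Clip node that carries only max=6.0.
migraphx::op::clip op; // min_val keeps its default, std::numeric_limits<float>::lowest()
op.max_val = 6.0f;     // taken from the "max" attribute
// With no "min" attribute the lower bound stays at -FLT_MAX, so negative
// inputs pass through unchanged.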
@@ -140,6 +140,18 @@ struct cpu_lrn
    }
};
struct clip_op
{
    op::clip op;
    std::string name() const { return "cpu::clip"; }
    auto fcn() const
    {
        auto max = op.max_val;
        auto min = op.min_val;
        // Clamp each element into [min, max].
        return [max, min](auto x) { return x > min ? (x < max ? x : max) : min; };
    }
};

struct cpu_convolution
{
    op::convolution op;
@@ -819,6 +831,7 @@ struct cpu_apply
apply_map["batch_norm_inference"] =
extend_op<cpu_batch_norm_inference, op::batch_norm_inference>();
apply_map["lrn"] = extend_op<cpu_lrn, op::lrn>();
apply_map["clip"] = extend_op<cpu_unary<clip_op>, op::clip>();
apply_map["contiguous"] = extend_op<cpu_contiguous, op::contiguous>();
apply_map["pad"] = extend_op<cpu_pad, op::pad>();
apply_map["concat"] = extend_op<cpu_concat, op::concat>();
......
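For context, cpu_unary<clip_op> evaluates the functor returned by fcn() once per tensor element. A rough standalone equivalent of that loop over plain std::vector storage (hypothetical helper, not the actual MIGraphX implementation):

// Hypothetical illustration of the elementwise application performed by
// cpu_unary<clip_op>; not the actual MIGraphX code.
#include <algorithm>
#include <vector>

std::vector<float> apply_clip(const std::vector<float>& in, float min, float max)
{
    std::vector<float> out(in.size());
    std::transform(in.begin(), in.end(), out.begin(),
                   [=](float x) { return x > min ? (x < max ? x : max) : min; });
    return out;
}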
@@ -32,6 +32,7 @@ add_library(migraphx_device
    device/pad.cpp
    device/gather.cpp
    device/sub.cpp
    device/clip.cpp
)
set_target_properties(migraphx_device PROPERTIES EXPORT_NAME device)
rocm_clang_tidy_check(migraphx_device)
@@ -65,6 +66,7 @@ add_library(migraphx_gpu
    gather.cpp
    lrn.cpp
    schedule_model.cpp
    clip.cpp
)
set_target_properties(migraphx_gpu PROPERTIES EXPORT_NAME gpu)
rocm_clang_tidy_check(migraphx_gpu)
......
#include <migraphx/gpu/clip.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/device/clip.hpp>
#include <cassert>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

shape hip_clip::compute_shape(std::vector<shape> inputs) const
{
    // The last input is the preallocated output buffer; drop it before
    // delegating shape computation to the underlying op.
    inputs.pop_back();
    return op.compute_shape(inputs);
}

argument hip_clip::compute(context& ctx,
                           const shape& output_shape,
                           const std::vector<argument>& args) const
{
    assert(output_shape == args.back().get_shape());
    (void)output_shape;
    device::clip(ctx.get_stream().get(), args.back(), args.front(), op.max_val, op.min_val);
    return args.back();
}

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#include <migraphx/gpu/device/clip.hpp>
#include <migraphx/gpu/device/nary.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

void clip(hipStream_t stream,
          const argument& result,
          const argument& arg1,
          const float& max,
          const float& min)
{
    nary(stream, result, arg1)(
        [max, min](auto x) { return std::min<decltype(x)>(std::max<decltype(x)>(min, x), max); });
}

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
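Here nary is MIGraphX's elementwise kernel launcher: the lambda is applied independently to each element of arg1 and written into result. For intuition only, a rough standalone HIP kernel with the same per-element behavior (hypothetical, not the nary implementation):

// Hypothetical standalone HIP kernel matching the per-element behavior above;
// the committed code goes through migraphx's nary() launcher instead.
__global__ void clip_kernel(const float* x, float* y, size_t n, float lo, float hi)
{
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n)
        y[i] = fminf(fmaxf(x[i], lo), hi);
}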
#ifndef MIGRAPHX_GUARD_RTGLIB_CLIP_HPP
#define MIGRAPHX_GUARD_RTGLIB_CLIP_HPP

#include <migraphx/shape.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/op/clip.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

struct context;

struct hip_clip
{
    op::clip op;
    std::string name() const { return "gpu::clip"; }
    shape compute_shape(std::vector<shape> inputs) const;
    argument
    compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
    // The last input aliases the output buffer supplied by the GPU target.
    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_CLIP_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_CLIP_HPP

#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

void clip(hipStream_t stream,
          const argument& result,
          const argument& arg1,
          const float& max,
          const float& min);

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
@@ -45,6 +45,7 @@
#include <migraphx/gpu/pad.hpp>
#include <migraphx/gpu/gather.hpp>
#include <migraphx/gpu/lrn.hpp>
#include <migraphx/gpu/clip.hpp>
#include <utility>
#include <functional>
#include <algorithm>
@@ -101,6 +102,7 @@ struct miopen_apply
        add_extend_op<hip_logsoftmax, op::logsoftmax>("logsoftmax");
        add_extend_op<hip_gather, op::gather>("gather");
        add_extend_op<hip_pad, op::pad>("pad");
        add_extend_op<hip_clip, op::clip>("clip");
        add_lrn_op();
        add_convolution_op();
......
@@ -1557,4 +1557,21 @@ TEST_CASE(fp16_test)
    EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(clip_test)
{
    migraphx::program p;
    migraphx::shape s{migraphx::shape::float_type, {3}};
    auto l = p.add_literal(migraphx::literal{s, {-1.0, 0.0, 10.0}});
    migraphx::op::clip op;
    op.max_val = 6.0;
    op.min_val = 0.0;
    p.add_instruction(op, l);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> results_vector(3);
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    std::vector<float> gold = {0.0, 0.0, 6.0};
    EXPECT(migraphx::verify_range(results_vector, gold));
}
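As a quick check of the gold values: with min_val = 0.0 and max_val = 6.0, the inputs {-1.0, 0.0, 10.0} clamp to {0.0, 0.0, 6.0}: -1.0 is raised to the lower bound, 0.0 lies inside the range, and 10.0 is cut down to the upper bound.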
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -1285,6 +1285,20 @@ struct test_batchnorm_inference : verify_program<test_batchnorm_inference>
    }
};
struct test_clip : verify_program<test_clip>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto x = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3}});
        migraphx::op::clip op;
        op.max_val = 6.0;
        op.min_val = 0.0;
        p.add_instruction(op, x);
        return p;
    }
};
struct test_conv_bn : verify_program<test_conv_bn>
{
    migraphx::program create_program() const
......
@@ -795,4 +795,17 @@ TEST_CASE(no_pad_test)
    EXPECT(p == prog);
}
TEST_CASE(clip_test)
{
    migraphx::program p;
    auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {3}});
    migraphx::op::clip op;
    op.max_val = 6.0;
    op.min_val = 0.0;
    p.add_instruction(op, l0);
    auto prog = migraphx::parse_onnx("clip_test.onnx");
    EXPECT(p == prog);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }