Unverified Commit 42a97dfb authored by Shucai Xiao, committed by GitHub
Browse files

Not reflect actv desc (#624)

* do not reflect the activation descriptor for some MIOpen operators

* clang format
parent 69925294
......@@ -31,6 +31,8 @@ argument miopen_abs::compute(context& ctx,
return args[1];
}
// (Re)create the MIOpen abs activation descriptor when the program is finalized.
void miopen_abs::finalize(context&, const shape&, const std::vector<shape>&)
{
    ad = make_abs();
}
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
......@@ -31,6 +31,11 @@ argument miopen_elu::compute(context& ctx,
return args[1];
}
// (Re)create the MIOpen ELU activation descriptor from the op's alpha at finalize time.
void miopen_elu::finalize(context&, const shape&, const std::vector<shape>&) { ad = make_elu(op.alpha); }
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
......@@ -2,7 +2,9 @@
#define MIGRAPHX_GUARD_RTGLIB_ABS_HPP
#include <migraphx/shape.hpp>
#include <migraphx/reflect.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/op/abs.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
......@@ -12,18 +14,20 @@ struct context;
struct miopen_abs
{
op::abs op;
shared<activation_descriptor> ad;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
    // Reflect only the wrapped operator's fields. The MIOpen activation
    // descriptor `ad` is a runtime handle rebuilt in finalize() and must not
    // participate in reflection. (The stale pre-change line reflecting
    // `self.ad.get()` was unreachable dead code and has been removed.)
    return migraphx::reflect(self.op, f);
}
// Identifier reported for this gpu operator.
std::string name() const
{
    return "gpu::abs";
}
shape compute_shape(const std::vector<shape>& inputs) const;
argument
compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
void finalize(context&, const shape&, const std::vector<shape>&);
std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
{
return shapes.size() - 1;
......
#ifndef MIGRAPHX_GUARD_RTGLIB_ELU_HPP
#define MIGRAPHX_GUARD_RTGLIB_ELU_HPP
#include <migraphx/op/elu.hpp>
#include <migraphx/shape.hpp>
#include <migraphx/reflect.hpp>
#include <migraphx/gpu/miopen.hpp>
namespace migraphx {
......@@ -12,18 +14,20 @@ struct context;
struct miopen_elu
{
op::elu op;
shared<activation_descriptor> ad;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
    // Reflect only the wrapped operator's fields (including alpha). The
    // MIOpen activation descriptor `ad` is a runtime handle rebuilt in
    // finalize() and must not participate in reflection. (The stale
    // pre-change line reflecting `self.ad.get()` was unreachable dead code
    // and has been removed.)
    return migraphx::reflect(self.op, f);
}
// Identifier reported for this gpu operator.
std::string name() const
{
    return "gpu::elu";
}
shape compute_shape(const std::vector<shape>& inputs) const;
argument
compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
void finalize(context&, const shape&, const std::vector<shape>&);
std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
{
return shapes.size() - 1;
......
#ifndef MIGRAPHX_GUARD_RTGLIB_LEAKY_RELU_HPP
#define MIGRAPHX_GUARD_RTGLIB_LEAKY_RELU_HPP
#include <migraphx/op/leaky_relu.hpp>
#include <migraphx/shape.hpp>
#include <migraphx/reflect.hpp>
#include <migraphx/gpu/miopen.hpp>
namespace migraphx {
......@@ -12,18 +14,20 @@ struct context;
struct miopen_leaky_relu
{
op::leaky_relu op;
shared<activation_descriptor> ad;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
    // Reflect only the wrapped operator's fields (including alpha). The
    // MIOpen activation descriptor `ad` is a runtime handle rebuilt in
    // finalize() and must not participate in reflection. (The stale
    // pre-change line reflecting `self.ad.get()` was unreachable dead code
    // and has been removed.)
    return migraphx::reflect(self.op, f);
}
// Identifier reported for this gpu operator.
std::string name() const
{
    return "gpu::leaky_relu";
}
shape compute_shape(const std::vector<shape>& inputs) const;
argument
compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
void finalize(context&, const shape&, const std::vector<shape>&);
std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
{
return shapes.size() - 1;
......
......@@ -2,6 +2,7 @@
#define MIGRAPHX_GUARD_RTGLIB_LRN_HPP
#include <migraphx/shape.hpp>
#include <migraphx/reflect.hpp>
#include <migraphx/gpu/miopen.hpp>
namespace migraphx {
......@@ -12,18 +13,20 @@ struct context;
struct miopen_lrn
{
op::lrn op;
shared<lrn_descriptor> ldesc;
template <class Self, class F>
static auto reflect(Self& self, F f)
{
    // Reflect only the wrapped operator's fields. The MIOpen LRN descriptor
    // `ldesc` is a runtime handle rebuilt in finalize() and must not
    // participate in reflection. (The stale pre-change line reflecting
    // `self.ldesc.get()` was unreachable dead code and has been removed.)
    return migraphx::reflect(self.op, f);
}
// Identifier reported for this gpu operator.
std::string name() const
{
    return "gpu::lrn";
}
shape compute_shape(const std::vector<shape>& inputs) const;
argument
compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
void finalize(context&, const shape&, const std::vector<shape>&);
std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
{
return shapes.size() - 1;
......
#include <migraphx/gpu/leaky_relu.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/miopen.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
......@@ -31,6 +32,11 @@ argument miopen_leaky_relu::compute(context& ctx,
return args[1];
}
// (Re)create the MIOpen leaky-relu activation descriptor from the op's alpha at finalize time.
void miopen_leaky_relu::finalize(context&, const shape&, const std::vector<shape>&) { ad = make_leaky_relu(op.alpha); }
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
......@@ -91,11 +91,6 @@ struct miopen_apply
create_output_names();
add_miopen_simple_op<miopen_abs>("abs", make_abs);
add_miopen_extend_op<miopen_leaky_relu, op::leaky_relu>("leaky_relu", make_leaky_relu);
add_miopen_extend_op<miopen_elu, op::elu>("elu", make_elu);
add_generic_op("acos");
add_generic_op("acosh");
add_generic_op("add");
......@@ -132,13 +127,17 @@ struct miopen_apply
add_generic_op("tan");
add_generic_op("tanh");
add_extend_op("abs");
add_extend_op("argmax");
add_extend_op("argmin");
add_extend_op("clip");
add_extend_op("concat");
add_extend_op("convert");
add_extend_op("elu");
add_extend_op("gather");
add_extend_op("leaky_relu");
add_extend_op("logsoftmax");
add_extend_op("lrn");
add_extend_op("pad");
add_extend_op("reduce_max");
add_extend_op("reduce_mean");
......@@ -152,7 +151,6 @@ struct miopen_apply
add_gemm_op<op::dot>("dot");
add_gemm_op<op::quant_dot>("quant_dot");
add_lrn_op();
add_convolution_op();
add_deconvolution_op();
add_quant_convolution_op();
......@@ -327,17 +325,6 @@ struct miopen_apply
});
}
void add_lrn_op()
{
apply_map.emplace("lrn", [=](instruction_ref ins) {
auto&& op = any_cast<op::lrn>(ins->get_operator());
auto ldesc = make_lrn(op);
auto output = insert_allocation(ins, ins->get_shape());
return prog->replace_instruction(
ins, miopen_lrn{std::move(ldesc)}, ins->inputs().at(0), output);
});
}
// Convenience overload: the gpu operator name is the framework name with a "gpu::" prefix.
void add_generic_op(const std::string& name)
{
    add_generic_op(name, "gpu::" + name);
}
void add_generic_op(const std::string& op_name, const std::string& gpu_name)
......@@ -365,28 +352,6 @@ struct miopen_apply
});
}
// Register a lowering for MIOpen ops that carry an alpha parameter: the
// descriptor factory f is invoked with op.alpha, and the instruction is
// replaced by gpu op T with an appended output allocation.
template <class T, class Op, class F>
void add_miopen_extend_op(std::string name, F f)
{
    apply_map.emplace(name, [=](instruction_ref ins) {
        auto&& src_op   = any_cast<Op>(ins->get_operator());
        auto descriptor = f(src_op.alpha);
        auto out_alloc  = insert_allocation(ins, ins->get_shape());
        return prog->replace_instruction(
            ins, T{std::move(descriptor)}, ins->inputs().at(0), out_alloc);
    });
}
// Register a lowering for parameterless MIOpen ops: the descriptor factory
// f takes no arguments; the instruction is replaced by gpu op T with an
// appended output allocation.
template <class T, class F>
void add_miopen_simple_op(std::string name, F f)
{
    apply_map.emplace(name, [=](instruction_ref ins) {
        auto descriptor = f();
        auto out_alloc  = insert_allocation(ins, ins->get_shape());
        return prog->replace_instruction(
            ins, T{std::move(descriptor)}, ins->inputs().at(0), out_alloc);
    });
}
void add_batch_norm_inference_op()
{
apply_map.emplace("batch_norm_inference", [=](instruction_ref ins) {
......
......@@ -33,6 +33,11 @@ argument miopen_lrn::compute(context& ctx,
return args[1];
}
// (Re)create the MIOpen LRN descriptor from the reflected op at finalize time.
void miopen_lrn::finalize(context&, const shape&, const std::vector<shape>&) { ldesc = make_lrn(op); }
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
......@@ -218,11 +218,6 @@ def create_backend_test(testname=None, target_device=None):
backend_test.exclude(r'test_depthtospace_example_cpu')
backend_test.exclude(r'test_expand_dim_changed_cpu')
backend_test.exclude(r'test_expand_dim_unchanged_cpu')
backend_test.exclude(r'test_gather_0_cpu')
backend_test.exclude(r'test_gather_1_cpu')
backend_test.exclude(r'test_gather_elements_0_cpu')
backend_test.exclude(r'test_gather_elements_1_cpu')
backend_test.exclude(r'test_gather_elements_negative_indices_cpu')
backend_test.exclude(r'test_gathernd_example_float32_cpu')
backend_test.exclude(r'test_gathernd_example_int32_batch_dim1_cpu')
backend_test.exclude(r'test_gathernd_example_int32_cpu')
......@@ -241,16 +236,10 @@ def create_backend_test(testname=None, target_device=None):
backend_test.exclude(r'test_less_equal_bcast_expanded_cpu')
backend_test.exclude(r'test_less_equal_cpu')
backend_test.exclude(r'test_less_equal_expanded_cpu')
backend_test.exclude(r'test_max_float16_cpu')
backend_test.exclude(r'test_max_int64_cpu')
backend_test.exclude(r'test_max_uint64_cpu')
backend_test.exclude(r'test_maxpool_2d_uint8_cpu')
backend_test.exclude(r'test_mean_example_cpu')
backend_test.exclude(r'test_mean_one_input_cpu')
backend_test.exclude(r'test_mean_two_inputs_cpu')
backend_test.exclude(r'test_min_float16_cpu')
backend_test.exclude(r'test_min_int64_cpu')
backend_test.exclude(r'test_min_uint64_cpu')
backend_test.exclude(r'test_negative_log_likelihood_loss_*')
backend_test.exclude(r'test_not_2d_cpu')
backend_test.exclude(r'test_not_3d_cpu')
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment