Commit a8eb886b authored by Khalique Ahmed

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into develop

parents 7e604e9b 4f3cc417
@@ -138,6 +138,7 @@ def create_backend_test(testname=None, target_device=None):
backend_test.include(r'.*test_eyelike.*')
backend_test.include(r'.*test_flatten.*')
backend_test.include(r'.*test_floor.*')
backend_test.include(r'.*test_fmod.*')
backend_test.include(r'.*test_gather.*')
backend_test.include(r'.*test_gemm.*')
backend_test.include(r'.*test_globalaveragepool.*')
@@ -162,6 +163,7 @@ def create_backend_test(testname=None, target_device=None):
backend_test.include(r'.*test_MaxPool[1-9]d.*')
backend_test.include(r'.*test_mean.*')
backend_test.include(r'.*test_min.*')
backend_test.include(r'.*test_mod.*')
backend_test.include(r'.*test_mul.*')
backend_test.include(r'.*test_multinomial.*')
backend_test.include(r'.*test_Multinomial.*')
@@ -179,6 +181,7 @@ def create_backend_test(testname=None, target_device=None):
backend_test.include(r'.*test_operator_max_.*')
backend_test.include(r'.*test_operator_maxpool.*')
backend_test.include(r'.*test_operator_min.*')
backend_test.include(r'.*test_operator_mod.*')
backend_test.include(r'.*test_operator_mm.*')
backend_test.include(r'.*test_operator_non_float_params.*')
backend_test.include(r'.*test_operator_params.*')
......
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
import migraphx
import ctypes
def test_conv_relu():
    hip = ctypes.cdll.LoadLibrary("libamdhip64.so")
    p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx")
    print(p)
    print("Compiling ...")
    # offload_copy=False is required here so the runtime does not insert
    # copies/syncs back to the host between runs
    p.compile(migraphx.get_target("gpu"), offload_copy=False)
    print(p)
    params = {}
    # hipSuccess is always 0 in the HIP runtime API
    hipSuccess = ctypes.c_long(0)
    # Allocate a stream
    stream = ctypes.c_void_p()
    err = ctypes.c_long(
        hip.hipStreamCreateWithFlags(ctypes.byref(stream), ctypes.c_uint(0)))
    if err.value != hipSuccess.value:
        print("FAILED: hipStreamCreateWithFlags")
        return err
    # Use to_gpu to push each generated argument to the GPU before the run
    for key, value in p.get_parameter_shapes().items():
        params[key] = migraphx.to_gpu(migraphx.generate_argument(value))
    result = migraphx.from_gpu(
        p.run_async(params, stream.value, "ihipStream_t")[-1])
    # Wait for all commands in the stream to complete
    err = ctypes.c_long(hip.hipStreamSynchronize(stream))
    if err.value != hipSuccess.value:
        print("FAILED: hipStreamSynchronize")
        return err
    # Clean up the stream
    err = ctypes.c_long(hip.hipStreamDestroy(stream))
    if err.value != hipSuccess.value:
        print("FAILED: hipStreamDestroy")
        return err
    print(result)


test_conv_relu()
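For contrast, the same model can be run without any explicit HIP stream handling by leaving offload_copy at its default of True, in which case the compiled program inserts the host/device copies and syncs itself. A minimal synchronous sketch (same ONNX file as the test above; p.run is the blocking entry point):

import migraphx

# Synchronous sketch: with offload_copy=True (the default), compile()
# inserts the host<->device transfers, so plain run() suffices and no
# ctypes/HIP stream management is required.
p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx")
p.compile(migraphx.get_target("gpu"))

params = {}
for key, value in p.get_parameter_shapes().items():
    params[key] = migraphx.generate_argument(value)

print(p.run(params)[-1])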
@@ -3591,7 +3591,7 @@ TEST_CASE(multinomial_test)
result.visit([&](auto output) { result_vec.assign(output.begin(), output.end()); });
std::vector<int> res_dist(5, 0);
-for(auto& r : result_vec)
+for(const auto& r : result_vec)
res_dist[r]++;
auto dist_sum = std::accumulate(dist.begin(), dist.end(), 0);
auto res_dist_sum = std::accumulate(res_dist.begin(), res_dist.end(), 0);
......
@@ -122,6 +122,33 @@ TEST_CASE(simplify_add3)
EXPECT(m1 == m2);
}
TEST_CASE(simplify_zero_add_constant)
{
migraphx::module m1;
{
auto x = m1.add_parameter("x", {migraphx::shape::int32_type, {1}});
auto zero = m1.add_literal(0);
m1.add_instruction(migraphx::make_op("add"), zero, x);
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
m2.add_instruction(migraphx::make_op("identity"), x);
}
migraphx::module m3;
{
auto x = m3.add_parameter("x", {migraphx::shape::int32_type, {1}});
auto zero = m3.add_literal(0);
m3.add_instruction(migraphx::make_op("add"), x, zero);
}
run_pass(m3);
EXPECT((m1 == m2) && (m2 == m3));
}
TEST_CASE(simplify_add_broadcast1)
{
migraphx::shape inner{migraphx::shape::int32_type, {2}};
@@ -435,7 +462,7 @@ TEST_CASE(simplify_mul_add)
migraphx::module m1;
{
auto x = m1.add_parameter("x", {migraphx::shape::int32_type, {1}});
-auto one = m1.add_literal(1);
+auto one = m1.add_literal(3);
auto two = m1.add_literal(2);
auto sum = m1.add_instruction(migraphx::make_op("add"), one, x);
auto mul = m1.add_instruction(migraphx::make_op("mul"), sum, two);
@@ -446,7 +473,7 @@ TEST_CASE(simplify_mul_add)
migraphx::module m2;
{
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
-auto one = m2.add_literal(1);
+auto one = m2.add_literal(3);
auto two = m2.add_literal(2);
auto mul1 = m2.add_instruction(migraphx::make_op("mul"), two, x);
auto mul2 = m2.add_instruction(migraphx::make_op("mul"), two, one);
@@ -883,6 +910,341 @@ TEST_CASE(simplify_div_const)
EXPECT(m1 == m2);
}
TEST_CASE(simplify_unit_mult_const)
{
migraphx::module m1;
{
auto x = m1.add_parameter("x", {migraphx::shape::int32_type, {1}});
auto unit = m1.add_literal(1);
m1.add_instruction(migraphx::make_op("mul"), x, unit);
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
m2.add_instruction(migraphx::make_op("identity"), x);
}
EXPECT(m1 == m2);
}
TEST_CASE(simplify_unit_mult_const2)
{
migraphx::module m1;
{
auto unit = m1.add_literal(1);
auto x = m1.add_parameter("x", {migraphx::shape::int32_type, {1}});
m1.add_instruction(migraphx::make_op("mul"), unit, x);
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
m2.add_instruction(migraphx::make_op("identity"), x);
}
EXPECT(m1 == m2);
}
TEST_CASE(simplify_unit_mult_const_vec)
{
migraphx::shape unit_shape{migraphx::shape::int32_type, {2}};
migraphx::shape x_shape{migraphx::shape::int32_type, {1, 2, 3, 3}};
migraphx::module m1;
{
auto unit = m1.add_literal({unit_shape, {1, 1}});
auto x = m1.add_parameter("x", x_shape);
auto unitb = m1.add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {1, 2, 3, 3}}}), unit);
m1.add_instruction(migraphx::make_op("mul"), x, unitb);
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", x_shape);
m2.add_instruction(migraphx::make_op("identity"), x);
}
EXPECT(m1 == m2);
}
TEST_CASE(simplify_unit_mult_const_vec2)
{
migraphx::shape unit_shape{migraphx::shape::int32_type, {2}};
migraphx::shape x_shape{migraphx::shape::int32_type, {1, 2, 3, 3}};
migraphx::module m1;
{
auto unit = m1.add_literal({unit_shape, {1, 1}});
auto x = m1.add_parameter("x", x_shape);
auto unitb = m1.add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {1, 2, 3, 3}}}), unit);
m1.add_instruction(migraphx::make_op("mul"), unitb, x);
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", x_shape);
m2.add_instruction(migraphx::make_op("identity"), x);
}
EXPECT(m1 == m2);
}
TEST_CASE(simplify_unit_div_const)
{
migraphx::module m1;
{
auto x = m1.add_parameter("x", {migraphx::shape::int32_type, {1}});
auto unit = m1.add_literal(1);
auto div = m1.add_instruction(migraphx::make_op("div"), x, unit);
m1.add_return({div});
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
m2.add_return({x});
}
EXPECT(m1 == m2);
}
TEST_CASE(simplify_unit_div_const_vec)
{
migraphx::shape unit_shape{migraphx::shape::int32_type, {2}};
migraphx::shape x_shape{migraphx::shape::int32_type, {1, 2, 3, 3}};
migraphx::module m1;
{
auto unit = m1.add_literal({unit_shape, {1, 1}});
auto x = m1.add_parameter("x", x_shape);
auto unitb = m1.add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {1, 2, 3, 3}}}), unit);
m1.add_instruction(migraphx::make_op("div"), x, unitb);
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", x_shape);
m2.add_instruction(migraphx::make_op("identity"), x);
}
EXPECT(m1 == m2);
}
TEST_CASE(simplify_neg_unit_mult_const)
{
migraphx::module m1;
{
auto x = m1.add_parameter("x", {migraphx::shape::int32_type, {1}});
auto unit = m1.add_literal(-1);
m1.add_instruction(migraphx::make_op("mul"), x, unit);
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
m2.add_instruction(migraphx::make_op("neg"), x);
}
EXPECT((m1 == m2));
}
TEST_CASE(simplify_neg_unit_mult_const2)
{
migraphx::module m1;
{
auto unit = m1.add_literal(-1);
auto x = m1.add_parameter("x", {migraphx::shape::int32_type, {1}});
m1.add_instruction(migraphx::make_op("mul"), unit, x);
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
m2.add_instruction(migraphx::make_op("neg"), x);
}
EXPECT((m1 == m2));
}
TEST_CASE(simplify_neg_unit_mul_const_vec)
{
migraphx::shape unit_shape{migraphx::shape::int32_type, {2}};
migraphx::shape x_shape{migraphx::shape::int32_type, {1, 2, 3, 3}};
migraphx::module m1;
{
auto unit = m1.add_literal({unit_shape, {-1, -1}});
auto x = m1.add_parameter("x", x_shape);
auto unitb = m1.add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {1, 2, 3, 3}}}), unit);
m1.add_instruction(migraphx::make_op("mul"), x, unitb);
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", x_shape);
m2.add_instruction(migraphx::make_op("neg"), x);
}
EXPECT(m1 == m2);
}
TEST_CASE(simplify_neg_unit_mul_const_vec2)
{
migraphx::shape unit_shape{migraphx::shape::int32_type, {2}};
migraphx::shape x_shape{migraphx::shape::int32_type, {1, 2, 3, 3}};
migraphx::module m1;
{
auto unit = m1.add_literal({unit_shape, {-1, -1}});
auto x = m1.add_parameter("x", x_shape);
auto unitb = m1.add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {1, 2, 3, 3}}}), unit);
m1.add_instruction(migraphx::make_op("mul"), unitb, x);
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", x_shape);
m2.add_instruction(migraphx::make_op("neg"), x);
}
EXPECT(m1 == m2);
}
TEST_CASE(simplify_neg_unit_div_const)
{
migraphx::module m1;
{
auto x = m1.add_parameter("x", {migraphx::shape::int32_type, {1}});
auto unit = m1.add_literal(-1);
m1.add_instruction(migraphx::make_op("div"), x, unit);
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
m2.add_instruction(migraphx::make_op("neg"), x);
}
EXPECT(m1 == m2);
}
TEST_CASE(simplify_neg_unit_div_const_vec)
{
migraphx::shape unit_shape{migraphx::shape::int32_type, {2}};
migraphx::shape x_shape{migraphx::shape::int32_type, {1, 2, 3, 3}};
migraphx::module m1;
{
auto unit = m1.add_literal({unit_shape, {-1, -1}});
auto x = m1.add_parameter("x", x_shape);
auto unitb = m1.add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {1, 2, 3, 3}}}), unit);
m1.add_instruction(migraphx::make_op("div"), x, unitb);
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", x_shape);
m2.add_instruction(migraphx::make_op("neg"), x);
}
EXPECT(m1 == m2);
}
TEST_CASE(simplify_sub_zero_const)
{
migraphx::module m1;
{
auto x = m1.add_parameter("x", {migraphx::shape::int32_type, {1}});
auto zero = m1.add_literal(0);
m1.add_instruction(migraphx::make_op("sub"), x, zero);
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
m2.add_instruction(migraphx::make_op("identity"), x);
}
EXPECT(m1 == m2);
}
TEST_CASE(simplify_sub_zero_const_vec)
{
migraphx::shape zero_shape{migraphx::shape::int32_type, {2}};
migraphx::shape x_shape{migraphx::shape::int32_type, {1, 2, 3, 3}};
migraphx::module m1;
{
auto zero = m1.add_literal({zero_shape, {0, 0}});
auto x = m1.add_parameter("x", x_shape);
auto zerob = m1.add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {1, 2, 3, 3}}}), zero);
m1.add_instruction(migraphx::make_op("sub"), x, zerob);
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", x_shape);
m2.add_instruction(migraphx::make_op("identity"), x);
}
EXPECT(m1 == m2);
}
TEST_CASE(simplify_sub_neg_zero_const)
{
migraphx::module m1;
{
auto x = m1.add_parameter("x", {migraphx::shape::int32_type, {1}});
auto zero = m1.add_literal(0);
m1.add_instruction(migraphx::make_op("sub"), zero, x);
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
m2.add_instruction(migraphx::make_op("neg"), x);
}
EXPECT(m1 == m2);
}
TEST_CASE(simplify_sub_neg_zero_const_vec)
{
migraphx::shape zero_shape{migraphx::shape::int32_type, {2}};
migraphx::shape x_shape{migraphx::shape::int32_type, {1, 2, 3, 3}};
migraphx::module m1;
{
auto zero = m1.add_literal({zero_shape, {0, 0}});
auto x = m1.add_parameter("x", x_shape);
auto zerob = m1.add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {1, 2, 3, 3}}}), zero);
m1.add_instruction(migraphx::make_op("sub"), zerob, x);
}
run_pass(m1);
migraphx::module m2;
{
auto x = m2.add_parameter("x", x_shape);
m2.add_instruction(migraphx::make_op("neg"), x);
}
EXPECT(m1 == m2);
}
TEST_CASE(simplify_sub_const)
{
migraphx::module m1;
@@ -903,6 +1265,150 @@ TEST_CASE(simplify_sub_const)
EXPECT(m1 == m2);
}
TEST_CASE(simplify_zero_mult_const)
{
migraphx::module m1;
{
auto x = m1.add_parameter("x", {migraphx::shape::int32_type, {1}});
auto zero = m1.add_literal(0);
auto mul_ins = m1.add_instruction(migraphx::make_op("mul"), x, zero);
m1.add_return({mul_ins});
}
run_pass(m1);
migraphx::module m2;
{
m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
auto zero = m2.add_literal(0);
m2.add_return({zero});
}
EXPECT(m1 == m2);
}
TEST_CASE(simplify_zero_mult_const2)
{
migraphx::module m1;
{
auto x = m1.add_parameter("x", {migraphx::shape::int32_type, {1}});
auto zero = m1.add_literal(0);
auto mul_ins = m1.add_instruction(migraphx::make_op("mul"), zero, x);
m1.add_return({mul_ins});
}
run_pass(m1);
migraphx::module m2;
{
m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
auto zero = m2.add_literal(0);
m2.add_return({zero});
}
EXPECT(m1 == m2);
}
TEST_CASE(simplify_zero_mul_const_vec)
{
migraphx::shape zero_shape{migraphx::shape::int32_type, {2}};
migraphx::shape x_shape{migraphx::shape::int32_type, {1, 2, 3, 3}};
migraphx::module m1;
{
auto zero = m1.add_literal({zero_shape, {0, 0}});
auto x = m1.add_parameter("x", x_shape);
auto zerob = m1.add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {1, 2, 3, 3}}}), zero);
auto mul_ins = m1.add_instruction(migraphx::make_op("mul"), x, zerob);
m1.add_return({mul_ins});
}
run_pass(m1);
migraphx::module m2;
{
auto zero = m2.add_literal({zero_shape, {0, 0}});
m2.add_parameter("x", x_shape);
auto zerob = m2.add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {1, 2, 3, 3}}}), zero);
m2.add_return({zerob});
}
EXPECT(m1 == m2);
}
TEST_CASE(simplify_zero_mul_const_vec2)
{
migraphx::shape zero_shape{migraphx::shape::int32_type, {2}};
migraphx::shape x_shape{migraphx::shape::int32_type, {1, 2, 3, 3}};
migraphx::module m1;
{
auto zero = m1.add_literal({zero_shape, {0, 0}});
auto x = m1.add_parameter("x", x_shape);
auto zerob = m1.add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {1, 2, 3, 3}}}), zero);
auto mul_ins = m1.add_instruction(migraphx::make_op("mul"), zerob, x);
m1.add_return({mul_ins});
}
run_pass(m1);
migraphx::module m2;
{
auto zero = m2.add_literal({zero_shape, {0, 0}});
m2.add_parameter("x", x_shape);
auto zerob = m2.add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {1, 2, 3, 3}}}), zero);
m2.add_return({zerob});
}
EXPECT(m1 == m2);
}
TEST_CASE(simplify_zero_div_const)
{
migraphx::module m1;
{
auto zero = m1.add_literal(0);
auto x = m1.add_parameter("x", {migraphx::shape::int32_type, {1}});
auto div_ins = m1.add_instruction(migraphx::make_op("div"), zero, x);
m1.add_return({div_ins});
}
run_pass(m1);
migraphx::module m2;
{
auto zero = m2.add_literal(0);
m2.add_parameter("x", {migraphx::shape::int32_type, {1}});
m2.add_return({zero});
}
EXPECT(m1 == m2);
}
TEST_CASE(simplify_zero_div_const_vec)
{
migraphx::shape zero_shape{migraphx::shape::int32_type, {2}};
migraphx::shape x_shape{migraphx::shape::int32_type, {1, 2, 3, 3}};
migraphx::module m1;
{
auto x = m1.add_parameter("x", x_shape);
auto zero = m1.add_literal({zero_shape, {0, 0}});
auto zerob = m1.add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {1, 2, 3, 3}}}), zero);
auto div_ins = m1.add_instruction(migraphx::make_op("div"), zerob, x);
m1.add_return({div_ins});
}
run_pass(m1);
migraphx::module m2;
{
m2.add_parameter("x", x_shape);
auto zero = m2.add_literal({zero_shape, {0, 0}});
auto zerob = m2.add_instruction(
migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {1, 2, 3, 3}}}), zero);
m2.add_return({zerob});
}
EXPECT(m1 == m2);
}
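Taken together, these cases pin down the usual identities: x + 0 = x, x * 1 = x, x / 1 = x, x * -1 = -x, x - 0 = x, 0 - x = -x, x * 0 = 0, and 0 / x = 0, plus their broadcast variants. A quick NumPy spot-check of the same identities (illustration only, not part of the test suite):

import numpy as np

x = np.array([1, 2, 3, 4], dtype=np.int32)

assert np.array_equal(x + 0, x)                  # simplify_zero_add_constant
assert np.array_equal(x * 1, x)                  # simplify_unit_mult_const
assert np.array_equal(x // 1, x)                 # simplify_unit_div_const
assert np.array_equal(x * -1, -x)                # simplify_neg_unit_mult_const
assert np.array_equal(x - 0, x)                  # simplify_sub_zero_const
assert np.array_equal(0 - x, -x)                 # simplify_sub_neg_zero_const
assert np.array_equal(x * 0, np.zeros_like(x))   # simplify_zero_mult_const
assert np.array_equal(0 // x, np.zeros_like(x))  # simplify_zero_div_const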
TEST_CASE(simplify_rsqrt)
{
migraphx::module m1;
......
@@ -21,43 +21,53 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
-#ifndef MIGRAPHX_GUARD_RTGLIB_CONVERT_HPP
-#define MIGRAPHX_GUARD_RTGLIB_CONVERT_HPP
-
-#include <migraphx/argument.hpp>
-#include <migraphx/reflect.hpp>
-#include <migraphx/op/convert.hpp>
-
-namespace migraphx {
-inline namespace MIGRAPHX_INLINE_NS {
-namespace gpu {
-
-struct context;
-
-struct hip_convert
-{
-    op::convert op;
-
-    template <class Self, class F>
-    static auto reflect(Self& self, F f)
-    {
-        return migraphx::reflect(self.op, f);
-    }
-
-    std::string name() const { return "gpu::convert"; }
-    shape compute_shape(std::vector<shape> inputs) const;
-    argument compute(context& ctx, const shape&, const std::vector<argument>& args) const;
-    std::ptrdiff_t output_alias(const std::vector<shape>& shapes) const
-    {
-        return shapes.size() - 1;
-    }
-};
-
-} // namespace gpu
-} // namespace MIGRAPHX_INLINE_NS
-} // namespace migraphx
-#endif
+#include "verify_program.hpp"
+
+#include <migraphx/program.hpp>
+#include <migraphx/generate.hpp>
+#include <migraphx/make_op.hpp>
+#include <migraphx/common.hpp>
+
+/*
+Computes y == 0 ? eps : y.
+Added because the sign of HIP's fmod changes when y == 0, so nan vs. -nan is
+not consistent between the ref and gpu implementations.
+*/
+migraphx::instruction_ref add_epsilon(migraphx::module& m, migraphx::instruction_ref y)
+{
+    auto zero = m.add_literal(0.0f);
+    auto eps  = m.add_literal(1e-3f);
+    auto op_y = add_common_op(m, migraphx::make_op("equal"), {y, zero});
+    return add_common_op(m, migraphx::make_op("where"), {op_y, eps, y});
+}
+
+struct test_fmod : verify_program<test_fmod>
+{
+    migraphx::program create_program() const
+    {
+        migraphx::program p;
+        auto* mm = p.get_main_module();
+        migraphx::shape s{migraphx::shape::float_type, {64}};
+        auto x        = mm->add_parameter("x", s);
+        auto y        = mm->add_parameter("y", s);
+        auto op_where = add_epsilon(*mm, y);
+        mm->add_instruction(migraphx::make_op("fmod"), x, op_where);
+        return p;
+    }
+};
+
+struct test_mod : verify_program<test_mod>
+{
+    migraphx::program create_program() const
+    {
+        migraphx::program p;
+        auto* mm = p.get_main_module();
+        migraphx::shape s{migraphx::shape::float_type, {64}};
+        auto x        = mm->add_parameter("x", s);
+        auto y        = mm->add_parameter("y", s);
+        auto op_where = add_epsilon(*mm, y);
+        mm->add_instruction(migraphx::make_op("mod"), x, op_where);
+        return p;
+    }
+};
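The NaN-sign issue that add_epsilon works around can be reproduced outside MIGraphX as well. A NumPy sketch (my illustration, not part of the change) of the same y == 0 ? eps : y guard that the equal/where pair builds:

import numpy as np

x = np.array([3.5, -3.5, 1.0], dtype=np.float32)
y = np.array([2.0, 0.0, -2.0], dtype=np.float32)

# fmod with a zero divisor yields NaN, and the sign of that NaN is not
# guaranteed to be consistent across implementations; substituting a small
# eps for y == 0 keeps the ref and gpu outputs comparable.
eps = np.float32(1e-3)
y_safe = np.where(y == 0, eps, y)
print(np.fmod(x, y_safe))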
@@ -21,23 +21,23 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
-#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_SOFTMAX_HPP
-#define MIGRAPHX_GUARD_RTGLIB_DEVICE_SOFTMAX_HPP
-
-#include <migraphx/argument.hpp>
-#include <migraphx/config.hpp>
-#include <hip/hip_runtime_api.h>
-
-namespace migraphx {
-inline namespace MIGRAPHX_INLINE_NS {
-namespace gpu {
-namespace device {
-
-void softmax(hipStream_t stream, const argument& result, const argument& arg, int64_t axis);
-
-} // namespace device
-} // namespace gpu
-} // namespace MIGRAPHX_INLINE_NS
-} // namespace migraphx
-#endif
+#include "verify_program.hpp"
+
+#include <migraphx/program.hpp>
+#include <migraphx/generate.hpp>
+#include <migraphx/make_op.hpp>
+#include <migraphx/common.hpp>
+
+struct test_softmax_large3 : verify_program<test_softmax_large3>
+{
+    migraphx::program create_program() const
+    {
+        migraphx::program p;
+        auto* mm = p.get_main_module();
+        auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {2, 4}});
+        auto large = mm->add_literal({migraphx::shape{migraphx::shape::float_type}, {100}});
+        auto add   = migraphx::add_common_op(*mm, migraphx::make_op("mul"), {x, large});
+        mm->add_instruction(migraphx::make_op("softmax", {{"axis", -1}}), add);
+        return p;
+    }
+};
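Scaling the input by 100 pushes a naive float32 softmax into overflow (exp(100) is about 2.7e43, far above the float32 maximum of about 3.4e38), so this test only passes with the max-subtraction formulation. A NumPy sketch of the property being exercised (illustration only):

import numpy as np

x = 100 * np.random.randn(2, 4).astype(np.float32)

# Numerically stable softmax: softmax(z) == softmax(z - c) for any c, so
# subtracting the row max keeps exp() in range without changing the result.
z = x - x.max(axis=-1, keepdims=True)
e = np.exp(z)
print(e / e.sum(axis=-1, keepdims=True))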
@@ -116,6 +116,9 @@ def main():
    model = migraphx.parse_onnx(model_name, default_dim_value=batch)
    if args.verbose:
        print(model)
+
+   model.compile(migraphx.get_target('gpu'), offload_copy=False)
+
    params = {}
......
@@ -21,7 +21,10 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
-import string, sys, re, runpy
+import string
+import sys
+import re
+import runpy
from functools import wraps
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
@@ -308,18 +311,39 @@ class Parameter:
        return self.substitute('${type} ${name}', prefix=prefix)

    def virtual_output_args(self, prefix: Optional[str] = None) -> List[str]:
-       return [
-           '&{prefix}{n}'.format(prefix=prefix or '', n=n)
-           for t, n in self.cparams
-       ]
+       container_type = self.type.remove_generic().basic().str()
+       container = (container_type == "std::vector"
+                    or container_type == "vector")
+       decl_list: List[str] = []
+       for t, n in self.cparams:
+           if not decl_list and container:
+               decl_list.append('{prefix}{n}.data()'.format(prefix=prefix
+                                                            or '',
+                                                            n=n))
+           else:
+               decl_list.append('&{prefix}{n}'.format(prefix=prefix or '',
+                                                      n=n))
+       return decl_list

    def virtual_output_declarations(self,
                                    prefix: Optional[str] = None) -> List[str]:
-       return [
-           'std::remove_pointer_t<{type}> {prefix}{n};'.format(
-               type=Type(t).str(), prefix=prefix or '', n=n)
-           for t, n in self.cparams
-       ]
+       container_type = self.type.remove_generic().basic().str()
+       container = (container_type == "std::vector"
+                    or container_type == "vector")
+       decl_list: List[str] = []
+       for t, n in self.cparams:
+           if not decl_list and container:
+               inner_t = self.type.inner_type()
+               if inner_t:
+                   decl_list.append(
+                       'std::array<{inner_t}, 1024> {prefix}{n};'.format(
+                           inner_t=inner_t.str(), prefix=prefix or '', n=n))
+           else:
+               decl_list.append(
+                   'std::remove_pointer_t<{type}> {prefix}{n}'.format(
+                       type=Type(t).str(), prefix=prefix or '', n=n))
+               decl_list[-1] += '=1024;' if container else ';'
+       return decl_list

    def virtual_output(self, prefix: Optional[str] = None) -> str:
        write = self.virtual_write
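To make the container branch concrete, the loop can be exercised standalone. The cparams below are hypothetical, chosen to mimic a std::vector output parameter followed by its size (a sketch of the intent, not generator output verbatim):

from typing import List, Tuple

def container_output_args(cparams: List[Tuple[str, str]],
                          container: bool) -> List[str]:
    # Mirrors virtual_output_args above: the first cparam of a container is
    # passed as name.data(); every other cparam is passed by address.
    args: List[str] = []
    for _, n in cparams:
        if not args and container:
            args.append('{}.data()'.format(n))
        else:
            args.append('&{}'.format(n))
    return args

# Hypothetical cparams for a std::vector<const char*> output:
print(container_output_args([('char**', 'out'), ('size_t*', 'out_size')], True))
# -> ['out.data()', '&out_size']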
@@ -694,9 +718,14 @@ def generate_cpp_header() -> str:
                       [c.generate() for c in cpp_classes])

+c_type_map: Dict[str, Type] = {}
+

-def cwrap(name: str) -> Callable:
+def cwrap(name: str, c_type: Optional[str] = None) -> Callable:
    def with_cwrap(f):
        type_map[name] = f
+        if c_type:
+            c_type_map[name] = Type(c_type)

        @wraps(f)
        def decorated(*args, **kwargs):
@@ -917,6 +946,9 @@ def vector_c_wrap(p: Parameter) -> None:
    # Not a generic type
    if not inner:
        return
+   if inner.str() in c_type_map:
+       inner = c_type_map[inner.str()]
    t = inner.add_pointer()
    if p.type.is_reference():
        if p.type.is_const():
@@ -927,6 +959,12 @@ def vector_c_wrap(p: Parameter) -> None:
            p.add_size_param()
            p.bad_param('${name} == nullptr or ${size} == nullptr',
                        'Null pointer')
+       elif p.virtual:
+           p.add_param(t)
+           p.add_size_param()
+           p.bad_param('${name} == nullptr or ${size} == nullptr',
+                       'Null pointer')
+           p.virtual_write = '{${name}.begin(), ${name}.begin()+${size}}; // cppcheck-suppress returnDanglingLifetime'
        else:
            p.add_param(t)
            p.bad_param('${name} == nullptr', 'Null pointer')
@@ -946,7 +984,7 @@ def vector_c_wrap(p: Parameter) -> None:
        p.write = ['std::copy(${result}.begin(), ${result}.end(), ${name})']

-@cwrap('std::string')
+@cwrap('std::string', 'char*')
def string_c_wrap(p: Parameter) -> None:
    t = Type('char*')
    if p.returns:
@@ -1061,9 +1099,9 @@ struct ${ctype} {
c_api_virtual_impl = Template('''
${return_type} ${name}(${params}) const
{
-    ${output_decls}
    if (${fname} == nullptr)
        throw std::runtime_error("${name} function is missing.");
+    ${output_decls}
    std::array<char, 256> exception_msg;
    exception_msg.front() = '\\0';
    auto api_error_result = ${fname}(${args});
......
@@ -21,6 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/execution_environment.hpp>
#include <migraphx/migraphx.h>
#include <migraphx/rank.hpp>
#include <migraphx/shape.hpp>
@@ -166,6 +167,13 @@ void set_output_names(tf_options& options, std::vector<const char*> names)
options.output_node_names = std::vector<std::string>(names.begin(), names.end());
}
std::vector<argument>
run_async(program& p, const parameter_map& params, void* s, std::string_view name)
{
execution_environment exec_env{any_ptr(s, name), true};
return p.eval(params, exec_env);
}
template <class Value>
std::vector<const char*> get_names(const std::unordered_map<std::string, Value>& m)
{
@@ -265,11 +273,18 @@ struct experimental_custom_op
template <class CustomOp>
struct custom_operation
{
template <class Self, class F>
static auto reflect(Self&, F)
{
return pack();
}
value attributes() const
{
return {{"custom_op", true}, {"target", op.runs_on_offload_target() ? "gpu" : "cpu"}};
}
CustomOp op;
std::string name() const { return op.xobject.name; }
@@ -284,6 +299,23 @@ struct custom_operation
{
return op.compute(std::move(ctx), std::move(output_shape), std::move(inputs));
}
std::ptrdiff_t output_alias(std::vector<shape> inputs) const
{
auto alias_vec = op.output_alias(std::move(inputs));
// TODO: For now, only support one output alias
if(alias_vec.empty())
{
return -1;
}
if(alias_vec.size() > 1)
{
MIGRAPHX_THROW("Currently, CustomOps in MIGraphX only supports one output_alias");
}
return alias_vec.front();
}
bool runs_on_offload_target() const { return op.runs_on_offload_target(); }
};
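The alias rule above is small enough to restate in isolation; a Python transliteration (hypothetical helper, illustration only) of the empty/single/multiple cases:

def single_output_alias(alias_vec):
    # Mirrors custom_operation::output_alias: empty means no alias (-1), one
    # entry names the aliased input, and more than one is rejected because
    # MIGraphX custom ops currently support a single output alias.
    if not alias_vec:
        return -1
    if len(alias_vec) > 1:
        raise RuntimeError("only one output_alias is supported")
    return alias_vec[0]

assert single_output_alias([]) == -1
assert single_output_alias([2]) == 2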
template <class CustomOp>
......
@@ -26,7 +26,6 @@
#include <stdlib.h>
#include <stdbool.h>
// Add new types here
// clang-format off
#define MIGRAPHX_SHAPE_VISIT_TYPES(m) \
......
@@ -66,12 +66,21 @@ any_ptr get_queue_context(T&)
{
return {};
}
template <class T>
void wait_for_context(T&, any_ptr)
{
}
template <class T>
void finish_on_context(T&, any_ptr){}
<%
interface('context',
virtual('to_value', returns = 'value', const = True, default = 'to_value_context'),
virtual('from_value', v = 'const value&', default = 'from_value_context'),
virtual('get_queue', returns = 'any_ptr', default = 'get_queue_context'),
virtual('wait_for', queue = 'any_ptr', returns = 'void', default = 'wait_for_context'),
virtual('finish_on', queue = 'any_ptr', returns = 'void', default = 'finish_on_context'),
virtual('finish', returns = 'void', const = True)) %>
inline void migraphx_to_value(value& v, const context& ctx)
......