Commit 26d1a969 authored by Alan Turner

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into develop

parents f5ebc8f5 065d06af
......@@ -36,11 +36,12 @@ struct mlir_compiler : compiler<mlir_compiler>
operation compile_op(context&, const std::vector<shape>&, const value&) const { return {}; }
compiler_replace compile(context& ctx, instruction_ref ins, const operation&) const
compiler_replace
compile(const context& ctx, instruction_ref ins, const operation&, const value& solution) const
{
auto* smod = ins->module_inputs().front();
assert(smod->get_parameter_names().size() == ins->inputs().size() - 1);
return insert(compile_mlir(ctx, *smod, ins->inputs()));
return insert(compile_mlir(ctx, *smod, ins->inputs(), solution));
}
compiler_replace insert(code_object_op co) const
......@@ -50,6 +51,18 @@ struct mlir_compiler : compiler<mlir_compiler>
m.replace_instruction(ins, mlir);
}};
}
optional<tuning_config> get_tuning_config(const context& ctx,
instruction_ref ins,
const operation&,
bool exhaustive) const
{
if(not exhaustive)
return nullopt;
auto shapes = to_shapes(ins->inputs());
auto* smod = ins->module_inputs().front();
return get_tuning_config_mlir(ctx, *smod, shapes);
}
};
} // namespace gpu
......
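The two hooks added above (get_tuning_config on the compiler and the solution parameter on compile) compose into an exhaustive-tuning loop. A minimal sketch against the signatures in this diff; time_kernel() is a hypothetical benchmarking helper, not a MIGraphX API:

// Sketch only: returns the best solution value, or a null value so that
// compile() falls back to the tuning table.
value tune_mlir_op(const mlir_compiler& mc,
                   const context& ctx,
                   instruction_ref ins,
                   const operation& op)
{
    auto tc = mc.get_tuning_config(ctx, ins, op, /*exhaustive=*/true);
    if(not tc.has_value())
        return {};
    value best{};
    double best_time = std::numeric_limits<double>::max();
    for(const auto& perf_key : tc->solutions)
    {
        try
        {
            auto replacement = mc.compile(ctx, ins, op, value{perf_key});
            double t         = time_kernel(ctx, replacement); // hypothetical helper
            if(t < best_time)
            {
                best_time = t;
                best      = value{perf_key};
            }
        }
        catch(const migraphx::exception&)
        {
            // not every perf config in the full tuning space compiles;
            // set_tuning() throws on invalid keys, so skip failures
        }
    }
    return best;
}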
......@@ -72,7 +72,7 @@ struct pointwise_compiler : compiler<pointwise_compiler>
hip_compile_options options;
options.inputs = inputs;
options.output = inputs.back();
options.virtual_inputs = reduce_dims(inputs);
options.virtual_inputs = reduce_dims(normalize_permutation(inputs));
options.params = "-Wno-float-equal";
auto axis = find_fast_axis(options.virtual_inputs);
auto vec = vectorize::elements(ctx, axis, options.virtual_inputs);
......
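Both the pointwise hunk above and the fused_reduce hunk below now wrap the inputs in normalize_permutation before reduce_dims. A worked example of the intended effect, assuming normalize_permutation transposes every shape by the inputs' common permutation so that strides become standard:

// Illustrative only: one NHWC input as the compiler sees it.
//   lens {4, 3, 8, 8}, strides {192, 1, 24, 3}   (channels innermost)
// After normalize_permutation, dimensions follow memory order:
//   lens {4, 8, 8, 3}, strides {192, 24, 3, 1}   (now standard)
// reduce_dims can then collapse the contiguous dims into a single {768}
// dimension for the pointwise kernel, which the permuted view prevented.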
......@@ -84,7 +84,7 @@ static shape get_reduced_shape(const shape& s, const std::vector<T>& axes)
std::fill(lens.begin(), lens.end(), 1);
for(const auto& axis : axes)
lens[axis] = s.lens()[axis];
return shape{s.type(), lens};
return s.with_lens(lens);
}
template <class T>
......@@ -93,7 +93,7 @@ static shape get_output_shape(const shape& s, const std::vector<T>& axes)
auto lens = s.lens();
for(const auto& axis : axes)
lens[axis] = 1;
return shape{s.type(), lens};
return s.with_lens(lens);
}
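// with_lens(lens) re-derives strides from the permutation of s instead of
// assuming row-major order, so a reduced shape computed from an NHWC input
// stays NHWC. A sketch of the difference, assuming with_lens preserves the
// permutation of s:
//   s: lens {4, 3, 8, 8}, strides {192, 1, 24, 3}   (NHWC)
//   shape{s.type(), {4, 3, 1, 1}}  -> strides {3, 1, 1, 1} (layout dropped)
//   s.with_lens({4, 3, 1, 1})      -> channels remain the innermost axis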
template <class ReduceLens>
......@@ -228,7 +228,7 @@ struct fused_reduce_compiler : compiler<fused_reduce_compiler>
auto virtual_inputs = inputs;
virtual_inputs.push_back(get_reduced_shape(inputs.front(), axes));
virtual_inputs.push_back(get_output_shape(inputs.front(), axes));
virtual_inputs = reduce_dims(virtual_inputs);
virtual_inputs = reduce_dims(normalize_permutation(virtual_inputs));
auto reduce_output_shape = virtual_inputs.back();
virtual_inputs.pop_back();
auto reduction_shape = virtual_inputs.back();
......
......@@ -36,7 +36,10 @@
#include <mutex>
#if !defined(MLIR_MIGRAPHX_DIALECT_API_VERSION) || MLIR_MIGRAPHX_DIALECT_API_VERSION != 3
#warning "Incompatible version of rocMLIR library used, disabling"
// Only undefine when not using cppcheck
#ifndef CPPCHECK
#undef MIGRAPHX_MLIR
#endif
#else
#include <mlir-c/RegisterRocMLIR.h>
#endif
......@@ -50,8 +53,10 @@
#include <migraphx/ranges.hpp>
#include <migraphx/gpu/code_object_op.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/compile_gen.hpp>
#include <migraphx/gpu/device_name.hpp>
#include <migraphx/gpu/perfdb.hpp>
#include <migraphx/gpu/tuning_config.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/permutation.hpp>
#include <deque>
......@@ -134,6 +139,10 @@ using mlir_block = MIGRAPHX_MANAGE_MLIR_HANDLE(MlirBlock, mlirBlockD
using mlir_pass_manager = MIGRAPHX_MANAGE_MLIR_HANDLE(MlirPassManager, mlirPassManagerDestroy);
using mlir_tuning_table = MIGRAPHX_MANAGE_MLIR_HANDLE(MlirRockTuningTable,
mlirRockTuningTableDestroy);
using mlir_tuning_space = MIGRAPHX_MANAGE_MLIR_HANDLE(MlirRockTuningSpace,
mlirRockTuningSpaceDestroy);
using mlir_tuning_param = MIGRAPHX_MANAGE_MLIR_HANDLE(MlirRockTuningParam,
mlirRockTuningParamDestroy);
std::string_view to_string_view(MlirStringRef s) { return {s.data, s.length}; }
......@@ -167,12 +176,6 @@ std::string mlir_print(F f, T x)
return ss.str();
}
bool has_xdlops(const std::string& target_arch)
{
const auto device_name = trim(split_string(target_arch, ':').front());
return (starts_with(device_name, "gfx9") and device_name >= "gfx908");
}
struct mlir_program
{
mlir_program()
......@@ -507,7 +510,8 @@ struct mlir_program
ops.add_attributes({{"function_type", make_function_type(inputs, outputs)},
{"sym_name", sym_name},
{"kernel", std::string("mixr")},
{"arch", target_arch}});
{"arch", target_arch},
{"num_cu", num_cu}});
ops.add_region(std::move(region));
insert(body, std::move(ops));
......@@ -554,14 +558,7 @@ struct mlir_program
static std::string get_symbol_name(const module& m)
{
for(auto ins : iterator_for(m))
{
if(ins->name() == "convolution" or ins->name() == "dot")
{
return "mlir_" + ins->name();
}
}
return "main";
return "mlir_" + gen::generate_name_from_ops(m);
}
void parse(const module& m)
......@@ -597,9 +594,6 @@ struct mlir_program
{
pp =
problem_params{ins->get_operator(), to_shapes(ins->inputs()), ins->get_shape()};
// check if HW supports xdlops
if(has_xdlops(target_arch))
ops.add_attributes({{"xdlopsV2", true}});
}
std::vector<MlirValue> inputs;
......@@ -616,18 +610,30 @@ struct mlir_program
}
}
code_object_op compile() MIGRAPHX_TIDY_CONST
void run_high_level_pipeline() MIGRAPHX_TIDY_CONST
{
mlir_pass_manager pm_front{mlirPassManagerCreate(ctx.get())};
mlir_pass_manager pm_back{mlirPassManagerCreate(ctx.get())};
// 1st pipeline to call
mlirMIGraphXAddHighLevelPipeline(pm_front.get());
mlirPassManagerRunOnOp(pm_front.get(), mlirModuleGetOperation(mmodule.get()));
}
// 2nd pipeline to call
get_module_tuned();
void run_backend_pipeline() MIGRAPHX_TIDY_CONST
{
mlir_pass_manager pm_back{mlirPassManagerCreate(ctx.get())};
mlirMIGraphXAddBackendPipeline(pm_back.get(), target_arch.c_str());
mlirPassManagerRunOnOp(pm_back.get(), mlirModuleGetOperation(mmodule.get()));
}
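// Compilation is now staged explicitly: run_high_level_pipeline() lowers the
// MIGraphX dialect, a tuning choice is applied in between (the caller's
// solution string when one is provided, otherwise a tuning-table lookup via
// get_module_tuned()), and run_backend_pipeline() produces the final GPU
// binary for target_arch.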
code_object_op compile(const value& solution) MIGRAPHX_TIDY_CONST
{
// 1st pipeline to call
run_high_level_pipeline();
if(solution.is_null())
get_module_tuned();
else
set_tuning(solution);
// 2nd pipeline to call
run_backend_pipeline();
code_object_op op{};
op.symbol_name = sym_name;
......@@ -636,7 +642,12 @@ struct mlir_program
return op;
}
void find_target() { target_arch = get_device_name(); }
void set_gpu_properties(const context& migraphx_ctx)
{
auto& device = migraphx_ctx.get_current_device();
target_arch = device.get_device_name();
num_cu = device.get_cu_count();
}
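// num_cu is emitted next to "arch" as a function attribute (see
// add_attributes above) and, judging by the tuning-db change below, also
// becomes part of the rocMLIR tuning key (arch \t numCU \t problem).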
std::pair<std::size_t, std::size_t> get_launch_params() const
{
......@@ -650,7 +661,7 @@ struct mlir_program
value::binary get_binary() const
{
int size = 0;
size_t size = 0;
mlirGetBinary(mmodule.get(), &size, nullptr);
value::binary result(size);
if(mlirGetBinary(mmodule.get(), &size, reinterpret_cast<char*>(result.data())))
......@@ -658,14 +669,52 @@ struct mlir_program
MIGRAPHX_THROW("Failed to compile mlir program");
}
void set_tuning(const value& v)
{
auto* str = v.if_string();
if(not str)
MIGRAPHX_THROW("mlir tuning solutions must be strings");
if(not mlirRockTuningSetFromStr(mmodule.get(), make_mlir_string_ref(*str)))
MIGRAPHX_THROW("Failed setting tuning key: " + *str);
}
tuning_config get_tuning_config() MIGRAPHX_TIDY_CONST
{
tuning_config tc;
run_high_level_pipeline();
mlir_tuning_space params{
mlirRockTuningSpaceCreate(mmodule.get(), RocmlirTuningParamSetKindFull)};
for(auto i : range(mlirRockTuningGetNumParams(params.get())))
{
mlir_tuning_param param{mlirRockTuningParamCreate()};
if(not mlirRockTuningParamGet(params.get(), i, param.get()))
MIGRAPHX_THROW("Incorrect mlir tuning parameter: " + std::to_string(i));
std::array<char, ROCMLIR_TUNING_KEY_BUFSZ> perf_key;
size_t perf_key_bytes =
mlirRockTuningParamToString(param.get(), perf_key.data(), perf_key.size());
if(perf_key_bytes > perf_key.size())
MIGRAPHX_THROW("Tuning perf key was " + std::to_string(perf_key_bytes) +
" bytes and thus too long");
tc.solutions.emplace_back(perf_key.begin(), perf_key.begin() + perf_key_bytes);
}
std::array<char, ROCMLIR_TUNING_KEY_BUFSZ> tuning_key;
size_t tuning_key_bytes =
mlirRockTuningGetKey(mmodule.get(), tuning_key.data(), tuning_key.size());
if(tuning_key_bytes > tuning_key.size())
MIGRAPHX_THROW("Tuning table key was " + std::to_string(tuning_key_bytes) +
" bytes and thus too long");
tc.problem = std::string(tuning_key.begin(), tuning_key.begin() + tuning_key_bytes);
return tc;
}
std::string get_tune_params(bool xdlops) const { return get_mlir_perf_for_conv(pp, xdlops); }
// This function appends to a tuning cfg file that can be used with the
// rocMLIR tuning scripts.
void dump_tuning_cfg(const char* prob_config) const
void dump_tuning_cfg(const std::string& prob_config) const
{
std::string tuning_cfg_path = string_value_of(MIGRAPHX_MLIR_TUNING_CFG{});
if(!tuning_cfg_path.empty())
if(not tuning_cfg_path.empty())
{
std::vector<std::string> tokens = split_string(prob_config, '\t');
std::string prob = tokens[1];
......@@ -682,51 +731,66 @@ struct mlir_program
}
}
static mlir_tuning_table create_tuning_table()
static std::pair<mlir_tuning_table, bool> load_tuning_table()
{
mlir_tuning_table tuning_table{mlirRockTuningTableCreate()};
bool found_table = false;
std::string tuning_db_path = string_value_of(MIGRAPHX_MLIR_TUNING_DB{});
if(!tuning_db_path.empty())
if(not tuning_db_path.empty())
{
std::ifstream tuning_db_tsv(tuning_db_path);
if(tuning_db_tsv)
{
found_table = true;
std::string line;
while(std::getline(tuning_db_tsv, line))
{
std::vector<std::string> tokens = split_string(line, '\t');
std::string arch = tokens[0];
std::string prob = tokens[1];
std::string perf = tokens[2];
std::string key = arch.append("\t").append(prob);
mlirRockTuningUpdateTable(tuning_table.get(), key.c_str(), perf.c_str(), 1.0);
std::string numCU = tokens[1];
std::string prob = tokens[2];
std::string perf = tokens[3];
std::string key = arch.append("\t").append(numCU).append("\t").append(prob);
mlirRockTuningUpdateTable(tuning_table.get(),
make_mlir_string_ref(key),
make_mlir_string_ref(perf),
1.0);
}
}
}
else
{
found_table = false;
std::cerr
<< "WARNING: MLIR tuning db not found. Please set MIGRAPHX_MLIR_TUNING_DB for "
"optimal performance."
<< std::endl;
}
return tuning_table;
return std::make_pair(std::move(tuning_table), found_table);
}
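// A sketch of one row of the tuning db in the new four-field layout (the
// concrete values are illustrative only; split_string above tokenizes on
// '\t'):
//
//   gfx90a    110    <rocMLIR problem key>    <perf config>
//   tokens[0] = arch, tokens[1] = numCU, tokens[2] = prob, tokens[3] = perf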
bool get_module_tuned() const
{
static mlir_tuning_table tuning_table = create_tuning_table();
// The tuning table as currently implemented is not thread-safe. This
// will be fixed in the future; for now, guard all tuning table
// interaction with a mutex.
static std::mutex lock;
std::lock_guard<std::mutex> guard(lock);
if(!mlirRockTuningSetFromTable(tuning_table.get(), mmodule.get()))
static std::pair<mlir_tuning_table, bool> tuning_table = load_tuning_table();
if(not mlirRockTuningSetFromTable(tuning_table.first.get(), mmodule.get()))
{
const char* prob_config = mlirRockTuningGetKey(tuning_table.get(), mmodule.get());
std::stringstream key(prob_config);
std::cerr << "fails to set param on" << prob_config << std::endl;
dump_tuning_cfg(prob_config);
std::array<char, ROCMLIR_TUNING_KEY_BUFSZ> prob_config;
size_t prob_config_bytes =
mlirRockTuningGetKey(mmodule.get(), prob_config.data(), prob_config.size());
if(prob_config_bytes >= prob_config.size())
{
std::cerr << "MLIR tuning key overflowed buffer, needed " << prob_config_bytes
<< " bytes" << std::endl;
return false;
}
std::string prob_config_str(prob_config.begin(),
prob_config.begin() + prob_config_bytes);
if(tuning_table.second)
{
std::cerr << "NOTE: MLIR tuning table did not include a key for " << prob_config_str
<< std::endl;
}
dump_tuning_cfg(prob_config_str);
return false;
}
return true;
......@@ -737,7 +801,8 @@ struct mlir_program
mlir_module mmodule;
problem_params pp;
std::deque<std::string> strings{};
std::string target_arch;
std::string target_arch = "";
std::size_t num_cu = 0;
std::string sym_name;
};
......@@ -749,14 +814,14 @@ std::string dump_mlir(const module& m)
return mlir_print(&mlirOperationPrint, mod_op);
}
void adjust_param_shapes(module& m, const std::vector<instruction_ref>& inputs)
void adjust_param_shapes(module& m, const std::vector<shape>& inputs)
{
auto names = m.get_parameter_names();
std::sort(names.begin(), names.end());
for(auto i : range(names.size()))
{
const auto& name = names[i];
const auto& input = inputs[i]->get_shape();
const auto& input = inputs[i];
auto param = m.get_parameter(name);
if(input.standard())
continue;
......@@ -794,22 +859,26 @@ void adjust_param_shapes(module& m, const std::vector<instruction_ref>& inputs)
}
}
code_object_op compile_mlir(const context&, module m, const std::vector<instruction_ref>& inputs)
code_object_op compile_mlir(const context& migraphx_ctx,
module m,
const std::vector<instruction_ref>& inputs,
const value& solution)
{
adjust_param_shapes(m, inputs);
adjust_param_shapes(m, to_shapes(inputs));
const bool trace = enabled(MIGRAPHX_TRACE_MLIR{});
if(trace)
std::cout << m << std::endl;
mlir_program mp;
mp.find_target();
mp.set_gpu_properties(migraphx_ctx);
mp.parse(m);
auto mod_op = mlirModuleGetOperation(mp.mmodule.get());
if(trace)
std::cout << mlir_print(&mlirOperationPrint, mod_op) << std::endl;
auto co = mp.compile();
co.output = m.get_output_shapes().front();
auto co = mp.compile(solution);
co.expected_inputs = to_shapes(inputs);
co.output = m.get_output_shapes().front();
return co;
}
......@@ -829,6 +898,17 @@ instruction_ref insert_mlir(module& m,
return m.insert_instruction(ins, co, refs);
}
tuning_config
get_tuning_config_mlir(const context& migraphx_ctx, module m, const std::vector<shape>& inputs)
{
adjust_param_shapes(m, inputs);
mlir_program mp;
mp.set_gpu_properties(migraphx_ctx);
mp.parse(m);
return mp.get_tuning_config();
}
#else
std::string dump_mlir(const module&) { return {}; }
......@@ -840,20 +920,27 @@ void use(T&)
// Disable the clang-tidy warning on non-real usage.
// NOLINTBEGIN(performance-unnecessary-value-param)
code_object_op compile_mlir(const context&, module, const std::vector<instruction_ref>&)
code_object_op
compile_mlir(const context&, module, const std::vector<instruction_ref>&, const value&)
{
return {};
}
// NOLINTEND(performance-unnecessary-value-param)
instruction_ref
// cppcheck-suppress funcArgNamesDifferent
insert_mlir(module& m, instruction_ref, code_object_op co, const std::vector<instruction_ref>&)
{
use(co);
use(m);
return m.end();
}
tuning_config get_tuning_config_mlir(const context&, module, const std::vector<shape>&)
{
return {};
}
// NOLINTEND(performance-unnecessary-value-param)
#endif
} // namespace gpu
......
......@@ -34,7 +34,7 @@ namespace gpu {
std::vector<argument> generate_arguments(const std::vector<shape>& shapes, unsigned long seed = 0)
{
std::vector<argument> args;
std::transform(shapes.begin(), shapes.end(), std::back_inserter(args), [&](auto& s) {
std::transform(shapes.begin(), shapes.end(), std::back_inserter(args), [&](const auto& s) {
return to_gpu(generate_argument(s, seed++));
});
return args;
......
......@@ -338,7 +338,7 @@ void tf_parser::parse_node(const std::string& name)
std::string input_name = input;
// if input has trailing `:0` index then remove it
auto multi_out_idx = input.find(':');
if(multi_out_idx != std::string::npos && input.substr(multi_out_idx + 1) == "0")
if(multi_out_idx != std::string::npos and input.substr(multi_out_idx + 1) == "0")
{
input_name = input.substr(0, multi_out_idx);
}
......
......@@ -285,7 +285,7 @@ bool value::contains(const std::string& pkey) const
}
std::size_t value::size() const
{
auto* a = if_array_impl(x);
const auto* a = if_array_impl(x);
if(a == nullptr)
return 0;
return a->size();
......
......@@ -98,17 +98,11 @@ endfunction()
function(add_test_executable TEST_NAME)
add_executable(${TEST_NAME} EXCLUDE_FROM_ALL ${ARGN})
target_link_libraries(${TEST_NAME} ${CMAKE_THREAD_LIBS_INIT})
# Cmake does not add flags correctly for gcc
if(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
set_target_properties(${TEST_NAME} PROPERTIES COMPILE_FLAGS -pthread LINK_FLAGS -pthread)
endif()
set(TEST_COMMAND ${TEST_NAME})
add_test_command(${TEST_NAME} ${TEST_COMMAND})
add_dependencies(tests ${TEST_NAME})
add_dependencies(check ${TEST_NAME})
target_link_libraries(${TEST_NAME} migraphx migraphx_onnx migraphx_ref)
target_link_libraries(${TEST_NAME} Threads::Threads migraphx migraphx_onnx migraphx_ref)
target_include_directories(${TEST_NAME} PUBLIC include)
endfunction(add_test_executable)
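# Threads::Threads is CMake's imported thread target (typically from
# find_package(Threads) with THREADS_PREFER_PTHREAD_FLAG); it adds -pthread to
# both the compile and link lines, covering what the removed GNU-specific
# COMPILE_FLAGS/LINK_FLAGS workaround did by hand.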
......
......@@ -99,7 +99,7 @@ TEST_CASE(run_sigmoid_custom_op)
EXPECT(bool{result == migraphx::argument(s, expected_result.data())});
}
extern "C" void migraphx_test_private_disable_exception_catch(bool b);
extern "C" MIGRAPHX_C_EXPORT void migraphx_test_private_disable_exception_catch(bool);
TEST_CASE(run_sigmoid_with_incorrect_shape)
{
......
......@@ -84,7 +84,7 @@ migraphx::program create_program_from_mlir(const migraphx::module& mmlir)
inputs.push_back(mm->add_parameter("output", mmlir.get_output_shapes().front()));
migraphx::gpu::context ctx;
migraphx::gpu::insert_mlir(*mm, mm->end(), compile_mlir(ctx, mmlir, inputs), inputs);
migraphx::gpu::insert_mlir(*mm, mm->end(), compile_mlir(ctx, mmlir, inputs, {}), inputs);
return p;
}
......@@ -140,7 +140,7 @@ TEST_CASE(conv)
{
const std::string mlir_output = R"__migraphx__(
module {
func.func @mlir_convolution(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr"} {
func.func @mlir_convolution(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr", num_cu = 0 : i64} {
%0 = migraphx.convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
return %0 : tensor<1x2x2x2xf32>
}
......@@ -163,7 +163,7 @@ TEST_CASE(conv_add_relu)
{
const std::string mlir_output = R"__migraphx__(
module {
func.func @mlir_convolution(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr"} {
func.func @mlir_convolution_add_relu(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr", num_cu = 0 : i64} {
%0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
%1 = migraphx.add(%0, %arg0) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
%2 = migraphx.relu(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
......@@ -191,7 +191,7 @@ TEST_CASE(quant_dot_add)
{
const std::string mlir_output = R"__migraphx__(
module {
func.func @main(%arg0: tensor<1x5x4xi8>, %arg1: tensor<1x4x3xi8>, %arg2: tensor<1x5x3xi32>) -> tensor<1x5x3xi32> attributes {arch = "", kernel = "mixr"} {
func.func @mlir_quant_dot_add(%arg0: tensor<1x5x4xi8>, %arg1: tensor<1x4x3xi8>, %arg2: tensor<1x5x3xi32>) -> tensor<1x5x3xi32> attributes {arch = "", kernel = "mixr", num_cu = 0 : i64} {
%0 = migraphx.quant_dot(%arg0, %arg1) : (tensor<1x5x4xi8>, tensor<1x4x3xi8>) -> tensor<1x5x3xi32>
%1 = migraphx.add(%0, %arg2) : (tensor<1x5x3xi32>, tensor<1x5x3xi32>) -> tensor<1x5x3xi32>
return %1 : tensor<1x5x3xi32>
......@@ -218,7 +218,7 @@ TEST_CASE(dot_add)
{
const std::string mlir_output = R"__migraphx__(
module {
func.func @mlir_dot(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>, %arg2: tensor<1x5x3xf32>) -> tensor<1x5x3xf32> attributes {arch = "", kernel = "mixr"} {
func.func @mlir_dot_add(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>, %arg2: tensor<1x5x3xf32>) -> tensor<1x5x3xf32> attributes {arch = "", kernel = "mixr", num_cu = 0 : i64} {
%0 = migraphx.dot(%arg0, %arg1) : (tensor<1x5x4xf32>, tensor<1x4x3xf32>) -> tensor<1x5x3xf32>
%1 = migraphx.add(%0, %arg2) : (tensor<1x5x3xf32>, tensor<1x5x3xf32>) -> tensor<1x5x3xf32>
return %1 : tensor<1x5x3xf32>
......@@ -244,7 +244,7 @@ TEST_CASE(conv_int8_dequantize_quantize)
{
const std::string mlir_output = R"__migraphx__(
module {
func.func @main(%arg0: tensor<2x8x3x3xi8>, %arg1: tensor<1x8x4x4xi8>, %arg2: tensor<1x2x2x2xf32>, %arg3: tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xi32> attributes {arch = "", kernel = "mixr"} {
func.func @mlir_quant_convolution_dequantizelinear_quantizelinear(%arg0: tensor<2x8x3x3xi8>, %arg1: tensor<1x8x4x4xi8>, %arg2: tensor<1x2x2x2xf32>, %arg3: tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xi32> attributes {arch = "", kernel = "mixr", num_cu = 0 : i64} {
%0 = migraphx.quant_convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xi8>, tensor<2x8x3x3xi8>) -> tensor<1x2x2x2xi32>
%1 = migraphx.dequantizelinear(%0, %arg2, %arg3) : (tensor<1x2x2x2xi32>, tensor<1x2x2x2xf32>, tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xf32>
%2 = migraphx.quantizelinear(%1, %arg2, %arg3) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>, tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xi32>
......@@ -277,7 +277,7 @@ TEST_CASE(dot_convert)
{
const std::string mlir_output = R"__migraphx__(
module {
func.func @mlir_dot(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>) -> tensor<1x5x3xf16> attributes {arch = "", kernel = "mixr"} {
func.func @mlir_dot_convert(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>) -> tensor<1x5x3xf16> attributes {arch = "", kernel = "mixr", num_cu = 0 : i64} {
%0 = migraphx.dot(%arg0, %arg1) : (tensor<1x5x4xf32>, tensor<1x4x3xf32>) -> tensor<1x5x3xf32>
%1 = migraphx.convert(%0) {target_type = 1 : i64} : (tensor<1x5x3xf32>) -> tensor<1x5x3xf16>
return %1 : tensor<1x5x3xf16>
......@@ -303,7 +303,7 @@ TEST_CASE(dot_where)
{
const std::string mlir_output = R"__migraphx__(
module {
func.func @mlir_dot(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>, %arg2: tensor<1x5x3xi8>, %arg3: tensor<1x5x3xf32>) -> tensor<1x5x3xf32> attributes {arch = "", kernel = "mixr"} {
func.func @mlir_dot_where(%arg0: tensor<1x5x4xf32>, %arg1: tensor<1x4x3xf32>, %arg2: tensor<1x5x3xi8>, %arg3: tensor<1x5x3xf32>) -> tensor<1x5x3xf32> attributes {arch = "", kernel = "mixr", num_cu = 0 : i64} {
%0 = migraphx.dot(%arg0, %arg1) : (tensor<1x5x4xf32>, tensor<1x4x3xf32>) -> tensor<1x5x3xf32>
%1 = migraphx.where(%arg2, %0, %arg3) : (tensor<1x5x3xi8>, tensor<1x5x3xf32>, tensor<1x5x3xf32>) -> tensor<1x5x3xf32>
return %1 : tensor<1x5x3xf32>
......
......@@ -89,17 +89,13 @@ bool is_overlap_load(migraphx::instruction_ref a, migraphx::instruction_ref b)
bool is_disjoint(const std::vector<migraphx::instruction_ref>& inss)
{
for(auto ins1 : inss)
{
for(auto ins2 : inss)
{
return std::none_of(inss.begin(), inss.end(), [&](auto ins1) {
return std::none_of(inss.begin(), inss.end(), [&](auto ins2) {
if(ins1 == ins2)
continue;
if(is_overlap_load(ins1, ins2))
return false;
}
}
return true;
return true;
return is_overlap_load(ins1, ins2);
});
});
}
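// The refactor replaces the hand-rolled double loop with nested algorithm
// calls; the intent is unchanged: is_disjoint holds when no pair of distinct
// instructions in inss has overlapping loads.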
TEST_CASE(test1)
......
......@@ -83,7 +83,7 @@ TEST_CASE(calc_implict_deps)
auto* else_mod = p.create_module("If_5_else");
auto l2 = else_mod->add_literal(migraphx::literal(ys, datay));
auto a2 = else_mod->add_instruction(migraphx::make_op("if"), {cond}, {then_mod1, else_mod1});
auto a3 = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), a2);
auto a3 = else_mod->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), a2);
else_mod->add_return({a3, l2});
auto ret = mm->add_instruction(migraphx::make_op("if"), {cond}, {then_mod, else_mod});
......@@ -95,6 +95,15 @@ TEST_CASE(calc_implict_deps)
EXPECT(migraphx::contains(implicit_deps.at(ret), x1));
EXPECT(migraphx::contains(implicit_deps.at(ret), x2));
EXPECT(migraphx::contains(implicit_deps.at(ret), y2));
EXPECT(migraphx::contains(implicit_deps.at(ret), lx));
EXPECT(migraphx::contains(implicit_deps.at(ret), ly));
// test for sorting
p.sort();
auto ret_inputs = ret->inputs();
ret_inputs.insert(ret_inputs.end(), implicit_deps.at(ret).begin(), implicit_deps.at(ret).end());
EXPECT(std::all_of(ret_inputs.begin(), ret_inputs.end(), [&](const auto i) {
return std::distance(mm->begin(), i) < std::distance(mm->begin(), ret);
}));
}
TEST_CASE(module_annotate)
......
d3295f4329d744fe1f8419e1220e123807282b99
e5bb7aba502f5a8783de945258d226c092c14386
......@@ -4712,14 +4712,16 @@ TEST_CASE(quantizelinear_test)
auto l1 = mm->add_parameter("1", {migraphx::shape::float_type, {1}});
auto l1_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {5}}}), l1);
auto div = mm->add_instruction(migraphx::make_op("div"), l0, l1_mbcast);
auto round = mm->add_instruction(migraphx::make_op("round"), div);
auto s = round->get_shape();
std::vector<int> min_data(s.elements(), 0);
std::vector<int> max_data(s.elements(), 255);
auto min_arg = mm->add_literal(s, min_data);
auto max_arg = mm->add_literal(s, max_data);
auto clip = mm->add_instruction(migraphx::make_op("clip"), round, min_arg, max_arg);
auto div = mm->add_instruction(migraphx::make_op("div"), l0, l1_mbcast);
auto round = mm->add_instruction(migraphx::make_op("round"), div);
auto s = round->get_shape();
auto min_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {0}});
auto max_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {255}});
auto min_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), min_arg);
auto max_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), max_arg);
auto clip = mm->add_instruction(migraphx::make_op("clip"), round, min_mbcast, max_mbcast);
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::uint8_type)}}),
......@@ -4741,14 +4743,16 @@ TEST_CASE(quantizelinear_int32_test)
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
l0);
auto div = mm->add_instruction(migraphx::make_op("div"), l0, l1_mbcast);
auto round = mm->add_instruction(migraphx::make_op("round"), div);
auto s = round->get_shape();
std::vector<int> min_data(s.elements(), 0);
std::vector<int> max_data(s.elements(), 255);
auto min_arg = mm->add_literal(s, min_data);
auto max_arg = mm->add_literal(s, max_data);
auto clip = mm->add_instruction(migraphx::make_op("clip"), round, min_arg, max_arg);
auto div = mm->add_instruction(migraphx::make_op("div"), l0, l1_mbcast);
auto round = mm->add_instruction(migraphx::make_op("round"), div);
auto s = round->get_shape();
auto min_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {0}});
auto max_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {255}});
auto min_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), min_arg);
auto max_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), max_arg);
auto clip = mm->add_instruction(migraphx::make_op("clip"), round, min_mbcast, max_mbcast);
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::uint8_type)}}),
......@@ -4775,13 +4779,15 @@ TEST_CASE(quantizelinear_zero_point_test)
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
l2_mbcast);
auto add = mm->add_instruction(migraphx::make_op("add"), round, l2_mbcast);
auto s = round->get_shape();
std::vector<int> min_data(s.elements(), -128);
std::vector<int> max_data(s.elements(), 127);
auto min_arg = mm->add_literal(s, min_data);
auto max_arg = mm->add_literal(s, max_data);
auto clip = mm->add_instruction(migraphx::make_op("clip"), add, min_arg, max_arg);
auto add = mm->add_instruction(migraphx::make_op("add"), round, l2_mbcast);
auto s = round->get_shape();
auto min_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {-128}});
auto max_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {127}});
auto min_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), min_arg);
auto max_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), max_arg);
auto clip = mm->add_instruction(migraphx::make_op("clip"), add, min_mbcast, max_mbcast);
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::int8_type)}}),
......@@ -4812,13 +4818,15 @@ migraphx::program make_quantizelinear_axis_prog()
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
l2_bcast);
auto add = mm->add_instruction(migraphx::make_op("add"), round, l2_bcast);
auto s = round->get_shape();
std::vector<int> min_data(s.elements(), -128);
std::vector<int> max_data(s.elements(), 127);
auto min_arg = mm->add_literal(s, min_data);
auto max_arg = mm->add_literal(s, max_data);
auto clip = mm->add_instruction(migraphx::make_op("clip"), add, min_arg, max_arg);
auto add = mm->add_instruction(migraphx::make_op("add"), round, l2_bcast);
auto s = round->get_shape();
auto min_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {-128}});
auto max_arg = mm->add_literal(migraphx::literal{migraphx::shape{s.type()}, {127}});
auto min_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), min_arg);
auto max_mbcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), max_arg);
auto clip = mm->add_instruction(migraphx::make_op("clip"), add, min_mbcast, max_mbcast);
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::int8_type)}}),
......
......@@ -323,7 +323,7 @@ TEST_CASE(conv_dyn_batch)
TEST_CASE(conv_dyn_img)
{
migraphx::shape input_dyn_shape = {migraphx::shape::float_type,
{{1, 1}, {3, 3}, {5, 20}, {5, 20}}};
{{1, 1}, {3, 3}, {5, 20}, {5, 20}}};
migraphx::shape weights_shape = {migraphx::shape::float_type, {1, 3, 3, 3}};
migraphx::shape output_dyn_shape = {migraphx::shape::float_type,
{{1, 1}, {1, 1}, {3, 18}, {3, 18}}};
......@@ -376,7 +376,7 @@ TEST_CASE(conv_autopad_dyn_batch)
{
// auto_pad dynamic batch
migraphx::shape input_dyn_shape = {migraphx::shape::float_type,
{{1, 10}, {3, 3}, {5, 5}, {5, 5}}};
{{1, 10}, {3, 3}, {5, 5}, {5, 5}}};
migraphx::shape weights_shape = {migraphx::shape::float_type, {1, 3, 3, 3}};
migraphx::shape output_dyn_shape = {migraphx::shape::float_type,
{{1, 10}, {1, 1}, {5, 5}, {5, 5}}};
......@@ -393,7 +393,7 @@ TEST_CASE(conv_autopad_dyn_img)
{
// auto_pad dynamic img
migraphx::shape input_dyn_shape = {migraphx::shape::float_type,
{{1, 1}, {3, 3}, {5, 10}, {5, 10}}};
{{1, 1}, {3, 3}, {5, 10}, {5, 10}}};
migraphx::shape weights_shape = {migraphx::shape::float_type, {1, 3, 3, 3}};
migraphx::shape output_dyn_shape = {migraphx::shape::float_type,
{{1, 1}, {1, 1}, {5, 10}, {5, 10}}};
......@@ -2597,6 +2597,36 @@ TEST_CASE(reshape_non_fixed_not_matching_error)
throws_shape(migraphx::make_op("reshape", {{"dims", new_shape}}), input);
}
TEST_CASE(return_shape_tuple)
{
using migraphx::shape;
auto op = migraphx::make_op("@return");
shape s0{shape::bool_type, {1, 1}};
shape s1{shape::float_type, {2, 3}};
std::vector<shape> s{s0, s1};
auto s_out = op.compute_shape(s);
EXPECT(s_out.type() == shape::tuple_type);
EXPECT(s0 == s_out.sub_shapes()[0]);
EXPECT(s1 == s_out.sub_shapes()[1]);
}
TEST_CASE(return_shape_half)
{
using migraphx::shape;
auto op = migraphx::make_op("@return");
std::vector<shape> s{{shape::half_type}};
EXPECT(op.compute_shape(s) == shape{shape::half_type});
}
TEST_CASE(return_shape_empty)
{
using migraphx::shape;
auto op = migraphx::make_op("@return");
std::vector<shape> s;
EXPECT(op.compute_shape(s) == shape{});
}
TEST_CASE(rnn)
{
{
......
......@@ -27,7 +27,7 @@ include(PythonModules)
function(add_py_test NAME SCRIPT)
foreach(PYTHON_VERSION ${PYTHON_VERSIONS})
set (ENV_COMMAND ${CMAKE_COMMAND} -E env
"PYTHONPATH=$<TARGET_FILE_DIR:migraphx_py_${PYTHON_VERSION}>"
"PYTHONPATH=$<TARGET_FILE_DIR:migraphx_pybind_${PYTHON_VERSION}>"
"PYTHONMALLOC=debug"
"MALLOC_CHECK_=3"
)
......@@ -41,10 +41,8 @@ function(add_py_test NAME SCRIPT)
endforeach()
endfunction()
foreach(PYTHON_VERSION ${PYTHON_VERSIONS})
add_dependencies(tests migraphx_py_${PYTHON_VERSION})
add_dependencies(check migraphx_py_${PYTHON_VERSION})
endforeach()
add_dependencies(tests migraphx_py)
add_dependencies(check migraphx_py)
add_py_test(ref test_cpu.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(save_load test_save_load.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
......
......@@ -37,6 +37,17 @@
bool is_quantizelinear(migraphx::instruction& ins) { return ins.name() == "quantizelinear"; }
bool is_dequantizelinear(migraphx::instruction& ins) { return ins.name() == "dequantizelinear"; }
bool is_clip_scalar(migraphx::instruction& ins)
{
if(ins.name() == "clip")
{
assert(ins.inputs().size() > 1);
return (std::all_of(ins.inputs().begin() + 1, ins.inputs().end(), [](auto input) {
return input->get_shape().scalar();
}));
}
return false;
}
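// A multibroadcast of a single-element literal yields strides of all zeros,
// which is what shape::scalar() is assumed to report here; the old full-size
// literals had ordinary strides and would fail this check.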
void run_pass(migraphx::module& m) { migraphx::run_passes(m, {migraphx::rewrite_quantization{}}); }
......@@ -70,6 +81,8 @@ TEST_CASE(quantizelinear)
EXPECT(eval(p1) == eval(p2));
EXPECT(any_of(*p1.get_main_module(), &is_quantizelinear));
EXPECT(none_of(*p2.get_main_module(), &is_quantizelinear));
// ensure clip literals created in the quantized program are scalar
EXPECT(any_of(*p2.get_main_module(), &is_clip_scalar));
}
TEST_CASE(dequantizelinear)
......
......@@ -2189,16 +2189,16 @@ TEST_CASE(simplify_split_between_add)
EXPECT(m1.sort() == m2.sort());
}
TEST_CASE(simplify_dot_horiz)
void test_dot_horiz(migraphx::shape::type_t type, const std::string& dot_type)
{
auto s = migraphx::shape{migraphx::shape::int32_type, {3, 2, 2}};
auto s = migraphx::shape{type, {3, 2, 2}};
migraphx::module m1;
{
auto input = m1.add_parameter("input", s);
auto a = m1.add_literal(migraphx::generate_literal(s, 0));
auto b = m1.add_literal(migraphx::generate_literal(s, 1));
auto x = m1.add_instruction(migraphx::make_op("dot"), input, a);
auto y = m1.add_instruction(migraphx::make_op("dot"), input, b);
auto x = m1.add_instruction(migraphx::make_op(dot_type), input, a);
auto y = m1.add_instruction(migraphx::make_op(dot_type), input, b);
auto sum = m1.add_instruction(migraphx::make_op("add"), x, y);
m1.add_instruction(pass_op{}, sum);
}
......@@ -2210,7 +2210,7 @@ TEST_CASE(simplify_dot_horiz)
auto a = m2.add_literal(migraphx::generate_literal(s, 0));
auto b = m2.add_literal(migraphx::generate_literal(s, 1));
auto concat = m2.add_instruction(migraphx::make_op("concat", {{"axis", 2}}), a, b);
auto dot = m2.add_instruction(migraphx::make_op("dot"), input, concat);
auto dot = m2.add_instruction(migraphx::make_op(dot_type), input, concat);
auto x = m2.add_instruction(
migraphx::make_op("slice", {{"axes", {2}}, {"starts", {0}}, {"ends", {2}}}), dot);
auto y = m2.add_instruction(
......@@ -2221,6 +2221,10 @@ TEST_CASE(simplify_dot_horiz)
EXPECT(m1.sort() == m2.sort());
}
TEST_CASE(simplify_dot_horiz) { test_dot_horiz(migraphx::shape::int32_type, "dot"); }
TEST_CASE(simplify_quant_dot_horiz) { test_dot_horiz(migraphx::shape::int8_type, "quant_dot"); }
TEST_CASE(simplify_dot_horiz_same_constant)
{
auto s = migraphx::shape{migraphx::shape::int32_type, {3, 2, 2}};
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_add_nhwc : verify_program<test_add_nhwc>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
auto s = migraphx::shape::from_permutation(
migraphx::shape::float_type, {4, 3, 8, 8}, {0, 2, 3, 1});
auto x = mm->add_parameter("x", s);
auto y = mm->add_parameter("y", s);
auto add = mm->add_instruction(migraphx::make_op("add"), x, y);
mm->add_return({add});
return p;
}
};
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>
struct test_reduce_mean_nhwc : verify_program<test_reduce_mean_nhwc>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
auto s = migraphx::shape::from_permutation(
migraphx::shape::float_type, {4, 256, 2, 2}, {0, 2, 3, 1});
auto x = mm->add_parameter("x", s);
auto reduce = mm->add_instruction(migraphx::make_op("reduce_mean", {{"axes", {1}}}), x);
auto abs = mm->add_instruction(migraphx::make_op("abs"), reduce);
auto sqrt = mm->add_instruction(migraphx::make_op("sqrt"), abs);
mm->add_return({sqrt});
return p;
};
};
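// Both new verify tests build NHWC inputs via shape::from_permutation, which
// keeps the logical lens but lays the data out in the given axis order.
// Assuming the permutation lists axes from outermost to innermost:
//   from_permutation(float_type, {4, 256, 2, 2}, {0, 2, 3, 1})
//   -> lens {4, 256, 2, 2}, strides {1024, 1, 512, 256}
// i.e. an NHWC buffer behind an NCHW-shaped view, exercising the
// normalize_permutation changes in the pointwise and reduce compilers.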