Commit 00d5d880 authored by Khalique Ahmed's avatar Khalique Ahmed
Browse files

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into mi100_opts

parents 00d90ca8 f60c3815
......@@ -57,20 +57,7 @@ struct parse_pooling : op_parser<parse_pooling>
calculate_padding(0, pads, input_dims[2], op.stride[0], 1, op.lengths[0]);
calculate_padding(1, pads, input_dims[3], op.stride[1], 1, op.lengths[1]);
if(pads[0] != pads[2] || pads[1] != pads[3])
{
std::vector<int64_t> padding = {0, 0, pads[0], pads[1], 0, 0, pads[2], pads[3]};
l0 = info.add_instruction(
migraphx::make_op(
"pad",
{{"pads", padding}, {"value", std::numeric_limits<float>::lowest()}}),
l0);
}
else
{
op.padding[0] = pads[0];
op.padding[1] = pads[1];
}
op.padding = std::vector<size_t>(pads.begin(), pads.end());
}
}
return info.add_instruction(op, l0);
......
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/instruction.hpp>
#include <basic_ops.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/ranges.hpp>
......@@ -113,15 +114,16 @@ TEST_CASE(depth_test)
TEST_CASE(undefined_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto one = mm->add_literal(1);
auto two = mm->add_literal(2);
auto undef = mm->add_instruction(migraphx::make_op("undefined"));
auto* mm = p.get_main_module();
auto one = mm->add_literal(1);
auto two = mm->add_literal(2);
mm->add_instruction(migraphx::make_op("undefined"));
mm->add_instruction(sum_op{}, one, two);
auto count = std::distance(mm->begin(), mm->end());
run_pass(p);
EXPECT(std::distance(mm->begin(), mm->end()) == count - 1);
EXPECT(not mm->has_instruction(undef));
EXPECT(
std::none_of(mm->begin(), mm->end(), [](auto&& ins) { return ins.name() == "undefined"; }));
auto result = p.eval({}).back();
EXPECT(result == migraphx::literal{3});
EXPECT(result != migraphx::literal{4});
......
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/normalize_ops.hpp>
#include <migraphx/eliminate_pad.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/instruction.hpp>
......@@ -10,7 +11,9 @@
void run_pass(migraphx::module& m)
{
migraphx::run_passes(m, {migraphx::eliminate_pad{}, migraphx::dead_code_elimination{}});
migraphx::run_passes(
m,
{migraphx::normalize_ops{}, migraphx::eliminate_pad{}, migraphx::dead_code_elimination{}});
}
migraphx::instruction_ref
......@@ -66,15 +69,15 @@ TEST_CASE(rewrite_pad)
auto om1 = l1->get_operator().to_value();
auto om2 = l2->get_operator().to_value();
EXPECT(op0["padding"].to_vector<std::size_t>() == std::vector<std::size_t>{1, 1});
EXPECT(om1["padding"].to_vector<std::size_t>() == std::vector<std::size_t>{1, 1});
EXPECT(om2["padding"].to_vector<std::size_t>() == std::vector<std::size_t>{1, 1});
EXPECT(op0["padding"].to_vector<std::size_t>() == std::vector<std::size_t>{1, 1, 1, 1});
EXPECT(om1["padding"].to_vector<std::size_t>() == std::vector<std::size_t>{1, 1, 1, 1});
EXPECT(om2["padding"].to_vector<std::size_t>() == std::vector<std::size_t>{1, 1, 1, 1});
EXPECT(std::none_of(
m.begin(), m.end(), [](const migraphx::instruction& ins) { return ins.name() == "pad"; }));
}
TEST_CASE(rewrite_pad_im2col_asymetric)
TEST_CASE(rewrite_pad_im2col_asymmetric)
{
migraphx::module m;
......@@ -95,10 +98,10 @@ TEST_CASE(rewrite_pad_im2col_asymetric)
EXPECT(l0->get_shape() == s0);
auto op0 = l0->get_operator().to_value();
EXPECT(op0["padding"].to_vector<std::size_t>() == std::vector<std::size_t>{0, 0});
EXPECT(op0["padding"].to_vector<std::size_t>() == std::vector<std::size_t>{0, 0, 2, 2});
run_pass(m);
EXPECT(std::any_of(
EXPECT(std::none_of(
m.begin(), m.end(), [](const migraphx::instruction& ins) { return ins.name() == "pad"; }));
}
......
#include "migraphx/instruction_ref.hpp"
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/adjust_allocation.hpp>
#include <migraphx/gpu/pack_int8_args.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/auto_contiguous.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/eliminate_contiguous.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/make_op.hpp>
#include <test.hpp>
// Apply the GPU compilation pipeline exercised by these tests: layout
// normalization, lowering to GPU ops, then int8 argument packing, with
// dead-code elimination run after each mutating pass.
void run_passes(migraphx::module& m)
{
    // The lowering pass needs a live GPU context.
    migraphx::gpu::context gpu_ctx{};
    migraphx::run_passes(m,
                         {migraphx::auto_contiguous{},
                          migraphx::gpu::lowering{&gpu_ctx, false},
                          migraphx::dead_code_elimination{},
                          migraphx::gpu::pack_int8_args{},
                          migraphx::dead_code_elimination{}});
}
// Query whether rocBLAS expects int8 GEMM inputs in the packed int8x4 layout.
// rocBLAS gained a runtime query for this in 2.38; on older versions the
// packed layout is the only behavior, so default to true.
// Returns true when the int8x4 packed layout is in effect.
bool get_int8_x4_format()
{
    bool int8_x4_format = true;
// Accept any release >= 2.38, including future majors where the minor number
// resets (e.g. 3.0). The previous `MAJOR >= 2 && MINOR >= 38` check wrongly
// rejected such versions; compare (major, minor) lexicographically instead.
#if ROCBLAS_VERSION_MAJOR > 2 || (ROCBLAS_VERSION_MAJOR == 2 && ROCBLAS_VERSION_MINOR >= 38)
    auto ctx = migraphx::gpu::context{};
    rocblas_gemm_flags flag;
    rocblas_query_int8_layout_flag(ctx.get_stream().get_rocblas(), &flag);
    int8_x4_format = (flag == rocblas_gemm_flags_pack_int8x4);
#endif
    return int8_x4_format;
}
// A plain 2-D int8 GEMM (r = a*b + c) should lower to gpu::quant_gemm.
// When rocBLAS uses the int8x4 layout, the B operand must additionally be
// repacked via gpu::int8_gemm_pack_a into a freshly allocated buffer.
TEST_CASE(quant_dot)
{
    // Source graph: quant_dot(a, b, c) with int8 inputs, int32 accumulator.
    auto create_module = [] {
        migraphx::module m("test");
        migraphx::shape m1_shape{migraphx::shape::int8_type, {5, 8}};
        migraphx::shape m2_shape{migraphx::shape::int8_type, {8, 7}};
        migraphx::shape m3_shape{migraphx::shape::int32_type, {5, 7}};
        auto l1 = m.add_parameter("a", m1_shape);
        auto l2 = m.add_parameter("b", m2_shape);
        auto l3 = m.add_parameter("c", m3_shape);
        auto r  = m.add_instruction(migraphx::make_op("quant_dot"), l1, l2, l3);
        m.add_return({r});
        return m;
    };
    // Expected graph after lowering; int8_x4 toggles the extra pack step.
    auto create_optimized_int8_x4 = [](bool int8_x4) {
        migraphx::module m("test");
        migraphx::shape m1_shape{migraphx::shape::int8_type, {5, 8}};
        migraphx::shape m2_shape{migraphx::shape::int8_type, {8, 7}};
        migraphx::shape m3_shape{migraphx::shape::int32_type, {5, 7}};
        auto l1     = m.add_parameter("a", m1_shape);
        auto l2     = m.add_parameter("b", m2_shape);
        auto l3     = m.add_parameter("c", m3_shape);
        auto output = m.add_parameter("test:#output_0", m3_shape);
        // beta != 0: the c operand is copied into the output buffer first.
        auto cout  = m.add_instruction(migraphx::make_op("hip::copy"), l3, output);
        auto packa = l2;
        if(int8_x4)
        {
            auto alloc = m.add_instruction(
                migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(m2_shape)}}));
            packa = m.add_instruction(migraphx::make_op("gpu::int8_gemm_pack_a"), l2, alloc);
        }
        auto gemm = m.add_instruction(
            migraphx::make_op("gpu::quant_gemm",
                              {{"alpha", 1}, {"beta", 1}, {"int8_x4_format", int8_x4}}),
            l1,
            packa,
            cout,
            cout);
        m.add_return({gemm});
        return m;
    };
    auto m1 = create_module();
    run_passes(m1);
    // Build the reference module for whichever layout this rocBLAS uses.
    bool flag = get_int8_x4_format();
    auto m2   = create_optimized_int8_x4(flag);
    EXPECT(m1 == m2);
}
// quant_dot on transposed inputs: lowering must insert gpu::contiguous
// copies for both transposed operands. With only two inputs (no c operand),
// beta is forced to 0 in the lowered gemm; int8x4 adds a pack step for B.
TEST_CASE(quant_dot_trans)
{
    auto create_module = [] {
        migraphx::module m("test");
        migraphx::shape s1{migraphx::shape::int8_type, {3, 2, 8, 5}};
        migraphx::shape s2{migraphx::shape::int8_type, {3, 2, 7, 8}};
        auto l1  = m.add_parameter("a", s1);
        auto tl1 = m.add_instruction(migraphx::make_op("transpose", {{"dims", {0, 1, 3, 2}}}), l1);
        auto l2  = m.add_parameter("b", s2);
        auto tl2 = m.add_instruction(migraphx::make_op("transpose", {{"dims", {0, 1, 3, 2}}}), l2);
        auto r   = m.add_instruction(
            migraphx::make_op("quant_dot", {{"alpha", 3}, {"beta", 2}}), tl1, tl2);
        m.add_return({r});
        return m;
    };
    auto create_optimized_int8_x4 = [](bool int8_x4) {
        migraphx::module m("test");
        migraphx::shape s1{migraphx::shape::int8_type, {3, 2, 8, 5}};
        migraphx::shape s2{migraphx::shape::int8_type, {3, 2, 7, 8}};
        migraphx::shape s3{migraphx::shape::int32_type, {3, 2, 5, 7}};
        auto l1     = m.add_parameter("a", s1);
        auto l2     = m.add_parameter("b", s2);
        auto output = m.add_parameter("test:#output_0", s3);
        // A operand: transpose -> contiguous copy into a new buffer.
        auto tl1 = m.add_instruction(migraphx::make_op("transpose", {{"dims", {0, 1, 3, 2}}}), l1);
        migraphx::shape ts1{migraphx::shape::int8_type, {3, 2, 5, 8}};
        auto alloca = m.add_instruction(
            migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(ts1)}}));
        auto conta = m.add_instruction(migraphx::make_op("gpu::contiguous"), tl1, alloca);
        // B operand: transpose -> contiguous, then (int8x4 only) repack.
        auto tl2 = m.add_instruction(migraphx::make_op("transpose", {{"dims", {0, 1, 3, 2}}}), l2);
        migraphx::shape ts2{migraphx::shape::int8_type, {3, 2, 8, 7}};
        auto allocb = m.add_instruction(
            migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(ts2)}}));
        auto contb = m.add_instruction(migraphx::make_op("gpu::contiguous"), tl2, allocb);
        auto packb = contb;
        if(int8_x4)
        {
            auto allocpb = m.add_instruction(
                migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(ts2)}}));
            packb = m.add_instruction(migraphx::make_op("gpu::int8_gemm_pack_a"), contb, allocpb);
        }
        auto gemm = m.add_instruction(
            migraphx::make_op("gpu::quant_gemm",
                              {{"alpha", 3}, {"beta", 0}, {"int8_x4_format", int8_x4}}),
            conta,
            packb,
            output);
        m.add_return({gemm});
        return m;
    };
    auto m1 = create_module();
    bool flag = get_int8_x4_format();
    auto m2   = create_optimized_int8_x4(flag);
    run_passes(m1);
    EXPECT(m1 == m2);
}
// quant_dot with a K dimension (6) that is not a multiple of 4: in int8x4
// mode both int8 operands must be zero-padded up to K=8 before the gemm,
// and the padded B operand is then repacked.
TEST_CASE(quant_dot_pad)
{
    auto create_module = [] {
        migraphx::module m("test");
        migraphx::shape s1{migraphx::shape::int8_type, {5, 6}};
        migraphx::shape s2{migraphx::shape::int8_type, {6, 7}};
        migraphx::shape s3{migraphx::shape::int32_type, {5, 7}};
        auto l1 = m.add_parameter("a", s1);
        auto l2 = m.add_parameter("b", s2);
        auto l3 = m.add_parameter("c", s3);
        auto r  = m.add_instruction(migraphx::make_op("quant_dot"), l1, l2, l3);
        m.add_return({r});
        return m;
    };
    auto create_optimized_int8_x4 = [](bool int8_x4) {
        migraphx::module m("test");
        // ps1/ps2 are the operand shapes after padding K from 6 up to 8.
        migraphx::shape s1{migraphx::shape::int8_type, {5, 6}};
        migraphx::shape ps1{migraphx::shape::int8_type, {5, 8}};
        migraphx::shape s2{migraphx::shape::int8_type, {6, 7}};
        migraphx::shape ps2{migraphx::shape::int8_type, {8, 7}};
        migraphx::shape s3{migraphx::shape::int32_type, {5, 7}};
        auto l1     = m.add_parameter("a", s1);
        auto l2     = m.add_parameter("b", s2);
        auto l3     = m.add_parameter("c", s3);
        auto output = m.add_parameter("test:#output_0", s3);
        auto pl1   = l1;
        auto packa = l2;
        migraphx::instruction_ref pl2{};
        if(int8_x4)
        {
            // Zero-pad a: {5,6} -> {5,8}; its allocation is inserted before l1.
            auto po1 = m.insert_instruction(
                l1, migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(ps1)}}));
            pl1 = m.add_instruction(
                migraphx::make_op("gpu::pad", {{"mode", 0}, {"pads", {0, 2, 0, 0}}, {"value", 0}}),
                l1,
                po1);
            // Zero-pad b: {6,7} -> {8,7}; the pad is inserted right after l2.
            auto po2 = m.insert_instruction(
                l2, migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(ps2)}}));
            pl2 = m.insert_instruction(
                std::next(l2),
                migraphx::make_op("gpu::pad", {{"mode", 0}, {"pads", {2, 0, 0, 0}}, {"value", 0}}),
                l2,
                po2);
        }
        // beta != 0: the c operand is copied into the output buffer first.
        auto cout = m.add_instruction(migraphx::make_op("hip::copy"), l3, output);
        if(int8_x4)
        {
            auto alloc = m.add_instruction(
                migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(ps2)}}));
            packa = m.add_instruction(migraphx::make_op("gpu::int8_gemm_pack_a"), pl2, alloc);
        }
        auto gemm = m.add_instruction(
            migraphx::make_op("gpu::quant_gemm",
                              {{"alpha", 1}, {"beta", 1}, {"int8_x4_format", int8_x4}}),
            pl1,
            packa,
            cout,
            cout);
        m.add_return({gemm});
        return m;
    };
    auto m1 = create_module();
    bool flag = get_int8_x4_format();
    auto m2   = create_optimized_int8_x4(flag);
    run_passes(m1);
    EXPECT(m1 == m2);
}
// Combination of the two previous cases: transposed inputs AND a K dimension
// (9) that is not a multiple of 4. Lowering inserts contiguous copies, and
// in int8x4 mode additionally zero-pads K up to 12 and repacks B.
TEST_CASE(quant_dot_trans_pad)
{
    auto create_module = [] {
        migraphx::module m("test");
        migraphx::shape s1{migraphx::shape::int8_type, {3, 2, 9, 5}};
        migraphx::shape s2{migraphx::shape::int8_type, {3, 2, 7, 9}};
        auto l1  = m.add_parameter("a", s1);
        auto tl1 = m.add_instruction(migraphx::make_op("transpose", {{"dims", {0, 1, 3, 2}}}), l1);
        auto l2  = m.add_parameter("b", s2);
        auto tl2 = m.add_instruction(migraphx::make_op("transpose", {{"dims", {0, 1, 3, 2}}}), l2);
        auto r   = m.add_instruction(
            migraphx::make_op("quant_dot", {{"alpha", 3}, {"beta", 2}}), tl1, tl2);
        m.add_return({r});
        return m;
    };
    auto create_optimized_int8_x4 = [](bool int8_x4) {
        migraphx::module m("test");
        // ps1/ps2 are the operand shapes after padding K from 9 up to 12.
        migraphx::shape s1{migraphx::shape::int8_type, {3, 2, 9, 5}};
        migraphx::shape ps1{migraphx::shape::int8_type, {3, 2, 5, 12}};
        migraphx::shape s2{migraphx::shape::int8_type, {3, 2, 7, 9}};
        migraphx::shape ps2{migraphx::shape::int8_type, {3, 2, 12, 7}};
        migraphx::shape s3{migraphx::shape::int32_type, {3, 2, 5, 7}};
        auto l1     = m.add_parameter("a", s1);
        auto l2     = m.add_parameter("b", s2);
        auto output = m.add_parameter("test:#output_0", s3);
        // A operand: transpose -> contiguous -> (int8x4 only) zero-pad.
        auto tl1 = m.add_instruction(migraphx::make_op("transpose", {{"dims", {0, 1, 3, 2}}}), l1);
        migraphx::shape ts1{migraphx::shape::int8_type, {3, 2, 5, 9}};
        auto ta = m.add_instruction(
            migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(ts1)}}));
        migraphx::instruction_ref pta{};
        if(int8_x4)
        {
            pta = m.add_instruction(
                migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(ps1)}}));
        }
        auto conta = m.add_instruction(migraphx::make_op("gpu::contiguous"), tl1, ta);
        auto pa    = conta;
        if(int8_x4)
        {
            pa = m.add_instruction(
                migraphx::make_op("gpu::pad", {{"mode", 0}, {"pads", {0, 0, 0, 3, 0, 0, 0, 0}}}),
                conta,
                pta);
        }
        // B operand: transpose -> contiguous -> (int8x4 only) zero-pad + pack.
        auto tl2 = m.add_instruction(migraphx::make_op("transpose", {{"dims", {0, 1, 3, 2}}}), l2);
        migraphx::shape ts2{migraphx::shape::int8_type, {3, 2, 9, 7}};
        auto tb = m.add_instruction(
            migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(ts2)}}));
        migraphx::instruction_ref ptb{};
        if(int8_x4)
        {
            ptb = m.add_instruction(
                migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(ps2)}}));
        }
        auto contb = m.add_instruction(migraphx::make_op("gpu::contiguous"), tl2, tb);
        auto packb = contb;
        if(int8_x4)
        {
            auto pb = m.add_instruction(
                migraphx::make_op("gpu::pad", {{"mode", 0}, {"pads", {0, 0, 3, 0, 0, 0, 0, 0}}}),
                contb,
                ptb);
            auto allocpb = m.add_instruction(
                migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(ps2)}}));
            packb = m.add_instruction(migraphx::make_op("gpu::int8_gemm_pack_a"), pb, allocpb);
        }
        auto gemm = m.add_instruction(
            migraphx::make_op("gpu::quant_gemm",
                              {{"alpha", 3}, {"beta", 0}, {"int8_x4_format", int8_x4}}),
            pa,
            packb,
            output);
        m.add_return({gemm});
        return m;
    };
    auto m1 = create_module();
    bool flag = get_int8_x4_format();
    auto m2   = create_optimized_int8_x4(flag);
    run_passes(m1);
    EXPECT(m1 == m2);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -63,9 +63,9 @@ TEST_CASE(int8_quantization)
auto create_program = [] {
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sa{migraphx::shape::float_type, {2, 16}};
migraphx::shape sa{migraphx::shape::float_type, {5, 16}};
migraphx::shape sb{migraphx::shape::float_type, {16, 8}};
migraphx::shape sc{migraphx::shape::float_type, {2, 8}};
migraphx::shape sc{migraphx::shape::float_type, {5, 8}};
auto pa = mm->add_parameter("a", sa);
auto pb = mm->add_parameter("b", sb);
auto pc = mm->add_parameter("c", sc);
......@@ -77,9 +77,9 @@ TEST_CASE(int8_quantization)
{
auto p = create_program();
migraphx::parameter_map m;
migraphx::shape sa{migraphx::shape::float_type, {2, 16}};
migraphx::shape sa{migraphx::shape::float_type, {5, 16}};
migraphx::shape sb{migraphx::shape::float_type, {16, 8}};
migraphx::shape sc{migraphx::shape::float_type, {2, 8}};
migraphx::shape sc{migraphx::shape::float_type, {5, 8}};
m["a"] = migraphx::generate_argument(sa);
m["b"] = migraphx::generate_argument(sb);
m["c"] = migraphx::generate_argument(sc);
......
......@@ -9,6 +9,10 @@
#include <unordered_map>
#include <vector>
#ifdef __linux__
#include <unistd.h>
#endif
#ifndef MIGRAPHX_GUARD_TEST_TEST_HPP
#define MIGRAPHX_GUARD_TEST_TEST_HPP
......@@ -264,6 +268,32 @@ struct capture
}
};
// ANSI SGR (Select Graphic Rendition) codes used to colorize test output.
// Each enumerator's numeric value is the SGR parameter emitted inside the
// "\033[<n>m" escape sequence when a color is streamed to an ostream.
enum class color
{
    // Text attributes
    reset      = 0,
    bold       = 1,
    underlined = 4,
    // Foreground colors
    fg_red     = 31,
    fg_green   = 32,
    fg_yellow  = 33,
    fg_blue    = 34,
    fg_default = 39,
    // Background colors
    bg_red     = 41,
    bg_green   = 42,
    bg_yellow  = 43,
    bg_blue    = 44,
    bg_default = 49
};
// Stream the ANSI escape sequence for `c` into `os`; colors are suppressed
// when stdout is not a terminal (e.g. output piped to a file).
// NOTE(review): the tty check inspects stdout even when `os` is a different
// stream; acceptable here since this driver only writes to std::cout.
inline std::ostream& operator<<(std::ostream& os, const color& c)
{
// isatty/STDOUT_FILENO come from <unistd.h>, which this header includes only
// under __linux__ — guard the use with the same macro. The previous
// `#ifndef _WIN32` guard referenced isatty without its header on other
// POSIX platforms (e.g. macOS).
#ifdef __linux__
    static const bool use_color = isatty(STDOUT_FILENO) != 0;
    if(use_color)
        return os << "\033[" << static_cast<std::size_t>(c) << "m";
#endif
    return os;
}
template <class T, class F>
void failed(T x, const char* msg, const char* func, const char* file, int line, F f)
{
......@@ -271,7 +301,7 @@ void failed(T x, const char* msg, const char* func, const char* file, int line,
{
std::cout << func << std::endl;
std::cout << file << ":" << line << ":" << std::endl;
std::cout << " FAILED: " << msg << " "
std::cout << color::bold << color::fg_red << " FAILED: " << color::reset << msg << " "
<< "[ " << x << " ]" << std::endl;
f();
}
......@@ -315,7 +345,7 @@ auto near(T px, U py, double ptol = 1e-6f)
using string_map = std::unordered_map<std::string, std::vector<std::string>>;
template <class Keyword>
string_map parse(std::vector<std::string> as, Keyword keyword)
string_map generic_parse(std::vector<std::string> as, Keyword keyword)
{
string_map result;
......@@ -331,19 +361,22 @@ string_map parse(std::vector<std::string> as, Keyword keyword)
{
flag = f.front();
result[flag]; // Ensure the flag exists
flag = f.back();
}
}
return result;
}
using test_case = std::function<void()>;
inline auto& get_test_cases()
{
// NOLINTNEXTLINE
static std::vector<std::pair<std::string, std::function<void()>>> cases;
static std::vector<std::pair<std::string, test_case>> cases;
return cases;
}
inline void add_test_case(std::string name, std::function<void()> f)
inline void add_test_case(std::string name, test_case f)
{
get_test_cases().emplace_back(std::move(name), std::move(f));
}
......@@ -357,37 +390,243 @@ struct auto_register_test_case
}
};
inline void run_test_case(const std::string& name, const std::function<void()>& f)
struct failure_error
{
std::cout << "[ RUN ] " << name << std::endl;
f();
std::cout << "[ COMPLETE ] " << name << std::endl;
}
};
inline void run(int argc, const char* argv[])
[[noreturn]] inline void fail() { throw failure_error{}; }
struct driver
{
std::vector<std::string> as(argv + 1, argv + argc);
driver()
{
add_flag({"--help", "-h"}, "Show help");
add_flag({"--list", "-l"}, "List all test cases");
add_flag({"--continue", "-c"}, "Continue after failure");
add_flag({"--quiet", "-q"}, "Don't print out extra output");
}
struct argument
{
std::vector<std::string> flags = {};
std::string help = "";
int nargs = 1;
};
auto args = parse(as, [](auto &&) -> std::vector<std::string> { return {}; });
auto cases = args[""];
if(cases.empty())
void add_arg(const std::vector<std::string>& flags, const std::string& help = "")
{
for(auto&& tc : get_test_cases())
run_test_case(tc.first, tc.second);
arguments.push_back(argument{flags, help, 1});
}
else
void add_flag(const std::vector<std::string>& flags, const std::string& help = "")
{
std::unordered_map<std::string, std::function<void()>> m(get_test_cases().begin(),
get_test_cases().end());
for(auto&& name : cases)
arguments.push_back(argument{flags, help, 0});
}
// Print usage, positional-argument, and option help to stdout.
// `exe` is the program name (argv[0]) shown in the USAGE line.
void show_help(const std::string& exe) const
{
    std::cout << std::endl;
    std::cout << color::fg_yellow << "USAGE:" << color::reset << std::endl;
    std::cout << " ";
    std::cout << exe << " <test-case>... <options>" << std::endl;
    std::cout << std::endl;
    std::cout << color::fg_yellow << "ARGS:" << color::reset << std::endl;
    std::cout << " ";
    std::cout << color::fg_green << "<test-case>..." << color::reset;
    std::cout << std::endl;
    std::cout << " "
              << "Test case name to run" << std::endl;
    std::cout << std::endl;
    std::cout << color::fg_yellow << "OPTIONS:" << color::reset << std::endl;
    // One line per registered argument: its flags (comma-separated, green),
    // followed by the help text on the next line.
    for(auto&& arg : arguments)
    {
        std::string prefix = " ";
        std::cout << color::fg_green;
        for(const std::string& a : arg.flags)
        {
            std::cout << prefix;
            std::cout << a;
            prefix = ", ";
        }
        std::cout << color::reset << std::endl;
        std::cout << " " << arg.help << std::endl;
    }
}
// Stream used for all driver output: std::cout normally, or a no-op stream
// that silently discards everything when --quiet is in effect.
std::ostream& out() const
{
    // A streambuf that swallows every character written to it.
    struct null_buffer : std::streambuf
    {
        virtual int overflow(int c) override { return c; }
    };
    // Function-local statics: one shared discard stream for the process.
    static null_buffer buffer;
    static std::ostream null_stream(&buffer);
    if(quiet)
        return null_stream;
    return std::cout;
}
string_map parse(int argc, const char* argv[]) const
{
std::vector<std::string> args(argv + 1, argv + argc);
string_map keys;
for(auto&& arg : arguments)
{
auto f = m.find(name);
if(f == m.end())
std::cout << "[ ERROR ] Test case '" << name << "' not found." << std::endl;
for(auto&& flag : arg.flags)
{
keys[flag] = {arg.flags.front()};
if(arg.nargs == 0)
keys[flag].push_back("");
}
}
auto result = generic_parse(args, [&](auto&& s) -> std::vector<std::string> {
if(keys.count(s) > 0)
return keys[s];
else
run_test_case(name, f->second);
return {};
});
result["__exe__"].push_back(argv[0]);
return result;
}
// Rebuild a shell command line from a parsed argument map: the executable
// (stored under "__exe__") first, then any positional test-case names
// (stored under ""), then every flag followed by its values. Each value is
// wrapped in double quotes.
static std::string create_command(const string_map& args)
{
    std::stringstream cmd;
    cmd << args.at("__exe__").front();
    // Helper: append each value, space-separated and quoted.
    auto append_quoted = [&cmd](const std::vector<std::string>& values) {
        for(const auto& value : values)
            cmd << " \"" << value << "\"";
    };
    if(args.count("") > 0)
        append_quoted(args.at(""));
    for(const auto& entry : args)
    {
        const auto& flag = entry.first;
        // Skip the executable and the positional-argument bucket.
        if(flag == "__exe__" or flag.empty())
            continue;
        cmd << " " << flag;
        append_quoted(entry.second);
    }
    return cmd.str();
}
// Re-run this test binary in a child process to execute a single test case
// in isolation (used by --continue so one crash does not abort the run).
// Returns an empty string on success, or a message with the child's exit
// status on failure.
static std::string fork(const std::string& name, string_map args)
{
    std::string msg;
    // Child runs exactly this one case, quietly, and must not recurse into
    // --continue again. `args["--quiet"]` just ensures the flag key exists.
    args[""] = {name};
    args.erase("--continue");
    args["--quiet"];
    auto cmd = create_command(args);
    auto r = std::system(cmd.c_str()); // NOLINT
    if(r != 0)
        msg = "Exited with " + std::to_string(r);
    return msg;
}
// Execute one test case and record its outcome. With --continue the case is
// run in a forked child process so a crash/failure does not abort this run;
// otherwise it runs in-process and a failing EXPECT arrives as failure_error.
void run_test_case(const std::string& name, const test_case& f, const string_map& args)
{
    ran++;
    out() << color::fg_green << "[ RUN ] " << color::reset << color::bold << name
          << color::reset << std::endl;
    std::string msg;
    if(args.count("--continue") > 0)
    {
        msg = fork(name, args);
    }
    else
    {
        try
        {
            f();
        }
        catch(const failure_error&)
        {
            msg = "Test failure";
        }
    }
    // An empty message means the case passed.
    if(msg.empty())
    {
        out() << color::fg_green << "[ COMPLETE ] " << color::reset << color::bold << name
              << color::reset << std::endl;
    }
    else
    {
        failed.push_back(name);
        out() << color::fg_red << "[ FAILED ] " << color::reset << color::bold << name
              << color::reset << ": " << color::fg_yellow << msg << color::reset << std::endl;
    }
}
// Driver entry point: parse the command line, honor --help/--list/--quiet,
// run either all registered cases or just the ones named on the command
// line, then print a summary. Exits the process with status 1 on failure.
void run(int argc, const char* argv[])
{
    auto args = parse(argc, argv);
    if(args.count("--help") > 0)
    {
        show_help(args.at("__exe__").front());
        return;
    }
    if(args.count("--list") > 0)
    {
        for(auto&& tc : get_test_cases())
            out() << tc.first << std::endl;
        return;
    }
    if(args.count("--quiet") > 0)
        quiet = true;
    // Positional arguments (keyed by "") are the test-case names to run.
    auto cases = args[""];
    if(cases.empty())
    {
        for(auto&& tc : get_test_cases())
            run_test_case(tc.first, tc.second, args);
    }
    else
    {
        // Name -> callable lookup for the explicitly requested cases.
        std::unordered_map<std::string, test_case> m(get_test_cases().begin(),
                                                     get_test_cases().end());
        for(auto&& iname : cases)
        {
            // get_case_names may expand one input name into several cases.
            for(auto&& name : get_case_names(iname))
            {
                auto f = m.find(name);
                if(f == m.end())
                {
                    // Unknown names count as failures rather than being ignored.
                    out() << color::fg_red << "[ ERROR ] Test case '" << name
                          << "' not found." << color::reset << std::endl;
                    failed.push_back(name);
                }
                else
                    run_test_case(name, f->second, args);
            }
        }
    }
    out() << color::fg_green << "[==========] " << color::fg_yellow << ran << " tests ran"
          << color::reset << std::endl;
    if(not failed.empty())
    {
        out() << color::fg_red << "[ FAILED ] " << color::fg_yellow << failed.size()
              << " tests failed" << color::reset << std::endl;
        for(auto&& name : failed)
            out() << color::fg_red << "[ FAILED ] " << color::fg_yellow << name
                  << color::reset << std::endl;
        std::exit(1);
    }
}
std::function<std::vector<std::string>(const std::string&)> get_case_names =
[](const std::string& name) -> std::vector<std::string> { return {name}; };
std::vector<argument> arguments = {};
std::vector<std::string> failed = {};
std::size_t ran = 0;
bool quiet = false;
};
inline void run(int argc, const char* argv[])
{
driver d{};
d.run(argc, argv);
}
} // namespace test
......@@ -404,7 +643,7 @@ inline void run(int argc, const char* argv[])
__PRETTY_FUNCTION__, \
__FILE__, \
__LINE__, \
&std::abort)
&test::fail)
// NOLINTNEXTLINE
#define STATUS(...) EXPECT((__VA_ARGS__) == 0)
......
This diff is collapsed.
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/normalize_ops.hpp>
#include <migraphx/insert_pad.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/instruction.hpp>
#include <basic_ops.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/make_op.hpp>
#include <test.hpp>
// Normalize operator attributes, convert unsupported padding into explicit
// pad instructions via insert_pad, then clean up any dead code.
void run_pass(migraphx::module& m)
{
    migraphx::run_passes(
        m, {migraphx::normalize_ops{}, migraphx::insert_pad{}, migraphx::dead_code_elimination{}});
}
// Append an im2col instruction over l_img with a zero-filled 1x1 weight
// literal of the given channel count and asymmetric padding {0, 0, 1, 1}.
// Returns a reference to the newly added instruction.
migraphx::instruction_ref
create_im2col(migraphx::instruction_ref& l_img, size_t channels, migraphx::module& m)
{
    const size_t kernel_h = 1;
    const size_t kernel_w = 1;
    migraphx::shape w_shape{migraphx::shape::int32_type, {1, channels, kernel_h, kernel_w}};
    std::vector<int32_t> zero_weights(channels * kernel_h * kernel_w);
    auto w_lit = m.add_literal(migraphx::literal{w_shape, zero_weights});
    return m.add_instruction(
        migraphx::make_op("im2col", {{"padding", {0, 0, 1, 1}}}), l_img, w_lit);
}
// Append a convolution over l_img using a zero-filled 4 x channels x 3 x 3
// weight literal and asymmetric padding {0, 0, 1, 1}. The padding mode
// defaults to default_ but may be overridden by the caller.
migraphx::instruction_ref
create_conv(migraphx::instruction_ref& l_img,
            size_t channels,
            migraphx::module& m,
            migraphx::op::padding_mode_t padding_mode = migraphx::op::padding_mode_t::default_)
{
    const size_t out_channels = 4;
    const size_t kdim         = 3;
    std::vector<int32_t> zero_weights(out_channels * channels * kdim * kdim);
    migraphx::shape w_shape{migraphx::shape::int32_type, {out_channels, channels, kdim, kdim}};
    auto w_lit = m.add_literal(migraphx::literal{w_shape, zero_weights});
    migraphx::op::convolution conv_op;
    conv_op.padding_mode = padding_mode;
    conv_op.padding      = {0, 0, 1, 1};
    return m.add_instruction(conv_op, l_img, w_lit);
}
// With asymmetric padding {0, 0, 1, 1} on im2col, convolution, and pooling,
// the insert_pad pass must materialize explicit pad instructions (the ops'
// own padding attribute cannot express the asymmetry).
TEST_CASE(rewrite_pad)
{
    migraphx::module m;
    size_t img_dim[2] = {2, 2};
    size_t channels   = 1;
    std::vector<int32_t> input(channels * img_dim[0] * img_dim[1]);
    std::iota(input.begin(), input.end(), 0);
    migraphx::shape s_img{migraphx::shape::int32_type, {1, channels, img_dim[0], img_dim[1]}};
    auto l_img = m.add_literal(migraphx::literal{s_img, input});
    auto l0 = create_im2col(l_img, channels, m);
    auto l1 = create_conv(l_img, channels, m);
    auto l2 = m.add_instruction(
        migraphx::make_op("pooling", {{"mode", "max"}, {"padding", {0, 0, 1, 1}}}), l_img);
    // identity keeps all three results alive through dead-code elimination.
    m.add_instruction(migraphx::make_op("identity"), l0, l1, l2);
    run_pass(m);
    // At least one explicit pad instruction must have been inserted.
    EXPECT(std::any_of(
        m.begin(), m.end(), [](const migraphx::instruction& ins) { return ins.name() == "pad"; }));
}
// Symmetric padding {1, 1, 1, 1} can be expressed directly by the pooling
// op's padding attribute, so the pass must NOT insert a pad instruction.
TEST_CASE(rewrite_pad_symmetric)
{
    migraphx::module m;
    size_t img_dim[2] = {2, 2};
    size_t channels   = 1;
    std::vector<int32_t> input(channels * img_dim[0] * img_dim[1]);
    std::iota(input.begin(), input.end(), 0);
    migraphx::shape s_img{migraphx::shape::int32_type, {1, channels, img_dim[0], img_dim[1]}};
    auto l_img = m.add_literal(migraphx::literal{s_img, input});
    m.add_instruction(migraphx::make_op("pooling", {{"mode", "max"}, {"padding", {1, 1, 1, 1}}}),
                      l_img);
    run_pass(m);
    EXPECT(std::none_of(
        m.begin(), m.end(), [](const migraphx::instruction& ins) { return ins.name() == "pad"; }));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
#include "test.hpp"
int main() {}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
This diff is collapsed.
This diff is collapsed.
......@@ -75,6 +75,26 @@ TEST_CASE(gather_test_1)
EXPECT(m1 == m2);
}
// Build a one-op module: a pooling over a {2, 3, 4, 5} float parameter named
// "data", carrying the given padding attribute, returned as the module's
// result.
migraphx::module create_padded_op(const std::vector<size_t>& pad_vals)
{
    migraphx::module mod;
    migraphx::shape data_shape{migraphx::shape::float_type, {2, 3, 4, 5}};
    auto data = mod.add_parameter("data", data_shape);
    auto pool = mod.add_instruction(migraphx::make_op("pooling", {{"padding", pad_vals}}), data);
    mod.add_return({pool});
    return mod;
}
// After normalization, a 2-element padding attribute {0, 1} must be expanded
// to the equivalent full 4-element form {0, 1, 0, 1}, making the two modules
// compare equal.
TEST_CASE(padding_attr_test)
{
    migraphx::module m1 = create_padded_op({0, 1});
    migraphx::module m2 = create_padded_op({0, 1, 0, 1});
    run_pass(m1);
    EXPECT(m1 == m2);
}
migraphx::module create_reduce_mean(const std::vector<int64_t>& axes)
{
migraphx::module m;
......
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment