Commit c6ec6638 authored by Khalique Ahmed's avatar Khalique Ahmed
Browse files

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into auto_contig_fix

parents b42c7b41 a6d1540f
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <cmath>
#include <migraphx/float_equal.hpp>
#include <migraphx/float8.hpp>
#include <migraphx/half.hpp>
#include <migraphx/ranges.hpp>
#include "test.hpp"
#include <limits>
#include <sstream>
float fp8e5m2_to_fp32_value(uint8_t input)
{
constexpr std::array<float, 256> e4m3fnuz_lut = {
0.0,
1.52587890625e-05,
3.0517578125e-05,
4.57763671875e-05,
6.103515625e-05,
7.62939453125e-05,
9.1552734375e-05,
0.0001068115234375,
0.0001220703125,
0.000152587890625,
0.00018310546875,
0.000213623046875,
0.000244140625,
0.00030517578125,
0.0003662109375,
0.00042724609375,
0.00048828125,
0.0006103515625,
0.000732421875,
0.0008544921875,
0.0009765625,
0.001220703125,
0.00146484375,
0.001708984375,
0.001953125,
0.00244140625,
0.0029296875,
0.00341796875,
0.00390625,
0.0048828125,
0.005859375,
0.0068359375,
0.0078125,
0.009765625,
0.01171875,
0.013671875,
0.015625,
0.01953125,
0.0234375,
0.02734375,
0.03125,
0.0390625,
0.046875,
0.0546875,
0.0625,
0.078125,
0.09375,
0.109375,
0.125,
0.15625,
0.1875,
0.21875,
0.25,
0.3125,
0.375,
0.4375,
0.5,
0.625,
0.75,
0.875,
1.0,
1.25,
1.5,
1.75,
2.0,
2.5,
3.0,
3.5,
4.0,
5.0,
6.0,
7.0,
8.0,
10.0,
12.0,
14.0,
16.0,
20.0,
24.0,
28.0,
32.0,
40.0,
48.0,
56.0,
64.0,
80.0,
96.0,
112.0,
128.0,
160.0,
192.0,
224.0,
256.0,
320.0,
384.0,
448.0,
512.0,
640.0,
768.0,
896.0,
1024.0,
1280.0,
1536.0,
1792.0,
2048.0,
2560.0,
3072.0,
3584.0,
4096.0,
5120.0,
6144.0,
7168.0,
8192.0,
10240.0,
12288.0,
14336.0,
16384.0,
20480.0,
24576.0,
28672.0,
32768.0,
40960.0,
49152.0,
57344.0,
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN(),
-0.0,
-1.52587890625e-05,
-3.0517578125e-05,
-4.57763671875e-05,
-6.103515625e-05,
-7.62939453125e-05,
-9.1552734375e-05,
-0.0001068115234375,
-0.0001220703125,
-0.000152587890625,
-0.00018310546875,
-0.000213623046875,
-0.000244140625,
-0.00030517578125,
-0.0003662109375,
-0.00042724609375,
-0.00048828125,
-0.0006103515625,
-0.000732421875,
-0.0008544921875,
-0.0009765625,
-0.001220703125,
-0.00146484375,
-0.001708984375,
-0.001953125,
-0.00244140625,
-0.0029296875,
-0.00341796875,
-0.00390625,
-0.0048828125,
-0.005859375,
-0.0068359375,
-0.0078125,
-0.009765625,
-0.01171875,
-0.013671875,
-0.015625,
-0.01953125,
-0.0234375,
-0.02734375,
-0.03125,
-0.0390625,
-0.046875,
-0.0546875,
-0.0625,
-0.078125,
-0.09375,
-0.109375,
-0.125,
-0.15625,
-0.1875,
-0.21875,
-0.25,
-0.3125,
-0.375,
-0.4375,
-0.5,
-0.625,
-0.75,
-0.875,
-1.0,
-1.25,
-1.5,
-1.75,
-2.0,
-2.5,
-3.0,
-3.5,
-4.0,
-5.0,
-6.0,
-7.0,
-8.0,
-10.0,
-12.0,
-14.0,
-16.0,
-20.0,
-24.0,
-28.0,
-32.0,
-40.0,
-48.0,
-56.0,
-64.0,
-80.0,
-96.0,
-112.0,
-128.0,
-160.0,
-192.0,
-224.0,
-256.0,
-320.0,
-384.0,
-448.0,
-512.0,
-640.0,
-768.0,
-896.0,
-1024.0,
-1280.0,
-1536.0,
-1792.0,
-2048.0,
-2560.0,
-3072.0,
-3584.0,
-4096.0,
-5120.0,
-6144.0,
-7168.0,
-8192.0,
-10240.0,
-12288.0,
-14336.0,
-16384.0,
-20480.0,
-24576.0,
-28672.0,
-32768.0,
-40960.0,
-49152.0,
-57344.0,
-1.0f * std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN(),
};
return e4m3fnuz_lut[input];
}
TEST_CASE(test_fp8_cast_to_float)
{
std::vector<uint8_t> bit_vals(256);
std::iota(bit_vals.begin(), bit_vals.end(), 0);
EXPECT(bool{std::all_of(bit_vals.begin(), bit_vals.end(), [](uint8_t bit_val) {
migraphx::fp8::fp8e5m2 fp8_val(bit_val, migraphx::fp8::fp8e5m2::from_bits());
if(std::isnan(float(fp8_val)) and std::isnan(fp8e5m2_to_fp32_value(bit_val)))
{
return true;
}
else if(std::isinf(float(fp8_val)) and std::isinf(fp8e5m2_to_fp32_value(bit_val)))
{
return true;
}
return migraphx::float_equal(float(fp8_val), fp8e5m2_to_fp32_value(bit_val));
})});
}
TEST_CASE(test_fp8_cast_from_float)
{
std::unordered_map<float, uint8_t> test_vals = {
{-60000, 0xfb},
{-57344, 0xfb},
{-448, 0xdf},
{-256, 0xdc},
{-240, 0xdc},
{-200, 0xda},
{-20, 0xcd},
{-2, 0xc0},
{-1, 0xbc},
{-0.5, 0xb8},
{-0.2, 0xb2},
{-0.1111, 0xaf},
{-0.111, 0xaf},
{-0.11, 0xaf},
{-0.1, 0xae},
{6.10351e-05, 0x4},
{-6.10351e-05, 0x84},
{3.05176e-05, 0x2},
{-3.05176e-05, 0x82},
{1.52588e-05, 0x1},
{-1.52588e-05, 0x81},
{7.62939e-06, 0x0},
{-7.62939e-06, 0x80},
{0.1, 0x2e},
{0.11, 0x2f},
{0.111, 0x2f},
{0.1111, 0x2f},
{0.2, 0x32},
{0.5, 0x38},
{1, 0x3c},
{2, 0x40},
{20, 0x4d},
{200, 0x5a},
{240, 0x5c},
{256, 0x5c},
{448, 0x5f},
{57344, 0x7b},
{60000, 0x7b},
{1e+07, 0x7b},
};
EXPECT(bool{std::all_of(test_vals.begin(), test_vals.end(), [](const auto sample) {
return migraphx::float_equal(
migraphx::fp8::fp8e5m2(sample.first),
migraphx::fp8::fp8e5m2(sample.second, migraphx::fp8::fp8e5m2::from_bits()));
})});
}
TEST_CASE(test_positive_zero)
{
float zero = 0.0;
migraphx::fp8::fp8e5m2 fp8_zero(zero);
EXPECT(fp8_zero.is_zero());
EXPECT(migraphx::float_equal(zero, float(fp8_zero)));
}
TEST_CASE(test_negative_zero)
{
float nzero = -0.0;
migraphx::fp8::fp8e5m2 fp8_nzero(nzero);
EXPECT(fp8_nzero.is_zero());
// negative zero is preserved for fp8e5m2
EXPECT(migraphx::float_equal(nzero, float(fp8_nzero)));
}
TEST_CASE(test_pos_zero_eq_neg_zero)
{
float nzero = -0.0;
float pzero = 0.0;
migraphx::fp8::fp8e5m2 fp8_nzero(nzero);
migraphx::fp8::fp8e5m2 fp8_pzero(pzero);
EXPECT(fp8_nzero == fp8_pzero);
}
TEST_CASE(test_nan_1)
{
float fnan = std::numeric_limits<float>::quiet_NaN();
migraphx::fp8::fp8e5m2 fp8_nan(fnan);
EXPECT(fp8_nan.is_nan());
EXPECT(std::isnan(fp8_nan));
}
TEST_CASE(test_nan_2)
{
auto fnan = std::numeric_limits<migraphx::fp8::fp8e5m2>::quiet_NaN();
migraphx::fp8::fp8e5m2 fp8_nan(fnan.data, migraphx::fp8::fp8e5m2::from_bits());
EXPECT(fp8_nan.is_nan());
EXPECT(std::isnan(fp8_nan));
EXPECT(std::isnan(float(fp8_nan)));
}
TEST_CASE(test_infinity_1)
{
// float infinity should get clipped to max
float finf = std::numeric_limits<float>::infinity();
migraphx::fp8::fp8e5m2 fp8_max(finf);
EXPECT(fp8_max == std::numeric_limits<migraphx::fp8::fp8e5m2>::max());
}
TEST_CASE(test_infinity_2)
{
// neg inf
float finf = -1.0 * std::numeric_limits<float>::infinity();
// no inf in fp8e5m2, it gets clipped to lowest
migraphx::fp8::fp8e5m2 fp8_lowest(finf);
EXPECT(bool{fp8_lowest == std::numeric_limits<migraphx::fp8::fp8e5m2>::lowest()});
}
TEST_CASE(test_numeric_max_1)
{
float fmax = std::numeric_limits<float>::max();
migraphx::fp8::fp8e5m2 fp8_max(fmax);
EXPECT(fp8_max == std::numeric_limits<migraphx::fp8::fp8e5m2>::max());
}
TEST_CASE(test_numeric_max_2)
{
// gets clipped to max
float fmax = 2 * std::numeric_limits<migraphx::fp8::fp8e5m2>::max();
migraphx::fp8::fp8e5m2 fp8_max(fmax);
EXPECT(fp8_max == std::numeric_limits<migraphx::fp8::fp8e5m2>::max());
}
TEST_CASE(test_numeric_lowest_1)
{
float flowest = std::numeric_limits<float>::lowest();
migraphx::fp8::fp8e5m2 fp8_lowest(flowest);
EXPECT(fp8_lowest == std::numeric_limits<migraphx::fp8::fp8e5m2>::lowest());
}
TEST_CASE(test_numeric_lowest_2)
{
// gets clipped to lowest
float fmin = 2.0 * std::numeric_limits<migraphx::fp8::fp8e5m2>::lowest();
migraphx::fp8::fp8e5m2 fp8_lowest(fmin);
EXPECT(fp8_lowest == std::numeric_limits<migraphx::fp8::fp8e5m2>::lowest());
}
TEST_CASE(test_max_eq_lowest)
{
EXPECT(migraphx::float_equal(std::numeric_limits<migraphx::fp8::fp8e5m2>::lowest(),
-1 * std::numeric_limits<migraphx::fp8::fp8e5m2>::max()));
}
TEST_CASE(test_isfinite)
{
EXPECT(std::isfinite(migraphx::fp8::fp8e5m2(0.0)));
EXPECT(std::isfinite(migraphx::fp8::fp8e5m2(-0.0)));
EXPECT(not std::isfinite(
migraphx::fp8::fp8e5m2(std::numeric_limits<migraphx::fp8::fp8e5m2>::quiet_NaN())));
EXPECT(not std::isfinite(std::numeric_limits<migraphx::fp8::fp8e5m2>::infinity()));
// -1.0 * inf is float(-inf) which with clipping/saturation gets converted into fp8::lowest()
EXPECT(std::isfinite(
migraphx::fp8::fp8e5m2(-1.0 * std::numeric_limits<migraphx::fp8::fp8e5m2>::infinity())));
EXPECT(not std::isfinite(migraphx::fp8::fp8e5m2(0xFC, migraphx::fp8::fp8e5m2::from_bits())));
}
TEST_CASE(test_binary_ops)
{
auto a = migraphx::fp8::fp8e5m2(-1.0);
auto b = migraphx::fp8::fp8e5m2(1.0);
auto c = migraphx::fp8::fp8e5m2(0.0);
auto d = migraphx::fp8::fp8e5m2(-0.0);
EXPECT(migraphx::float_equal((c + d), c));
EXPECT(migraphx::float_equal((c + d), d));
EXPECT(migraphx::float_equal((a + b), c));
EXPECT(migraphx::float_equal((a + b), d));
auto e = migraphx::fp8::fp8e5m2(10.0);
auto f = migraphx::fp8::fp8e5m2(-10.0);
EXPECT(bool{e > f});
EXPECT(bool{f < e});
EXPECT(bool{f <= e});
EXPECT(bool{e >= f});
EXPECT(bool{e <= e});
EXPECT(bool{f >= f});
EXPECT(not migraphx::float_equal(f, e));
}
TEST_CASE(test_fabs)
{
auto a = migraphx::fp8::fp8e5m2(-1.0);
auto b = migraphx::fp8::fp8e5m2(1.0);
EXPECT(migraphx::float_equal(b, migraphx::fp8::fabs(a)));
}
TEST_CASE(test_stream_op)
{
auto a = migraphx::fp8::fp8e5m2(-1.0);
std::stringstream ss;
ss << a;
EXPECT(std::string("-1") == ss.str());
ss = std::stringstream();
auto b = std::numeric_limits<migraphx::fp8::fp8e5m2>::quiet_NaN();
ss << b;
EXPECT(std::string("nan") == ss.str());
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <cmath>
#include <migraphx/float_equal.hpp>
#include <migraphx/float8.hpp>
#include <migraphx/half.hpp>
#include <migraphx/ranges.hpp>
#include "test.hpp"
#include <limits>
float fp8e5m2fnuz_to_fp32_value(uint8_t input)
{
constexpr std::array<float, 256> e4m3fnuz_lut = {
0.0,
7.62939453125e-06,
1.52587890625e-05,
2.288818359375e-05,
3.0517578125e-05,
3.814697265625e-05,
4.57763671875e-05,
5.340576171875e-05,
6.103515625e-05,
7.62939453125e-05,
9.1552734375e-05,
0.0001068115234375,
0.0001220703125,
0.000152587890625,
0.00018310546875,
0.000213623046875,
0.000244140625,
0.00030517578125,
0.0003662109375,
0.00042724609375,
0.00048828125,
0.0006103515625,
0.000732421875,
0.0008544921875,
0.0009765625,
0.001220703125,
0.00146484375,
0.001708984375,
0.001953125,
0.00244140625,
0.0029296875,
0.00341796875,
0.00390625,
0.0048828125,
0.005859375,
0.0068359375,
0.0078125,
0.009765625,
0.01171875,
0.013671875,
0.015625,
0.01953125,
0.0234375,
0.02734375,
0.03125,
0.0390625,
0.046875,
0.0546875,
0.0625,
0.078125,
0.09375,
0.109375,
0.125,
0.15625,
0.1875,
0.21875,
0.25,
0.3125,
0.375,
0.4375,
0.5,
0.625,
0.75,
0.875,
1.0,
1.25,
1.5,
1.75,
2.0,
2.5,
3.0,
3.5,
4.0,
5.0,
6.0,
7.0,
8.0,
10.0,
12.0,
14.0,
16.0,
20.0,
24.0,
28.0,
32.0,
40.0,
48.0,
56.0,
64.0,
80.0,
96.0,
112.0,
128.0,
160.0,
192.0,
224.0,
256.0,
320.0,
384.0,
448.0,
512.0,
640.0,
768.0,
896.0,
1024.0,
1280.0,
1536.0,
1792.0,
2048.0,
2560.0,
3072.0,
3584.0,
4096.0,
5120.0,
6144.0,
7168.0,
8192.0,
10240.0,
12288.0,
14336.0,
16384.0,
20480.0,
24576.0,
28672.0,
32768.0,
40960.0,
49152.0,
57344.0,
std::numeric_limits<float>::quiet_NaN(),
-7.62939453125e-06,
-1.52587890625e-05,
-2.288818359375e-05,
-3.0517578125e-05,
-3.814697265625e-05,
-4.57763671875e-05,
-5.340576171875e-05,
-6.103515625e-05,
-7.62939453125e-05,
-9.1552734375e-05,
-0.0001068115234375,
-0.0001220703125,
-0.000152587890625,
-0.00018310546875,
-0.000213623046875,
-0.000244140625,
-0.00030517578125,
-0.0003662109375,
-0.00042724609375,
-0.00048828125,
-0.0006103515625,
-0.000732421875,
-0.0008544921875,
-0.0009765625,
-0.001220703125,
-0.00146484375,
-0.001708984375,
-0.001953125,
-0.00244140625,
-0.0029296875,
-0.00341796875,
-0.00390625,
-0.0048828125,
-0.005859375,
-0.0068359375,
-0.0078125,
-0.009765625,
-0.01171875,
-0.013671875,
-0.015625,
-0.01953125,
-0.0234375,
-0.02734375,
-0.03125,
-0.0390625,
-0.046875,
-0.0546875,
-0.0625,
-0.078125,
-0.09375,
-0.109375,
-0.125,
-0.15625,
-0.1875,
-0.21875,
-0.25,
-0.3125,
-0.375,
-0.4375,
-0.5,
-0.625,
-0.75,
-0.875,
-1.0,
-1.25,
-1.5,
-1.75,
-2.0,
-2.5,
-3.0,
-3.5,
-4.0,
-5.0,
-6.0,
-7.0,
-8.0,
-10.0,
-12.0,
-14.0,
-16.0,
-20.0,
-24.0,
-28.0,
-32.0,
-40.0,
-48.0,
-56.0,
-64.0,
-80.0,
-96.0,
-112.0,
-128.0,
-160.0,
-192.0,
-224.0,
-256.0,
-320.0,
-384.0,
-448.0,
-512.0,
-640.0,
-768.0,
-896.0,
-1024.0,
-1280.0,
-1536.0,
-1792.0,
-2048.0,
-2560.0,
-3072.0,
-3584.0,
-4096.0,
-5120.0,
-6144.0,
-7168.0,
-8192.0,
-10240.0,
-12288.0,
-14336.0,
-16384.0,
-20480.0,
-24576.0,
-28672.0,
-32768.0,
-40960.0,
-49152.0,
-57344.0,
};
return e4m3fnuz_lut[input];
}
TEST_CASE(test_fp8_cast_to_float)
{
std::vector<uint8_t> bit_vals(256);
std::iota(bit_vals.begin(), bit_vals.end(), 0);
EXPECT(bool{std::all_of(bit_vals.begin(), bit_vals.end(), [](uint8_t bit_val) {
migraphx::fp8::fp8e5m2fnuz fp8_val(bit_val, migraphx::fp8::fp8e5m2fnuz::from_bits());
if(std::isnan(float(fp8_val)) and std::isnan(fp8e5m2fnuz_to_fp32_value(bit_val)))
{
return true;
}
return migraphx::float_equal(float(fp8_val), fp8e5m2fnuz_to_fp32_value(bit_val));
})});
}
TEST_CASE(test_fp8_cast_from_float)
{
std::unordered_map<float, uint8_t> test_vals = {
{57344, 0x7f}, {-57344, 0xff}, {60000, 0x7f}, {-60000, 0xff},
{448, 0x63}, {-448, 0xe3}, {256, 0x60}, {-256, 0xe0},
{240, 0x60}, {-240, 0xe0}, {3.05176e-05, 0x4}, {-3.05176e-05, 0x84},
{1.52588e-05, 0x2}, {-1.52588e-05, 0x82}, {7.62939e-06, 0x1}, {-7.62939e-06, 0x81},
{3.81469e-06, 0x0}, {-3.81469e-06, 0x0}, {1e+07, 0x7f}, {1, 0x40},
{-1, 0xc0}, {0.1, 0x32}, {0.11, 0x33}, {0.111, 0x33},
{0.1111, 0x33}, {-0.1, 0xb2}, {-0.11, 0xb3}, {-0.111, 0xb3},
{-0.1111, 0xb3}, {0.2, 0x36}, {2, 0x44}, {20, 0x51},
{200, 0x5e}, {-0.2, 0xb6}, {-2, 0xc4}, {-20, 0xd1},
{-200, 0xde}, {0.5, 0x3c}, {-0.5, 0xbc}, {1.17549e-38, 0x0},
{1.4013e-45, 0x0},
};
EXPECT(bool{std::all_of(test_vals.begin(), test_vals.end(), [](const auto sample) {
return migraphx::float_equal(
migraphx::fp8::fp8e5m2fnuz(sample.first),
migraphx::fp8::fp8e5m2fnuz(sample.second, migraphx::fp8::fp8e5m2fnuz::from_bits()));
})});
}
TEST_CASE(test_positive_zero)
{
float zero = 0.0;
migraphx::fp8::fp8e5m2fnuz fp8_zero(zero);
EXPECT(fp8_zero.is_zero());
EXPECT(migraphx::float_equal(zero, float(fp8_zero)));
}
TEST_CASE(test_negative_zero)
{
float nzero = -0.0;
float pzero = 0.0;
migraphx::fp8::fp8e5m2fnuz fp8_nzero(nzero);
EXPECT(fp8_nzero.is_zero());
// negative zero gets converted to positive zero
EXPECT(migraphx::float_equal(pzero, float(fp8_nzero)));
}
TEST_CASE(test_nan_1)
{
float fnan = std::numeric_limits<float>::quiet_NaN();
migraphx::fp8::fp8e5m2fnuz fp8_nan(fnan);
EXPECT(fp8_nan.is_nan());
EXPECT(std::isnan(fp8_nan));
}
TEST_CASE(test_nan_2)
{
auto fnan = std::numeric_limits<migraphx::fp8::fp8e5m2fnuz>::quiet_NaN();
migraphx::fp8::fp8e5m2fnuz fp8_nan(fnan.data, migraphx::fp8::fp8e5m2fnuz::from_bits());
EXPECT(fp8_nan.is_nan());
EXPECT(std::isnan(fp8_nan));
EXPECT(std::isnan(float(fp8_nan)));
}
TEST_CASE(test_infinity_1)
{
float finf = std::numeric_limits<float>::infinity();
// no inf in fp8e5m2fnuz it gets clipped to Nans
migraphx::fp8::fp8e5m2fnuz fp8_nan(finf);
EXPECT(fp8_nan.is_nan());
EXPECT(std::isnan(float(fp8_nan)));
}
TEST_CASE(test_infinity_2)
{
// neg inf
float finf = -1.0 * std::numeric_limits<float>::infinity();
// no inf in fp8e5m2fnuz it gets clipped to NaNs
migraphx::fp8::fp8e5m2fnuz fp8_nan(finf);
EXPECT(fp8_nan.is_nan());
EXPECT(std::isnan(float(fp8_nan)));
}
TEST_CASE(test_numeric_max_1)
{
float fmax = std::numeric_limits<float>::max();
migraphx::fp8::fp8e5m2fnuz fp8_max(fmax);
EXPECT(fp8_max == std::numeric_limits<migraphx::fp8::fp8e5m2fnuz>::max());
}
TEST_CASE(test_numeric_max_2)
{
// gets clipped to max
float fmax = 2 * std::numeric_limits<migraphx::fp8::fp8e5m2fnuz>::max();
migraphx::fp8::fp8e5m2fnuz fp8_max(fmax);
EXPECT(fp8_max == std::numeric_limits<migraphx::fp8::fp8e5m2fnuz>::max());
}
TEST_CASE(test_numeric_lowest_1)
{
float flowest = std::numeric_limits<float>::lowest();
migraphx::fp8::fp8e5m2fnuz fp8_lowest(flowest);
EXPECT(fp8_lowest == std::numeric_limits<migraphx::fp8::fp8e5m2fnuz>::lowest());
}
TEST_CASE(test_numeric_lowest_2)
{
// gets clipped to lowest
float fmin = 2.0 * std::numeric_limits<migraphx::fp8::fp8e5m2fnuz>::lowest();
migraphx::fp8::fp8e5m2fnuz fp8_lowest(fmin);
EXPECT(fp8_lowest == std::numeric_limits<migraphx::fp8::fp8e5m2fnuz>::lowest());
}
TEST_CASE(test_max_eq_lowest)
{
EXPECT(migraphx::float_equal(std::numeric_limits<migraphx::fp8::fp8e5m2fnuz>::lowest(),
-1 * std::numeric_limits<migraphx::fp8::fp8e5m2fnuz>::max()));
}
TEST_CASE(test_isfinite)
{
EXPECT(std::isfinite(migraphx::fp8::fp8e5m2fnuz(0.0)));
EXPECT(std::isfinite(migraphx::fp8::fp8e5m2fnuz(-0.0)));
EXPECT(not std::isfinite(
migraphx::fp8::fp8e5m2fnuz(std::numeric_limits<migraphx::fp8::fp8e5m2fnuz>::quiet_NaN())));
}
TEST_CASE(test_no_infinity)
{
EXPECT(not bool{std::numeric_limits<migraphx::fp8::fp8e5m2fnuz>::has_infinity});
}
TEST_CASE(test_binary_ops)
{
auto a = migraphx::fp8::fp8e5m2fnuz(-1.0);
auto b = migraphx::fp8::fp8e5m2fnuz(1.0);
auto c = migraphx::fp8::fp8e5m2fnuz(0.0);
auto d = migraphx::fp8::fp8e5m2fnuz(-0.0);
EXPECT(migraphx::float_equal((c + d), c));
EXPECT(migraphx::float_equal((c + d), d));
EXPECT(migraphx::float_equal((a + b), c));
EXPECT(migraphx::float_equal((a + b), d));
auto e = migraphx::fp8::fp8e5m2fnuz(10.0);
auto f = migraphx::fp8::fp8e5m2fnuz(-10.0);
EXPECT(bool{e > f});
EXPECT(bool{f < e});
EXPECT(bool{f <= e});
EXPECT(bool{e >= f});
EXPECT(bool{e <= e});
EXPECT(bool{f >= f});
EXPECT(not migraphx::float_equal(f, e));
}
TEST_CASE(test_fabs)
{
auto a = migraphx::fp8::fp8e5m2fnuz(-1.0);
auto b = migraphx::fp8::fp8e5m2fnuz(1.0);
EXPECT(migraphx::float_equal(b, migraphx::fp8::fabs(a)));
}
TEST_CASE(test_stream_op)
{
auto a = migraphx::fp8::fp8e5m2fnuz(-1.0);
std::stringstream ss;
ss << a;
EXPECT(std::string("-1") == ss.str());
ss = std::stringstream();
auto b = std::numeric_limits<migraphx::fp8::fp8e5m2fnuz>::quiet_NaN();
ss << b;
EXPECT(std::string("nan") == ss.str());
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -414,8 +414,8 @@ TEST_CASE(add_reshape_add_nonstandard)
auto y = mm->add_parameter("y", s1);
auto z = mm->add_parameter("z", s2);
auto add1 = mm->add_instruction(migraphx::make_op("add"), x, y);
auto c = mm->add_instruction(migraphx::make_op("contiguous"), add1);
auto reshape = mm->add_instruction(migraphx::make_op("reshape", {{"dims", s2.lens()}}), c);
auto reshape =
mm->add_instruction(migraphx::make_op("reshape", {{"dims", s2.lens()}}), add1);
auto add2 = mm->add_instruction(migraphx::make_op("add"), reshape, z);
mm->add_return({add2});
}
......@@ -426,10 +426,8 @@ TEST_CASE(add_reshape_add_nonstandard)
auto x = mm->add_parameter("x", s1);
auto y = mm->add_parameter("y", s1);
auto z = mm->add_parameter("z", s2);
auto cx = mm->add_instruction(migraphx::make_op("contiguous"), x);
auto cy = mm->add_instruction(migraphx::make_op("contiguous"), y);
auto x2 = mm->add_instruction(migraphx::make_op("reshape", {{"dims", s3.lens()}}), cx);
auto y2 = mm->add_instruction(migraphx::make_op("reshape", {{"dims", s3.lens()}}), cy);
auto x2 = mm->add_instruction(migraphx::make_op("reshape", {{"dims", s3.lens()}}), x);
auto y2 = mm->add_instruction(migraphx::make_op("reshape", {{"dims", s3.lens()}}), y);
auto z2 = mm->add_instruction(migraphx::make_op("reshape", {{"dims", s3.lens()}}), z);
auto fadd =
add_pointwise(p2, "main:pointwise0", {x2, y2, z2}, [=](auto* pm, const auto& inputs) {
......@@ -466,10 +464,8 @@ TEST_CASE(add_unsqueeze_add_nonstandard)
auto x = mm->add_parameter("x", s1);
auto y = mm->add_parameter("y", s1);
auto z = mm->add_parameter("z", s2);
auto cx = mm->add_instruction(migraphx::make_op("contiguous"), x);
auto cy = mm->add_instruction(migraphx::make_op("contiguous"), y);
auto x2 = mm->add_instruction(migraphx::make_op("reshape", {{"dims", s2.lens()}}), cx);
auto y2 = mm->add_instruction(migraphx::make_op("reshape", {{"dims", s2.lens()}}), cy);
auto x2 = mm->add_instruction(migraphx::make_op("reshape", {{"dims", s2.lens()}}), x);
auto y2 = mm->add_instruction(migraphx::make_op("reshape", {{"dims", s2.lens()}}), y);
auto fadd =
add_pointwise(p2, "main:pointwise0", {x2, y2, z}, [=](auto* pm, const auto& inputs) {
auto add1 = pm->add_instruction(migraphx::make_op("add"), inputs[0], inputs[1]);
......
......@@ -139,7 +139,8 @@ const std::string math_template = R"__migraphx__(
#include <migraphx/kernels/pointwise.hpp>
#include <migraphx/kernels/math.hpp>
#include <migraphx/kernels/types.hpp>
using namespace migraphx;
namespace migraphx {
extern "C" {
__global__ void kernel(${type}* p)
{
......@@ -148,6 +149,7 @@ __global__ void kernel(${type}* p)
}
}
}
int main() {}
......@@ -237,12 +239,12 @@ TEST_CASE(code_object_hip)
std::vector<migraphx::shape> expected_inputs = {input, input};
auto co = migraphx::make_op("gpu::code_object",
{{"code_object", migraphx::value::binary{binaries.front()}},
{"symbol_name", "add_2"},
{"global", input.elements()},
{"local", 1024},
{"expected_inputs", migraphx::to_value(expected_inputs)},
{"output", migraphx::to_value(input)}});
{{"code_object", migraphx::value::binary{binaries.front()}},
{"symbol_name", "add_2"},
{"global", input.elements()},
{"local", 1024},
{"expected_inputs", migraphx::to_value(expected_inputs)},
{"output", migraphx::to_value(input)}});
migraphx::program p;
auto* mm = p.get_main_module();
......@@ -348,7 +350,10 @@ TEST_CASE(compile_math)
auto vec_sizes = {2, 4, 6};
for(auto&& t : migraphx::shape::types())
{
if(contains({migraphx::shape::bool_type, migraphx::shape::tuple_type}, t))
if(contains({migraphx::shape::bool_type,
migraphx::shape::fp8e4m3fnuz_type,
migraphx::shape::tuple_type},
t))
continue;
auto name = migraphx::shape::cpp_type(t);
if(t == migraphx::shape::half_type)
......@@ -396,7 +401,10 @@ TEST_CASE(assert_type_min_max)
migraphx::gpu::hip_compile_options options;
for(auto&& t : migraphx::shape::types())
{
if(contains({migraphx::shape::bool_type, migraphx::shape::tuple_type}, t))
if(contains({migraphx::shape::bool_type,
migraphx::shape::fp8e4m3fnuz_type,
migraphx::shape::tuple_type},
t))
continue;
auto name = migraphx::shape::cpp_type(t);
if(t == migraphx::shape::half_type)
......
......@@ -24,6 +24,7 @@
#include <atomic>
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdio>
#include <cstdlib>
......
......@@ -46,8 +46,12 @@ std::function<F>
compile_function(const std::string& src, const std::string& flags, const std::string& fname)
{
migraphx::src_compiler compiler;
compiler.flags = flags + "-std=c++14 -fPIC -shared";
compiler.flags = flags + "-std=c++14 -fPIC -shared";
#ifdef _WIN32
compiler.output = "simple.dll";
#else
compiler.output = "libsimple.so";
#endif
migraphx::src_file f{"main.cpp", src};
auto image = compiler.compile({f});
return migraphx::dynamic_loader{image}.get_function<F>(fname);
......
4a8203033930da506b356cdaf88b1531d8d8fca3
a5537f2f563d4975c7e6121a7eb260bbbfd9455a
averagepool_dilate_test:
Y
xy" AveragePool*
dilations@*
kernel_shape@*
pads@@*
strides@averagepool_dilate_testZ
x



b
y



B
\ No newline at end of file
......@@ -276,6 +276,22 @@ def averagepool_1d_test():
return ([node], [x], [out])
@onnx_test()
def averagepool_dilate_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4, 2])
node = onnx.helper.make_node('AveragePool',
inputs=['x'],
outputs=['y'],
kernel_shape=[2],
strides=[1],
pads=[1, 1],
dilations=[3])
return ([node], [x], [y])
@onnx_test()
def averagepool_3d_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5, 5, 5])
......@@ -4484,6 +4500,177 @@ def lrn_test():
return ([node], [x], [y])
@onnx_test()
def lstm_bi_layout_cell_test():
seq = helper.make_tensor_value_info('seq', TensorProto.FLOAT, [3, 5, 10])
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [2, 80, 10])
r = helper.make_tensor_value_info('r', TensorProto.FLOAT, [2, 80, 20])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [2, 160])
seq_len = helper.make_tensor_value_info('seq_len', TensorProto.INT32, [3])
h0 = helper.make_tensor_value_info('h0', TensorProto.FLOAT, [3, 2, 20])
c0 = helper.make_tensor_value_info('c0', TensorProto.FLOAT, [3, 2, 20])
pph = helper.make_tensor_value_info('pph', TensorProto.FLOAT, [2, 60])
cellout = helper.make_tensor_value_info('cellout', TensorProto.FLOAT,
[3, 2, 20])
node = onnx.helper.make_node(
'LSTM',
inputs=['seq', 'w', 'r', 'bias', 'seq_len', 'h0', 'c0', 'pph'],
outputs=['', '', 'cellout'],
activations=['sigmoid', 'tanh', 'tanh'],
clip=0,
direction='bidirectional',
hidden_size=20,
input_forget=1,
layout=1)
return ([node], [seq, w, r, bias, seq_len, h0, c0, pph], [cellout])
@onnx_test()
def lstm_bi_layout_last_test():
seq = helper.make_tensor_value_info('seq', TensorProto.FLOAT, [3, 5, 10])
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [2, 80, 10])
r = helper.make_tensor_value_info('r', TensorProto.FLOAT, [2, 80, 20])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [2, 160])
seq_len = helper.make_tensor_value_info('seq_len', TensorProto.INT32, [3])
h0 = helper.make_tensor_value_info('h0', TensorProto.FLOAT, [3, 2, 20])
c0 = helper.make_tensor_value_info('c0', TensorProto.FLOAT, [3, 2, 20])
pph = helper.make_tensor_value_info('pph', TensorProto.FLOAT, [2, 60])
hs = helper.make_tensor_value_info('hs', TensorProto.FLOAT, [3, 5, 2, 20])
output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
[3, 2, 20])
node = onnx.helper.make_node(
'LSTM',
inputs=['seq', 'w', 'r', 'bias', 'seq_len', 'h0', 'c0', 'pph'],
outputs=['hs', 'output'],
activations=['sigmoid', 'tanh', 'tanh'],
clip=0,
direction='bidirectional',
hidden_size=20,
input_forget=1,
layout=1)
return ([node], [seq, w, r, bias, seq_len, h0, c0, pph], [hs, output])
@onnx_test()
def lstm_f_layout_hs_test():
seq = helper.make_tensor_value_info('seq', TensorProto.FLOAT, [3, 5, 10])
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 80, 10])
r = helper.make_tensor_value_info('r', TensorProto.FLOAT, [1, 80, 20])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [1, 160])
seq_len = helper.make_tensor_value_info('seq_len', TensorProto.INT32, [3])
h0 = helper.make_tensor_value_info('h0', TensorProto.FLOAT, [3, 1, 20])
c0 = helper.make_tensor_value_info('c0', TensorProto.FLOAT, [3, 1, 20])
pph = helper.make_tensor_value_info('pph', TensorProto.FLOAT, [1, 60])
hs = helper.make_tensor_value_info('hs', TensorProto.FLOAT, [3, 5, 1, 20])
output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
[3, 1, 20])
node = onnx.helper.make_node(
'LSTM',
inputs=['seq', 'w', 'r', 'bias', 'seq_len', 'h0', 'c0', 'pph'],
outputs=['hs', 'output'],
activations=['sigmoid', 'tanh', 'tanh'],
clip=0,
direction='forward',
hidden_size=20,
input_forget=1,
layout=1)
return ([node], [seq, w, r, bias, seq_len, h0, c0, pph], [hs, output])
@onnx_test()
def lstm_f_layout_cell_test():
seq = helper.make_tensor_value_info('seq', TensorProto.FLOAT, [3, 5, 10])
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 80, 10])
r = helper.make_tensor_value_info('r', TensorProto.FLOAT, [1, 80, 20])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [1, 160])
seq_len = helper.make_tensor_value_info('seq_len', TensorProto.INT32, [3])
h0 = helper.make_tensor_value_info('h0', TensorProto.FLOAT, [3, 1, 20])
c0 = helper.make_tensor_value_info('c0', TensorProto.FLOAT, [3, 1, 20])
pph = helper.make_tensor_value_info('pph', TensorProto.FLOAT, [1, 60])
cellout = helper.make_tensor_value_info('cellout', TensorProto.FLOAT,
[3, 1, 20])
node = onnx.helper.make_node(
'LSTM',
inputs=['seq', 'w', 'r', 'bias', 'seq_len', 'h0', 'c0', 'pph'],
outputs=['', '', 'cellout'],
activations=['sigmoid', 'tanh', 'tanh'],
clip=0,
direction='forward',
hidden_size=20,
input_forget=1,
layout=1)
return ([node], [seq, w, r, bias, seq_len, h0, c0, pph], [cellout])
@onnx_test()
def lstm_r_layout_test():
seq = helper.make_tensor_value_info('seq', TensorProto.FLOAT, [3, 5, 10])
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 80, 10])
r = helper.make_tensor_value_info('r', TensorProto.FLOAT, [1, 80, 20])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [1, 160])
seq_len = helper.make_tensor_value_info('seq_len', TensorProto.INT32, [3])
h0 = helper.make_tensor_value_info('h0', TensorProto.FLOAT, [3, 1, 20])
c0 = helper.make_tensor_value_info('c0', TensorProto.FLOAT, [3, 1, 20])
pph = helper.make_tensor_value_info('pph', TensorProto.FLOAT, [1, 60])
hs = helper.make_tensor_value_info('hs', TensorProto.FLOAT, [3, 5, 1, 20])
node = onnx.helper.make_node(
'LSTM',
inputs=['seq', 'w', 'r', 'bias', 'seq_len', 'h0', 'c0', 'pph'],
outputs=['hs'],
activations=['sigmoid', 'tanh', 'tanh'],
clip=0,
direction='reverse',
hidden_size=20,
input_forget=1,
layout=1)
return ([node], [seq, w, r, bias, seq_len, h0, c0, pph], [hs])
@onnx_test()
def lstm_r_layout_hs_cell_test():
seq = helper.make_tensor_value_info('seq', TensorProto.FLOAT, [3, 5, 10])
w = helper.make_tensor_value_info('w', TensorProto.FLOAT, [1, 80, 10])
r = helper.make_tensor_value_info('r', TensorProto.FLOAT, [1, 80, 20])
bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT, [1, 160])
seq_len = helper.make_tensor_value_info('seq_len', TensorProto.INT32, [3])
h0 = helper.make_tensor_value_info('h0', TensorProto.FLOAT, [3, 1, 20])
c0 = helper.make_tensor_value_info('c0', TensorProto.FLOAT, [3, 1, 20])
pph = helper.make_tensor_value_info('pph', TensorProto.FLOAT, [1, 60])
output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
[3, 1, 20])
cellout = helper.make_tensor_value_info('cellout', TensorProto.FLOAT,
[3, 1, 20])
node = onnx.helper.make_node(
'LSTM',
inputs=['seq', 'w', 'r', 'bias', 'seq_len', 'h0', 'c0', 'pph'],
outputs=['', 'output', 'cellout'],
activations=['sigmoid', 'tanh', 'tanh'],
clip=0,
direction='reverse',
hidden_size=20,
input_forget=1,
layout=1)
return ([node], [seq, w, r, bias, seq_len, h0, c0, pph], [output, cellout])
@onnx_test()
def matmul_bmbm_test():
m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 6, 7])
......@@ -4711,6 +4898,22 @@ def maxpool_notset_test():
return ([node], [x], [y])
@onnx_test()
def maxpool_dilate_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4, 2])
node = onnx.helper.make_node('MaxPool',
inputs=['x'],
outputs=['y'],
kernel_shape=[2],
strides=[1],
pads=[1, 1],
dilations=[3])
return ([node], [x], [y])
@onnx_test()
def maxpool_same_upper_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
......@@ -5791,6 +5994,263 @@ def qlinearadd_bcast_test():
[sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
@onnx_test()
def qlinearaveragepool_1d_test():
x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 3, 32])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.05])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [],
[0])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 3, 31])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.05])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [],
[16])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[2],
)
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
@onnx_test()
def qlinearaveragepool_2d_test():
x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 3, 4, 4])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.05])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [],
[0])
y = helper.make_tensor_value_info('y', TensorProto.INT8, [1, 3, 3, 3])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.015])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [],
[16])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[2, 2],
)
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
@onnx_test()
def qlinearaveragepool_2d_ceil_test():
x = helper.make_tensor_value_info('x', TensorProto.UINT8, [1, 1, 4, 4])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.5])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.UINT8, [],
[0])
y = helper.make_tensor_value_info('y', TensorProto.UINT8, [1, 1, 2, 2])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.05])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.UINT8, [],
[0])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[3, 3],
strides=[2, 2],
ceil_mode=True,
)
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
@onnx_test()
def qlinearaveragepool_2d_dilations_test():
x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 1, 4, 4])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.5])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [],
[0])
y = helper.make_tensor_value_info('y', TensorProto.INT8, [1, 1, 2, 2])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.25])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [],
[84])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[2, 2],
strides=[1, 1],
dilations=[2, 2],
ceil_mode=True,
)
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
@onnx_test()
def qlinearaveragepool_2d_pads_count_include_pad_test():
x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 3, 4, 4])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.05])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [],
[0])
y = helper.make_tensor_value_info('y', TensorProto.INT8, [1, 3, 6, 6])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.01])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [],
[32])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[3, 3],
pads=[2, 2, 2, 2],
count_include_pad=1,
)
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
@onnx_test()
def qlinearaveragepool_2d_same_lower_test():
x = helper.make_tensor_value_info('x', TensorProto.UINT8, [1, 3, 4, 4])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.5])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.UINT8, [],
[0])
y = helper.make_tensor_value_info('y', TensorProto.UINT8, [1, 3, 4, 4])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.5])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.UINT8, [],
[0])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[2, 2],
auto_pad="SAME_LOWER",
)
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
@onnx_test()
def qlinearaveragepool_2d_same_upper_test():
x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 3, 4, 4])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.5])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [],
[32])
y = helper.make_tensor_value_info('y', TensorProto.INT8, [1, 3, 4, 4])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.25])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [],
[0])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[2, 2],
auto_pad="SAME_UPPER",
)
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
@onnx_test()
def qlinearaveragepool_2d_strides_test():
x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 3, 8, 8])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.05])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [],
[0])
y = helper.make_tensor_value_info('y', TensorProto.INT8, [1, 3, 2, 2])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.05])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [],
[8])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[5, 5],
strides=[2, 2],
)
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
@onnx_test()
def qlinearaveragepool_3d_test():
x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 3, 3, 3, 3])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.05])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [],
[0])
y = helper.make_tensor_value_info('y', TensorProto.INT8, [1, 3, 2, 2, 2])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.02])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [],
[0])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[2, 2, 2],
)
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
@onnx_test()
def qlinearaveragepool_notset_test():
x = helper.make_tensor_value_info('x', TensorProto.INT8, [1, 1, 5, 5])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.5])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.INT8, [],
[0])
y = helper.make_tensor_value_info('y', TensorProto.INT8, [1, 1, 1, 1])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.5])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.INT8, [],
[10])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[6, 6],
strides=[2, 2],
pads=[0, 0, 1, 1],
channels_last=0,
auto_pad='NOTSET')
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
@onnx_test()
def qlinearaveragepool_nt_cip_test():
x = helper.make_tensor_value_info('x', TensorProto.UINT8, [1, 1, 5, 5])
x_scale = helper.make_tensor('x_scale', TensorProto.FLOAT, [], [0.5])
x_zero_point = helper.make_tensor('x_zero_point', TensorProto.UINT8, [],
[0])
y = helper.make_tensor_value_info('y', TensorProto.UINT8, [1, 1, 1, 1])
y_scale = helper.make_tensor('y_scale', TensorProto.FLOAT, [], [0.5])
y_zero_point = helper.make_tensor('y_zero_point', TensorProto.UINT8, [],
[10])
node = onnx.helper.make_node(
'QLinearAveragePool',
inputs=['x', 'x_scale', 'x_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],
kernel_shape=[6, 6],
strides=[2, 2],
pads=[0, 0, 1, 1],
channels_last=0,
auto_pad='NOTSET',
count_include_pad=1)
return ([node], [x], [y], [x_scale, x_zero_point, y_scale, y_zero_point])
@onnx_test()
def qlinearconv_test():
# https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__QLinearConv.html
......@@ -5923,6 +6383,26 @@ def qlinearglobalavgpool_test():
return ([n], [x], [y], [sc_x, z_pt_x, sc_y, z_pt_y])
@onnx_test()
def qlinearleakyrelu_test():
x = helper.make_tensor_value_info('X', TensorProto.INT8, [64])
sc_x = helper.make_tensor('X_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_x = helper.make_tensor('X_zero_point', TensorProto.INT8, [], [0])
sc_y = helper.make_tensor('Y_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_y = helper.make_tensor('Y_zero_point', TensorProto.INT8, [], [10])
y = helper.make_tensor_value_info('Y', TensorProto.INT8, [64])
node = onnx.helper.make_node(
'QLinearLeakyRelu',
inputs=['X', 'X_scale', 'X_zero_point', 'Y_scale', 'Y_zero_point'],
outputs=['Y'],
alpha=1.1,
)
return ([node], [x], [y], [sc_x, zero_pt_x, sc_y, zero_pt_y])
def qlinearmatmul_1D_test():
a = helper.make_tensor_value_info('A', TensorProto.UINT8, [8])
sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.05])
......@@ -6008,6 +6488,81 @@ def qlinearmatmul_3D_test():
[sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
@onnx_test()
def qlinearmul_test():
a = helper.make_tensor_value_info('A', TensorProto.UINT8, [64])
sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.UINT8, [], [0])
b = helper.make_tensor_value_info('B', TensorProto.UINT8, [64])
sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.UINT8, [], [16])
sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.UINT8, [],
[100])
c = helper.make_tensor_value_info('C', TensorProto.UINT8, [64])
node = onnx.helper.make_node(
'QLinearMul',
inputs=[
'A', 'A_scale', 'A_zero_point', 'B', 'B_scale', 'B_zero_point',
'C_scale', 'C_zero_point'
],
outputs=['C'],
)
return ([node], [a, b], [c],
[sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
@onnx_test()
def qlinearmul_bcast_test():
a = helper.make_tensor_value_info('A', TensorProto.INT8, [64])
sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.INT8, [], [0])
b = helper.make_tensor_value_info('B', TensorProto.INT8, [1, 1, 64])
sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.INT8, [], [128])
sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.15])
zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.INT8, [], [32])
c = helper.make_tensor_value_info('C', TensorProto.INT8, [1, 1, 64])
node = onnx.helper.make_node(
'QLinearMul',
inputs=[
'A', 'A_scale', 'A_zero_point', 'B', 'B_scale', 'B_zero_point',
'C_scale', 'C_zero_point'
],
outputs=['C'],
)
return ([node], [a, b], [c],
[sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
@onnx_test()
def qlinearsigmoid_test():
x = helper.make_tensor_value_info('X', TensorProto.INT8, [64])
sc_x = helper.make_tensor('X_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_x = helper.make_tensor('X_zero_point', TensorProto.INT8, [], [0])
sc_y = helper.make_tensor('Y_scale', TensorProto.FLOAT, [], [0.0035])
zero_pt_y = helper.make_tensor('Y_zero_point', TensorProto.INT8, [],
[-128])
y = helper.make_tensor_value_info('Y', TensorProto.INT8, [64])
node = onnx.helper.make_node(
'QLinearSigmoid',
inputs=['X', 'X_scale', 'X_zero_point', 'Y_scale', 'Y_zero_point'],
outputs=['Y'],
)
return ([node], [x], [y], [sc_x, zero_pt_x, sc_y, zero_pt_y])
@onnx_test()
def quantizelinear_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5])
......@@ -7157,8 +7712,7 @@ def scatter_none_test():
return ([node], [x, i, u], [y])
@onnx_test()
def scatternd_add_test():
def make_scatternd_test(reduction="none"):
data = helper.make_tensor_value_info('data', TensorProto.FLOAT, [2, 2, 2])
indices = helper.make_tensor_value_info('indices', TensorProto.INT64,
[2, 1, 2])
......@@ -7170,44 +7724,39 @@ def scatternd_add_test():
node = onnx.helper.make_node('ScatterND',
inputs=['data', 'indices', 'updates'],
outputs=['output'],
reduction="add")
reduction=reduction)
return ([node], [data, indices, updates], [output])
@onnx_test()
def scatternd_add_test():
return make_scatternd_test("add")
@onnx_test()
def scatternd_mul_test():
data = helper.make_tensor_value_info('data', TensorProto.FLOAT, [2, 2, 2])
indices = helper.make_tensor_value_info('indices', TensorProto.INT64,
[2, 1, 2])
updates = helper.make_tensor_value_info('updates', TensorProto.FLOAT,
[2, 1, 2])
output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
[2, 2, 2])
return make_scatternd_test("mul")
node = onnx.helper.make_node('ScatterND',
inputs=['data', 'indices', 'updates'],
outputs=['output'],
reduction="mul")
return ([node], [data, indices, updates], [output])
@onnx_test()
def scatternd_max_test():
return make_scatternd_test("max")
@onnx_test()
def scatternd_min_test():
return make_scatternd_test("min")
@onnx_test()
def scatternd_test():
data = helper.make_tensor_value_info('data', TensorProto.FLOAT, [2, 2, 2])
indices = helper.make_tensor_value_info('indices', TensorProto.INT64,
[2, 1, 2])
updates = helper.make_tensor_value_info('updates', TensorProto.FLOAT,
[2, 1, 2])
output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
[2, 2, 2])
return make_scatternd_test()
node = onnx.helper.make_node('ScatterND',
inputs=['data', 'indices', 'updates'],
outputs=['output'])
return ([node], [data, indices, updates], [output])
@onnx_test()
def scatternd_invalid_reduction_test():
return make_scatternd_test("invalid")
@onnx_test()
......
maxpool_dilate_test:
U
xy"MaxPool*
dilations@*
kernel_shape@*
pads@@*
strides@maxpool_dilate_testZ
x



b
y



B
\ No newline at end of file
......@@ -1092,6 +1092,115 @@ TEST_CASE(lstm_forward)
}
}
TEST_CASE(lstm_forward_layout)
{
std::size_t sl = 5; // sequence len
std::size_t bs = 3; // batch size
std::size_t hs = 20; // hidden size
std::size_t is = 10; // input size
std::size_t nd = 1; // num directions
float clip = 0.0f;
int input_forget = 1;
migraphx::shape seq_shape{migraphx::shape::float_type, {bs, sl, is}};
migraphx::shape w_shape{migraphx::shape::float_type, {nd, 4 * hs, is}};
migraphx::shape r_shape{migraphx::shape::float_type, {nd, 4 * hs, hs}};
migraphx::shape bias_shape{migraphx::shape::float_type, {nd, 8 * hs}};
migraphx::shape sl_shape{migraphx::shape::int32_type, {bs}};
migraphx::shape ih_shape{migraphx::shape::float_type, {bs, nd, hs}};
migraphx::shape pph_shape{migraphx::shape::float_type, {nd, 3 * hs}};
// 8 args, hs and last output
{
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_parameter("seq", seq_shape);
auto w = mm->add_parameter("w", w_shape);
auto r = mm->add_parameter("r", r_shape);
auto bias = mm->add_parameter("bias", bias_shape);
auto seq_len = mm->add_parameter("seq_len", sl_shape);
auto ih = mm->add_parameter("h0", ih_shape);
auto ic = mm->add_parameter("c0", ih_shape);
auto pph = mm->add_parameter("pph", pph_shape);
std::vector<int64_t> perm{1, 0, 2};
seq = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), seq);
ih = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ih);
ic = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ic);
auto out_hs = mm->add_instruction(
migraphx::make_op(
"lstm",
{{"hidden_size", hs},
{"actv_func",
migraphx::to_value(std::vector<migraphx::operation>{migraphx::make_op("sigmoid"),
migraphx::make_op("tanh"),
migraphx::make_op("tanh")})},
{"direction", migraphx::to_value(migraphx::op::rnn_direction::forward)},
{"clip", clip},
{"input_forget", input_forget}}),
seq,
w,
r,
bias,
seq_len,
ih,
ic,
pph);
auto last_output = mm->add_instruction(migraphx::make_op("rnn_last_hs_output"), out_hs);
std::vector<int64_t> perm_hid{2, 0, 1, 3};
out_hs = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm_hid}}),
out_hs);
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), last_output);
auto prog = optimize_onnx("lstm_f_layout_hs_test.onnx");
EXPECT(p == prog);
}
// 8 args, cell output
{
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_parameter("seq", seq_shape);
auto w = mm->add_parameter("w", w_shape);
auto r = mm->add_parameter("r", r_shape);
auto bias = mm->add_parameter("bias", bias_shape);
auto seq_len = mm->add_parameter("seq_len", sl_shape);
auto ih = mm->add_parameter("h0", ih_shape);
auto ic = mm->add_parameter("c0", ih_shape);
auto pph = mm->add_parameter("pph", pph_shape);
std::vector<int64_t> perm{1, 0, 2};
seq = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), seq);
ih = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ih);
ic = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ic);
auto out_hs = mm->add_instruction(
migraphx::make_op(
"lstm",
{{"hidden_size", hs},
{"actv_func",
migraphx::to_value(std::vector<migraphx::operation>{migraphx::make_op("sigmoid"),
migraphx::make_op("tanh"),
migraphx::make_op("tanh")})},
{"direction", migraphx::to_value(migraphx::op::rnn_direction::forward)},
{"clip", clip},
{"input_forget", input_forget}}),
seq,
w,
r,
bias,
seq_len,
ih,
ic,
pph);
auto last_cell = mm->add_instruction(migraphx::make_op("rnn_last_cell_output"), out_hs);
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), last_cell);
auto prog = optimize_onnx("lstm_f_layout_cell_test.onnx");
EXPECT(p == prog);
}
}
// activation functions
TEST_CASE(lstm_forward_actv_func)
{
......@@ -1342,6 +1451,117 @@ TEST_CASE(lstm_reverse)
}
}
TEST_CASE(lstm_reverse_layout)
{
std::size_t sl = 5; // sequence len
std::size_t bs = 3; // batch size
std::size_t hs = 20; // hidden size
std::size_t is = 10; // input size
std::size_t nd = 1; // num directions
float clip = 0.0f;
int input_forget = 1;
migraphx::shape seq_shape{migraphx::shape::float_type, {bs, sl, is}};
migraphx::shape w_shape{migraphx::shape::float_type, {nd, 4 * hs, is}};
migraphx::shape r_shape{migraphx::shape::float_type, {nd, 4 * hs, hs}};
migraphx::shape bias_shape{migraphx::shape::float_type, {nd, 8 * hs}};
migraphx::shape sl_shape{migraphx::shape::int32_type, {bs}};
migraphx::shape ih_shape{migraphx::shape::float_type, {bs, nd, hs}};
migraphx::shape pph_shape{migraphx::shape::float_type, {nd, 3 * hs}};
// 8 args, hs output
{
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_parameter("seq", seq_shape);
auto w = mm->add_parameter("w", w_shape);
auto r = mm->add_parameter("r", r_shape);
auto bias = mm->add_parameter("bias", bias_shape);
auto seq_len = mm->add_parameter("seq_len", sl_shape);
auto ih = mm->add_parameter("h0", ih_shape);
auto ic = mm->add_parameter("c0", ih_shape);
auto pph = mm->add_parameter("pph", pph_shape);
std::vector<int64_t> perm{1, 0, 2};
seq = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), seq);
ih = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ih);
ic = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ic);
auto out_hs = mm->add_instruction(
migraphx::make_op(
"lstm",
{{"hidden_size", hs},
{"actv_func",
migraphx::to_value(std::vector<migraphx::operation>{migraphx::make_op("sigmoid"),
migraphx::make_op("tanh"),
migraphx::make_op("tanh")})},
{"direction", migraphx::to_value(migraphx::op::rnn_direction::reverse)},
{"clip", clip},
{"input_forget", input_forget}}),
seq,
w,
r,
bias,
seq_len,
ih,
ic,
pph);
std::vector<int64_t> perm_hid{2, 0, 1, 3};
out_hs = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm_hid}}),
out_hs);
auto prog = optimize_onnx("lstm_r_layout_test.onnx");
EXPECT(p == prog);
}
// 8 args, last and cell output
{
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_parameter("seq", seq_shape);
auto w = mm->add_parameter("w", w_shape);
auto r = mm->add_parameter("r", r_shape);
auto bias = mm->add_parameter("bias", bias_shape);
auto seq_len = mm->add_parameter("seq_len", sl_shape);
auto ih = mm->add_parameter("h0", ih_shape);
auto ic = mm->add_parameter("c0", ih_shape);
auto pph = mm->add_parameter("pph", pph_shape);
std::vector<int64_t> perm{1, 0, 2};
seq = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), seq);
ih = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ih);
ic = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ic);
auto out_hs = mm->add_instruction(
migraphx::make_op(
"lstm",
{{"hidden_size", hs},
{"actv_func",
migraphx::to_value(std::vector<migraphx::operation>{migraphx::make_op("sigmoid"),
migraphx::make_op("tanh"),
migraphx::make_op("tanh")})},
{"direction", migraphx::to_value(migraphx::op::rnn_direction::reverse)},
{"clip", clip},
{"input_forget", input_forget}}),
seq,
w,
r,
bias,
seq_len,
ih,
ic,
pph);
auto last_output = mm->add_instruction(migraphx::make_op("rnn_last_hs_output"), out_hs);
auto last_cell = mm->add_instruction(migraphx::make_op("rnn_last_cell_output"), out_hs);
last_output = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}),
last_output);
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), last_cell);
auto prog = optimize_onnx("lstm_r_layout_hs_cell_test.onnx");
EXPECT(p == prog);
}
}
TEST_CASE(lstm_bidirectional)
{
std::size_t sl = 5; // sequence len
......@@ -1594,6 +1814,118 @@ TEST_CASE(lstm_bidirectional)
}
}
TEST_CASE(lstm_bidirectional_layout)
{
std::size_t sl = 5; // sequence len
std::size_t bs = 3; // batch size
std::size_t hs = 20; // hidden size
std::size_t is = 10; // input size
std::size_t nd = 2; // num directions
float clip = 0.0f;
int input_forget = 1;
migraphx::shape seq_shape{migraphx::shape::float_type, {bs, sl, is}};
migraphx::shape w_shape{migraphx::shape::float_type, {nd, 4 * hs, is}};
migraphx::shape r_shape{migraphx::shape::float_type, {nd, 4 * hs, hs}};
migraphx::shape bias_shape{migraphx::shape::float_type, {nd, 8 * hs}};
migraphx::shape sl_shape{migraphx::shape::int32_type, {bs}};
migraphx::shape ih_shape{migraphx::shape::float_type, {bs, nd, hs}};
migraphx::shape pph_shape{migraphx::shape::float_type, {nd, 3 * hs}};
// 0 activation function
{
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_parameter("seq", seq_shape);
auto w = mm->add_parameter("w", w_shape);
auto r = mm->add_parameter("r", r_shape);
auto bias = mm->add_parameter("bias", bias_shape);
auto seq_len = mm->add_parameter("seq_len", sl_shape);
auto ih = mm->add_parameter("h0", ih_shape);
auto ic = mm->add_parameter("c0", ih_shape);
auto pph = mm->add_parameter("pph", pph_shape);
std::vector<int64_t> perm{1, 0, 2};
seq = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), seq);
ih = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ih);
ic = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ic);
auto out_hs = mm->add_instruction(
migraphx::make_op(
"lstm",
{{"hidden_size", hs},
{"actv_func",
migraphx::to_value(std::vector<migraphx::operation>{migraphx::make_op("sigmoid"),
migraphx::make_op("tanh"),
migraphx::make_op("tanh"),
migraphx::make_op("sigmoid"),
migraphx::make_op("tanh"),
migraphx::make_op("tanh")})},
{"direction", migraphx::to_value(migraphx::op::rnn_direction::bidirectional)},
{"clip", clip},
{"input_forget", input_forget}}),
seq,
w,
r,
bias,
seq_len,
ih,
ic,
pph);
auto last_output = mm->add_instruction(migraphx::make_op("rnn_last_hs_output"), out_hs);
std::vector<int64_t> perm_hid{2, 0, 1, 3};
out_hs = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm_hid}}),
out_hs);
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), last_output);
auto prog = optimize_onnx("lstm_bi_layout_last_test.onnx");
EXPECT(p == prog);
}
{
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_parameter("seq", seq_shape);
auto w = mm->add_parameter("w", w_shape);
auto r = mm->add_parameter("r", r_shape);
auto bias = mm->add_parameter("bias", bias_shape);
auto seq_len = mm->add_parameter("seq_len", sl_shape);
auto ih = mm->add_parameter("h0", ih_shape);
auto ic = mm->add_parameter("c0", ih_shape);
auto pph = mm->add_parameter("pph", pph_shape);
std::vector<int64_t> perm{1, 0, 2};
seq = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), seq);
ih = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ih);
ic = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), ic);
auto out_hs = mm->add_instruction(
migraphx::make_op(
"lstm",
{{"hidden_size", hs},
{"actv_func",
migraphx::to_value(std::vector<migraphx::operation>{migraphx::make_op("sigmoid"),
migraphx::make_op("tanh"),
migraphx::make_op("tanh"),
migraphx::make_op("sigmoid"),
migraphx::make_op("tanh"),
migraphx::make_op("tanh")})},
{"direction", migraphx::to_value(migraphx::op::rnn_direction::bidirectional)},
{"clip", clip},
{"input_forget", input_forget}}),
seq,
w,
r,
bias,
seq_len,
ih,
ic,
pph);
auto last_cell = mm->add_instruction(migraphx::make_op("rnn_last_cell_output"), out_hs);
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), last_cell);
auto prog = optimize_onnx("lstm_bi_layout_cell_test.onnx");
EXPECT(p == prog);
}
}
TEST_CASE(lstm_bi_actv_funcs)
{
std::size_t sl = 5; // sequence len
......
......@@ -296,13 +296,32 @@ TEST_CASE(averagepool_1d_test)
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0}},
{"stride", {1}},
{"lengths", {3}}}),
{"lengths", {3}},
{"dilations", {1}}}),
l0);
auto prog = optimize_onnx("averagepool_1d_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(averagepool_dilate_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 4, 3}});
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {1, 1}},
{"stride", {1}},
{"lengths", {2}},
{"dilations", {3}}}),
input);
auto prog = optimize_onnx("averagepool_dilate_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(averagepool_3d_test)
{
migraphx::program p;
......@@ -312,7 +331,8 @@ TEST_CASE(averagepool_3d_test)
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0, 0, 0}},
{"stride", {1, 1, 1}},
{"lengths", {3, 3, 3}}}),
{"lengths", {3, 3, 3}},
{"dilations", {1, 1, 1}}}),
l0);
auto prog = optimize_onnx("averagepool_3d_test.onnx");
......@@ -332,6 +352,7 @@ TEST_CASE(averagepool_dyn_test)
{"mode", migraphx::op::pooling_mode::average},
{"stride", {2, 2, 2}},
{"lengths", {3, 3, 3}},
{"dilations", {1, 1, 1}},
{"padding", {1, 1, 1, 1, 1, 1}},
{"padding_mode", 0},
}),
......@@ -357,6 +378,7 @@ TEST_CASE(averagepool_dyn_autopad_test)
{"mode", migraphx::op::pooling_mode::average},
{"stride", {2, 2, 2}},
{"lengths", {3, 3, 3}},
{"dilations", {1, 1, 1}},
{"padding", {0, 0, 0, 0, 0, 0}},
{"padding_mode", migraphx::op::padding_mode_t::same_upper},
}),
......@@ -394,7 +416,8 @@ TEST_CASE(averagepool_notset_test)
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {2, 2, 2, 2}},
{"stride", {2, 2}},
{"lengths", {6, 6}}}),
{"lengths", {6, 6}},
{"dilations", {1, 1}}}),
input);
auto ret = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {1, 1}}, {"ends", {2, 2}}}), ins);
......@@ -415,7 +438,8 @@ TEST_CASE(averagepool_nt_cip_test)
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0}},
{"stride", {2, 2}},
{"lengths", {6, 6}}}),
{"lengths", {6, 6}},
{"dilations", {1, 1}}}),
ins_pad);
mm->add_return({ret});
......@@ -437,6 +461,7 @@ TEST_CASE(averagepool_same_lower_test)
{"padding", {1, 1, 1, 1}},
{"stride", {1, 1}},
{"lengths", {2, 2}},
{"dilations", {1, 1}},
{"padding_mode", migraphx::op::padding_mode_t::default_},
}),
input);
......@@ -459,7 +484,8 @@ TEST_CASE(averagepool_sl_cip_test)
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0}},
{"stride", {1, 1}},
{"lengths", {2, 2}}}),
{"lengths", {2, 2}},
{"dilations", {1, 1}}}),
ins_pad);
mm->add_return({ret});
auto prog = migraphx::parse_onnx("averagepool_sl_cip_test.onnx");
......@@ -476,7 +502,8 @@ TEST_CASE(averagepool_same_upper_test)
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {1, 1, 1, 1}},
{"stride", {1, 1}},
{"lengths", {2, 2}}}),
{"lengths", {2, 2}},
{"dilations", {1, 1}}}),
input);
auto ret = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {1, 1}}, {"ends", {6, 6}}}), ins);
......@@ -1307,7 +1334,8 @@ TEST_CASE(conv_bn_relu_maxpool_test)
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0, 0, 0}},
{"stride", {2, 2}},
{"lengths", {2, 2}}}),
{"lengths", {2, 2}},
{"dilations", {1, 1}}}),
l7);
auto prog = optimize_onnx("conv_bn_relu_maxpool_test.onnx");
......@@ -1505,7 +1533,8 @@ TEST_CASE(conv_relu_maxpool_test)
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0, 0, 0}},
{"stride", {2, 2}},
{"lengths", {2, 2}}}),
{"lengths", {2, 2}},
{"dilations", {1, 1}}}),
l6);
auto prog = optimize_onnx("conv_relu_maxpool_test.onnx");
......@@ -1530,7 +1559,8 @@ TEST_CASE(conv_relu_maxpool_x2_test)
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0, 0, 0}},
{"stride", {2, 2}},
{"lengths", {2, 2}}}),
{"lengths", {2, 2}},
{"dilations", {1, 1}}}),
l6);
auto l8 = mm->add_parameter("3", {migraphx::shape::float_type, {1, 5, 5, 5}});
......@@ -1546,7 +1576,8 @@ TEST_CASE(conv_relu_maxpool_x2_test)
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0, 0, 0}},
{"stride", {2, 2}},
{"lengths", {2, 2}}}),
{"lengths", {2, 2}},
{"dilations", {1, 1}}}),
l13);
auto prog = optimize_onnx("conv_relu_maxpool_x2_test.onnx");
......@@ -4245,6 +4276,7 @@ TEST_CASE(lppool_l1_test)
{"padding", {0, 0}},
{"stride", {1}},
{"lengths", {3}},
{"dilations", {1}},
{"lp_order", 1}}),
l0);
auto prog = optimize_onnx("lppool_l1_test.onnx");
......@@ -4261,6 +4293,7 @@ TEST_CASE(lppool_l2_test)
{"padding", {0, 0}},
{"stride", {1}},
{"lengths", {3}},
{"dilations", {1}},
{"lp_order", 2}}),
l0);
auto prog = optimize_onnx("lppool_l2_test.onnx");
......@@ -4513,7 +4546,8 @@ TEST_CASE(maxpool_notset_test)
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0, 1, 1}},
{"stride", {2, 2}},
{"lengths", {6, 6}}}),
{"lengths", {6, 6}},
{"dilations", {1, 1}}}),
input);
auto prog = optimize_onnx("maxpool_notset_test.onnx");
......@@ -4521,6 +4555,24 @@ TEST_CASE(maxpool_notset_test)
EXPECT(p == prog);
}
TEST_CASE(maxpool_dilate_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 4, 3}});
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {1, 1}},
{"stride", {1}},
{"lengths", {2}},
{"dilations", {3}}}),
input);
auto prog = optimize_onnx("maxpool_dilate_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(maxpool_same_upper_test)
{
migraphx::program p;
......@@ -4530,7 +4582,8 @@ TEST_CASE(maxpool_same_upper_test)
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0, 1, 1}},
{"stride", {1, 1}},
{"lengths", {2, 2}}}),
{"lengths", {2, 2}},
{"dilations", {1, 1}}}),
input);
auto prog = optimize_onnx("maxpool_same_upper_test.onnx");
......@@ -5542,6 +5595,54 @@ TEST_CASE(qlinearadd_test)
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(qlinearaveragepool_notset_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto sc_x = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto z_pt_x = mm->add_literal(migraphx::literal{migraphx::shape::int8_type, {0}});
auto sc_y = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
auto z_pt_y = mm->add_literal(migraphx::literal{migraphx::shape::int8_type, {10}});
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::int8_type, {1, 1, 5, 5}});
auto scale_x_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 5, 5}}}), sc_x);
auto z_pt_x_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 5, 5}}}), z_pt_x);
auto fp_x =
mm->add_instruction(migraphx::make_op("dequantizelinear"), x, scale_x_bcast, z_pt_x_bcast);
auto fp_y =
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {2, 2, 2, 2}},
{"stride", {2, 2}},
{"lengths", {6, 6}}}),
fp_x);
fp_y = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {1, 1}}, {"ends", {2, 2}}}), fp_y);
auto scale_y_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1, 1}}}), sc_y);
auto z_pt_y_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1, 1}}}), z_pt_y);
auto y =
mm->add_instruction(migraphx::make_op("quantizelinear"), fp_y, scale_y_bcast, z_pt_y_bcast);
mm->add_return({y});
auto prog = migraphx::parse_onnx("qlinearaveragepool_notset_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(qlinearconv_test)
{
migraphx::program p;
......@@ -5642,6 +5743,46 @@ TEST_CASE(qlinearglobalavgpool_test)
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(qlinearleakyrelu_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("X", {migraphx::shape::int8_type, {64}});
auto sc_x = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_x = mm->add_literal(migraphx::literal{migraphx::shape::int8_type, {0}});
auto sc_y = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_y = mm->add_literal(migraphx::literal{migraphx::shape::int8_type, {10}});
auto scale_x_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), sc_x);
auto z_pt_x_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), z_pt_x);
auto fp_x =
mm->add_instruction(migraphx::make_op("dequantizelinear"), x, scale_x_bcast, z_pt_x_bcast);
auto fp_y = mm->add_instruction(migraphx::make_op("leaky_relu", {{"alpha", 1.1}}), fp_x);
auto scale_y_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), sc_y);
auto z_pt_y_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), z_pt_y);
auto y =
mm->add_instruction(migraphx::make_op("quantizelinear"), fp_y, scale_y_bcast, z_pt_y_bcast);
mm->add_return({y});
auto prog = migraphx::parse_onnx("qlinearleakyrelu_test.onnx");
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(qlinearmatmul_1D_test)
{
migraphx::program p;
......@@ -5754,6 +5895,99 @@ TEST_CASE(qlinearmatmul_2D_test)
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(qlinearmul_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto a = mm->add_parameter("A", {migraphx::shape::uint8_type, {64}});
auto b = mm->add_parameter("B", {migraphx::shape::uint8_type, {64}});
auto sc_a = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_a = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {0}});
auto sc_b = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_b = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {16}});
auto sc_c = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_c = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {100}});
auto scale_a_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), sc_a);
auto z_pt_a_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), z_pt_a);
auto fp_a =
mm->add_instruction(migraphx::make_op("dequantizelinear"), a, scale_a_bcast, z_pt_a_bcast);
auto scale_b_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), sc_b);
auto z_pt_b_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), z_pt_b);
auto fp_b =
mm->add_instruction(migraphx::make_op("dequantizelinear"), b, scale_b_bcast, z_pt_b_bcast);
auto fp_c = mm->add_instruction(migraphx::make_op("mul"), fp_a, fp_b);
auto scale_c_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), sc_c);
auto z_pt_c_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), z_pt_c);
auto c =
mm->add_instruction(migraphx::make_op("quantizelinear"), fp_c, scale_c_bcast, z_pt_c_bcast);
mm->add_return({c});
auto prog = migraphx::parse_onnx("qlinearmul_test.onnx");
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(qlinearsigmoid_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("X", {migraphx::shape::int8_type, {64}});
auto sc_x = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_x = mm->add_literal(migraphx::literal{migraphx::shape::int8_type, {0}});
auto sc_y = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.0035}});
auto z_pt_y = mm->add_literal(migraphx::literal{migraphx::shape::int8_type, {-128}});
auto scale_x_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), sc_x);
auto z_pt_x_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), z_pt_x);
auto fp_x =
mm->add_instruction(migraphx::make_op("dequantizelinear"), x, scale_x_bcast, z_pt_x_bcast);
auto fp_y = mm->add_instruction(migraphx::make_op("sigmoid"), fp_x);
auto scale_y_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), sc_y);
auto z_pt_y_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), z_pt_y);
auto y =
mm->add_instruction(migraphx::make_op("quantizelinear"), fp_y, scale_y_bcast, z_pt_y_bcast);
mm->add_return({y});
auto prog = migraphx::parse_onnx("qlinearsigmoid_test.onnx");
EXPECT(p.sort() == prog.sort());
}
migraphx::instruction_ref insert_quantizelinear_clip(migraphx::module& m,
const migraphx::instruction_ref ins,
const migraphx::instruction_ref round,
......@@ -7041,20 +7275,35 @@ TEST_CASE(scatter_none_test)
EXPECT(p == prog);
}
TEST_CASE(scatternd_test)
void scatternd_test_base(const std::string& reduction, const std::string& onnx_file)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {2, 2, 2}});
auto l1 = mm->add_parameter("indices", migraphx::shape{migraphx::shape::int64_type, {2, 1, 2}});
auto l2 = mm->add_parameter("updates", migraphx::shape{migraphx::shape::float_type, {2, 1, 2}});
auto r = mm->add_instruction(migraphx::make_op("scatternd_none"), l0, l1, l2);
auto r = mm->add_instruction(migraphx::make_op("scatternd_" + reduction), l0, l1, l2);
mm->add_return({r});
auto prog = migraphx::parse_onnx("scatternd_test.onnx");
auto prog = migraphx::parse_onnx(onnx_file);
EXPECT(p == prog);
}
TEST_CASE(scatternd_test) { scatternd_test_base("none", "scatternd_test.onnx"); }
TEST_CASE(scatternd_add_test) { scatternd_test_base("add", "scatternd_add_test.onnx"); }
TEST_CASE(scatternd_mul_test) { scatternd_test_base("mul", "scatternd_mul_test.onnx"); }
TEST_CASE(scatternd_max_test) { scatternd_test_base("max", "scatternd_max_test.onnx"); }
TEST_CASE(scatternd_min_test) { scatternd_test_base("min", "scatternd_min_test.onnx"); }
TEST_CASE(scatternd_invalid_reduction_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("scatternd_invalid_reduction_test.onnx"); }));
}
TEST_CASE(scatternd_dyn_test)
{
// dynamic input.
......@@ -7078,34 +7327,6 @@ TEST_CASE(scatternd_dyn_test)
EXPECT(p == prog);
}
TEST_CASE(scatternd_add_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {2, 2, 2}});
auto l1 = mm->add_parameter("indices", migraphx::shape{migraphx::shape::int64_type, {2, 1, 2}});
auto l2 = mm->add_parameter("updates", migraphx::shape{migraphx::shape::float_type, {2, 1, 2}});
auto r = mm->add_instruction(migraphx::make_op("scatternd_add"), l0, l1, l2);
mm->add_return({r});
auto prog = migraphx::parse_onnx("scatternd_add_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(scatternd_mul_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {2, 2, 2}});
auto l1 = mm->add_parameter("indices", migraphx::shape{migraphx::shape::int64_type, {2, 1, 2}});
auto l2 = mm->add_parameter("updates", migraphx::shape{migraphx::shape::float_type, {2, 1, 2}});
auto r = mm->add_instruction(migraphx::make_op("scatternd_mul"), l0, l1, l2);
mm->add_return({r});
auto prog = migraphx::parse_onnx("scatternd_mul_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(selu_test)
{
migraphx::program p;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment