Commit 337d6703 authored by fsx950223

merge updates

parents de43a6d8 27482328
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#include <iostream>
#include <vector>
#include <gtest/gtest.h>
#include "ck/ck.hpp"
#include "ck/library/utility/host_tensor.hpp"
using namespace ck;
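
// As exercised below: Transpose() with no argument reverses the dimension order
// (a {13, 7} tensor becomes {7, 13}), while an explicit permutation must contain
// exactly one index per dimension, otherwise std::runtime_error is thrown.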
TEST(HostTensorTranspose, TestBadArgument)
{
    Tensor<float> tensor({13, 7});

    EXPECT_THROW(tensor.Transpose({0}), std::runtime_error);
    EXPECT_THROW(tensor.Transpose({0, 1, 2}), std::runtime_error);
}

TEST(HostTensorTranspose, Test2D)
{
    std::vector<size_t> lengths  = {13, 7};
    std::vector<size_t> tlengths = {7, 13};

    Tensor<float> tensor(lengths);
    tensor(0, 0) = 0.f;
    tensor(3, 4) = 34.f;

    EXPECT_EQ(tensor.GetLengths(), lengths);
    EXPECT_EQ(tensor(0, 0), 0.f);
    EXPECT_EQ(tensor(3, 4), 34.f);
    EXPECT_EQ(tensor(4, 3), 0.f);

    EXPECT_EQ(tensor.Transpose().GetLengths(), tlengths);
    EXPECT_EQ(tensor.Transpose()(0, 0), 0.f);
    EXPECT_EQ(tensor.Transpose()(4, 3), 34.f);
    EXPECT_EQ(tensor.Transpose()(3, 4), 0.f);
}

TEST(HostTensorTranspose, Test3D)
{
    std::vector<size_t> lengths  = {13, 7, 5};
    std::vector<size_t> tlengths = {5, 7, 13};

    Tensor<float> tensor(lengths);
    tensor(0, 0, 0) = 0.f;
    tensor(3, 4, 2) = 342.f;

    EXPECT_EQ(tensor.GetLengths(), lengths);
    EXPECT_EQ(tensor(0, 0, 0), 0.f);
    EXPECT_EQ(tensor(3, 4, 2), 342.f);
    EXPECT_EQ(tensor(4, 3, 2), 0.f);

    EXPECT_EQ(tensor.Transpose().GetLengths(), tlengths);
    EXPECT_EQ(tensor.Transpose()(0, 0, 0), 0.f);
    EXPECT_EQ(tensor.Transpose()(2, 4, 3), 342.f);
    EXPECT_EQ(tensor.Transpose()(2, 3, 4), 0.f);
}
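
// Transpose({0, 2, 1}) keeps dimension 0 and swaps dimensions 1 and 2, so element
// (i, j, k) of the original tensor is read back at (i, k, j); applying the same
// permutation twice restores the original layout.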
TEST(HostTensorTranspose, Test3D_021)
{
    std::vector<size_t> lengths  = {13, 7, 5};
    std::vector<size_t> tlengths = {13, 5, 7};

    Tensor<float> tensor(lengths);
    tensor(0, 0, 0) = 0.f;
    tensor(3, 4, 2) = 342.f;

    EXPECT_EQ(tensor.GetLengths(), lengths);
    EXPECT_EQ(tensor(0, 0, 0), 0.f);
    EXPECT_EQ(tensor(3, 4, 2), 342.f);
    EXPECT_EQ(tensor(4, 3, 2), 0.f);

    // transpose last two dimensions
    EXPECT_EQ(tensor.Transpose({0, 2, 1}).GetLengths(), tlengths);
    EXPECT_EQ(tensor.Transpose({0, 2, 1})(0, 0, 0), 0.f);
    EXPECT_EQ(tensor.Transpose({0, 2, 1})(2, 4, 3), 0.f);
    EXPECT_EQ(tensor.Transpose({0, 2, 1})(3, 2, 4), 342.f);
    EXPECT_EQ(tensor.Transpose({0, 2, 1})(2, 3, 4), 0.f);

    // transpose last two dimensions back again
    EXPECT_EQ(tensor.Transpose({0, 2, 1}).Transpose({0, 2, 1}).GetLengths(), lengths);
    EXPECT_EQ(tensor.Transpose({0, 2, 1}).Transpose({0, 2, 1})(3, 4, 2), 342.f);
}
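
// Strides {100, 1} describe a non-packed row-major layout: each row of 7 elements
// occupies a block of 100, leaving padding between rows. Transpose() should still
// address the same underlying elements despite the padding.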
TEST(HostTensorTranspose, TestNonpacked2D)
{
    std::vector<size_t> lengths  = {13, 7};
    std::vector<size_t> strides  = {100, 1};
    std::vector<size_t> tlengths = {7, 13};

    Tensor<float> tensor(lengths, strides);
    tensor(0, 0) = 0.f;
    tensor(3, 4) = 34.f;

    EXPECT_EQ(tensor.GetLengths(), lengths);
    EXPECT_EQ(tensor(0, 0), 0.f);
    EXPECT_EQ(tensor(3, 4), 34.f);
    EXPECT_EQ(tensor(4, 3), 0.f);

    EXPECT_EQ(tensor.Transpose().GetLengths(), tlengths);
    EXPECT_EQ(tensor.Transpose()(0, 0), 0.f);
    EXPECT_EQ(tensor.Transpose()(4, 3), 34.f);
    EXPECT_EQ(tensor.Transpose()(3, 4), 0.f);
}
@@ -3,9 +3,12 @@ add_custom_target(test_softmax)
add_gtest_executable(test_softmax_rank3 test_softmax_rank3.cpp)
add_gtest_executable(test_softmax_rank4 test_softmax_rank4.cpp)
add_gtest_executable(test_softmax_interface test_softmax_interface.cpp)
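# test_softmax_host_ref only exercises the CPU reference softmax, so it links
# against utility alone rather than device_softmax_instance.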
add_gtest_executable(test_softmax_host_ref test_softmax_host_ref.cpp)
target_link_libraries(test_softmax_rank3 PRIVATE utility device_softmax_instance)
target_link_libraries(test_softmax_rank4 PRIVATE utility device_softmax_instance)
target_link_libraries(test_softmax_interface PRIVATE utility device_softmax_instance)
target_link_libraries(test_softmax_host_ref PRIVATE utility)
add_dependencies(test_softmax test_softmax_rank3)
add_dependencies(test_softmax test_softmax_rank4)
add_dependencies(test_softmax test_softmax_interface)
add_dependencies(test_softmax test_softmax_host_ref)
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#include <vector>
#include "ck/library/utility/fill.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_softmax.hpp"
#include "gtest/gtest.h"
using namespace ck;
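
// GeneratorTensor_Diagonal fills x with the 2x2 identity matrix, so every row is a
// permutation of {1, 0} and its softmax along dimension 1 is
// {e / (e + 1), 1 / (e + 1)} ≈ {0.7311, 0.2689}.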
TEST(ReferenceSoftmax, Run)
{
    Tensor<float> x({2, 2});
    Tensor<float> y({2, 2});
    x.GenerateTensorValue(GeneratorTensor_Diagonal<float>{});

    using ReferenceSoftmax = tensor_operation::host::ReferenceSoftmax<float, float, float>;

    float alpha = 1.f;
    float beta  = 0.f;

    auto ref_softmax          = ReferenceSoftmax{};
    auto ref_softmax_invoker  = ref_softmax.MakeInvoker();
    auto ref_softmax_argument = ref_softmax.MakeArgument(x, y, alpha, beta, {1});

    ref_softmax_invoker.Run(ref_softmax_argument);

    EXPECT_TRUE((utils::check_err(
        y.mData, std::vector<float>{0.73105858f, 0.268941421f, 0.26894142f, 0.73105858f})));
}

TEST(ReferenceSoftmax, RunWithCalculatedStats)
{
    // >>> x = np.eye(4)
    // >>> m = np.max(np.exp(x), axis=1, keepdims=True)
    // >>> l = np.sum(np.exp(x - np.tile(m, (1,4))), axis=1, keepdims=True)
    // >>> m + np.log(l)
    // array([[1.74366838],
    //        [1.74366838],
    //        [1.74366838],
    //        [1.74366838]])
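    //
    // Note: the choice of m only shifts the intermediate terms, since
    // m + log(sum(exp(x - m))) == log(sum(exp(x))). The expected stat is therefore
    // the row-wise log-sum-exp of x: log(e + 3) ≈ 1.74366838.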

    Tensor<float> x({4, 4});
    Tensor<float> y({4, 4});
    Tensor<float> stats({4});
    x.GenerateTensorValue(GeneratorTensor_Diagonal<float>{});

    using ReferenceSoftmax = tensor_operation::host::ReferenceSoftmax<float, float, float>;

    float alpha = 1.f;
    float beta  = 0.f;

    auto ref_softmax         = ReferenceSoftmax{};
    auto ref_softmax_invoker = ref_softmax.MakeInvoker();

    {
        auto ref_softmax_argument = ref_softmax.MakeArgument(x, y, alpha, beta, {1}, &stats);
        ref_softmax_invoker.Run(ref_softmax_argument);

        EXPECT_TRUE((utils::check_err(
            stats.mData,
            std::vector<float>{1.74366838f, 1.74366838f, 1.74366838f, 1.74366838f})));
    }
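
    // recompute the softmax from the precomputed stats and check that it matches
    // the result of the direct Run() above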
    {
        Tensor<float> yy({4, 4});
        auto ref_softmax_argument = ref_softmax.MakeArgument(x, yy, alpha, beta, {1}, &stats);
        ref_softmax_invoker.RunWithPreCalcStats(ref_softmax_argument);

        EXPECT_TRUE((utils::check_err(y.mData, yy.mData)));
    }
}