Commit 203de1a4 authored by YdrMaster's avatar YdrMaster
Browse files

issue/152/feat: 添加 rearrange 算子测例


Signed-off-by: default avatarYdrMaster <ydrml@hotmail.com>
parent bf5062d5
...@@ -13,6 +13,7 @@ DECLARE_INFINIOP_TEST(rope) ...@@ -13,6 +13,7 @@ DECLARE_INFINIOP_TEST(rope)
DECLARE_INFINIOP_TEST(clip) DECLARE_INFINIOP_TEST(clip)
DECLARE_INFINIOP_TEST(swiglu) DECLARE_INFINIOP_TEST(swiglu)
DECLARE_INFINIOP_TEST(add) DECLARE_INFINIOP_TEST(add)
DECLARE_INFINIOP_TEST(rearrange)
#define REGISTER_INFINIOP_TEST(name) \ #define REGISTER_INFINIOP_TEST(name) \
{ \ { \
...@@ -37,6 +38,7 @@ DECLARE_INFINIOP_TEST(add) ...@@ -37,6 +38,7 @@ DECLARE_INFINIOP_TEST(add)
REGISTER_INFINIOP_TEST(swiglu) \ REGISTER_INFINIOP_TEST(swiglu) \
REGISTER_INFINIOP_TEST(rope) \ REGISTER_INFINIOP_TEST(rope) \
REGISTER_INFINIOP_TEST(rms_norm) \ REGISTER_INFINIOP_TEST(rms_norm) \
REGISTER_INFINIOP_TEST(rearrange) \
} }
namespace infiniop_test { namespace infiniop_test {
...@@ -44,6 +46,18 @@ namespace infiniop_test { ...@@ -44,6 +46,18 @@ namespace infiniop_test {
// Global variable for {op_name: builder} mappings // Global variable for {op_name: builder} mappings
extern std::unordered_map<std::string, const TestBuilder> TEST_BUILDERS; extern std::unordered_map<std::string, const TestBuilder> TEST_BUILDERS;
template <typename V>
// Verify that every required key in `names` exists in `map`.
// Returns true only when all names are present; used by Test::build
// implementations to validate attribute/tensor maps up front.
bool check_names(
    const std::unordered_map<std::string, V> &map,
    const std::vector<std::string> &names) {
    for (const auto &required : names) {
        if (map.count(required) == 0) {
            return false;
        }
    }
    return true;
}
} // namespace infiniop_test } // namespace infiniop_test
#endif #endif
...@@ -60,6 +60,9 @@ std::shared_ptr<Result> runTest( ...@@ -60,6 +60,9 @@ std::shared_ptr<Result> runTest(
// Check if two tensors are close within given tolerance // Check if two tensors are close within given tolerance
void allClose(std::shared_ptr<Tensor> actual, std::shared_ptr<Tensor> expected, double rtol = 1e-3, double atol = 1e-3); void allClose(std::shared_ptr<Tensor> actual, std::shared_ptr<Tensor> expected, double rtol = 1e-3, double atol = 1e-3);
// Check if two tensors are equal
void allEqual(std::shared_ptr<Tensor> actual, std::shared_ptr<Tensor> expected);
// Helper function for benchmarking a function // Helper function for benchmarking a function
double benchmark(std::function<void()> func, size_t warmups, size_t iterations); double benchmark(std::function<void()> func, size_t warmups, size_t iterations);
} // namespace infiniop_test } // namespace infiniop_test
......
...@@ -28,4 +28,25 @@ inline double getVal(void *ptr, GGML_TYPE ggml_type) { ...@@ -28,4 +28,25 @@ inline double getVal(void *ptr, GGML_TYPE ggml_type) {
} }
} }
// Size in bytes of a single element of the given GGML scalar type.
// Throws std::runtime_error for any type not listed below.
inline size_t ggmlSizeOf(GGML_TYPE ggml_type) {
    if (ggml_type == GGML_TYPE_F16) {
        return sizeof(fp16_t);
    }
    if (ggml_type == GGML_TYPE_F32) {
        return sizeof(float);
    }
    if (ggml_type == GGML_TYPE_F64) {
        return sizeof(double);
    }
    if (ggml_type == GGML_TYPE_I8) {
        return sizeof(int8_t);
    }
    if (ggml_type == GGML_TYPE_I16) {
        return sizeof(int16_t);
    }
    if (ggml_type == GGML_TYPE_I32) {
        return sizeof(int32_t);
    }
    if (ggml_type == GGML_TYPE_I64) {
        return sizeof(int64_t);
    }
    throw std::runtime_error("Unsupported data type");
}
#endif #endif
...@@ -21,12 +21,7 @@ std::shared_ptr<Test> Test::build( ...@@ -21,12 +21,7 @@ std::shared_ptr<Test> Test::build(
double rtol, double atol) { double rtol, double atol) {
auto test = std::shared_ptr<Test>(new Test(rtol, atol)); auto test = std::shared_ptr<Test>(new Test(rtol, atol));
test->_attributes = new Attributes(); test->_attributes = new Attributes();
if (attributes.find("alpha") == attributes.end() if (!check_names(attributes, Test::attribute_names()) || !check_names(tensors, Test::tensor_names())) {
|| attributes.find("beta") == attributes.end()
|| tensors.find("a") == tensors.end()
|| tensors.find("b") == tensors.end()
|| tensors.find("c") == tensors.end()
|| tensors.find("ans") == tensors.end()) {
throw std::runtime_error("Invalid Test"); throw std::runtime_error("Invalid Test");
} }
......
...@@ -23,14 +23,7 @@ std::shared_ptr<Test> Test::build( ...@@ -23,14 +23,7 @@ std::shared_ptr<Test> Test::build(
double rtol, double atol) { double rtol, double atol) {
auto test = std::shared_ptr<Test>(new Test(rtol, atol)); auto test = std::shared_ptr<Test>(new Test(rtol, atol));
test->_attributes = new Attributes(); test->_attributes = new Attributes();
if (attributes.find("random_val") == attributes.end() if (!check_names(attributes, Test::attribute_names()) || !check_names(tensors, Test::tensor_names())) {
|| attributes.find("topp") == attributes.end()
|| attributes.find("topk") == attributes.end()
|| attributes.find("voc") == attributes.end()
|| attributes.find("temperature") == attributes.end()
|| tensors.find("data") == tensors.end()
|| tensors.find("ans") == tensors.end()
|| tensors.find("result") == tensors.end()) {
throw std::runtime_error("Invalid Test"); throw std::runtime_error("Invalid Test");
} }
......
#include "ops.hpp"
#include "utils.hpp"
#include <infinirt.h>
#include <iomanip>
#include <iostream>
namespace infiniop_test::rearrange {
struct Test::Attributes {
    // Tensors loaded from the test case: destination buffer, source
    // tensor, and the expected (reference) result.
    std::shared_ptr<Tensor> dst, src, ans;
};

// Build a rearrange test case from the parsed attribute/tensor maps.
// Throws std::runtime_error if any required name is missing.
std::shared_ptr<Test> Test::build(
    std::unordered_map<std::string, std::vector<uint8_t>> attributes,
    std::unordered_map<std::string, std::shared_ptr<Tensor>> tensors,
    double rtol, double atol) {
    auto test = std::shared_ptr<Test>(new Test(rtol, atol));
    test->_attributes = new Attributes();
    if (!check_names(attributes, Test::attribute_names()) || !check_names(tensors, Test::tensor_names())) {
        throw std::runtime_error("Invalid Test");
    }
    test->_attributes->dst = tensors["dst"];
    test->_attributes->src = tensors["src"];
    test->_attributes->ans = tensors["ans"];
    return test;
}

// Execute the rearrange op on the given device, check the result
// bit-exactly against the reference answer, then benchmark it.
std::shared_ptr<infiniop_test::Result> Test::run(
    infiniopHandle_t handle,
    infiniDevice_t device,
    int device_id,
    size_t warm_ups,
    size_t iterations) {
    // Fixed: was infiniopGemmDescriptor_t — a copy-paste remnant from the
    // gemm test; this operator needs the rearrange descriptor type.
    infiniopRearrangeDescriptor_t op_desc;
    auto dst = _attributes->dst->to(device, device_id);
    auto src = _attributes->src->to(device, device_id);
    CHECK_OR(infiniopCreateRearrangeDescriptor(
                 handle, &op_desc,
                 dst->desc(),
                 src->desc()),
             return TEST_FAILED(OP_CREATION_FAILED, "Failed to create op descriptor."));
    CHECK_OR(infiniopRearrange(
                 op_desc,
                 dst->data(),
                 src->data(),
                 nullptr),
             return TEST_FAILED(OP_EXECUTION_FAILED, "Failed during execution."));

    // Rearrange only moves bytes, so the result must match exactly —
    // no floating-point tolerance is involved.
    try {
        allEqual(dst, _attributes->ans);
    } catch (const std::exception &e) {
        return TEST_FAILED(RESULT_INCORRECT, e.what());
    }

    // NOTE(review): op_desc is never destroyed before returning — confirm
    // whether a matching infiniopDestroy* call is expected here, as in
    // other operator tests.
    double elapsed_time = 0.;
    elapsed_time = benchmark(
        [=]() {
            infiniopRearrange(
                op_desc,
                dst->data(),
                src->data(),
                nullptr);
        },
        warm_ups, iterations);
    return TEST_PASSED(elapsed_time);
}

// Rearrange takes no scalar attributes.
std::vector<std::string> Test::attribute_names() {
    return {};
}

std::vector<std::string> Test::tensor_names() {
    return {"dst", "src", "ans"};
}

std::vector<std::string> Test::output_names() {
    return {"dst"};
}

// Human-readable summary of the test case for logging.
std::string Test::toString() const {
    std::ostringstream oss;
    oss << op_name() << std::endl
        << "- dst: " << _attributes->dst->info() << std::endl
        << "- src: " << _attributes->src->info() << std::endl;
    return oss.str();
}

Test::~Test() {
    delete _attributes;
}

} // namespace infiniop_test::rearrange
...@@ -173,6 +173,44 @@ void allClose(std::shared_ptr<Tensor> actual_, std::shared_ptr<Tensor> expected_ ...@@ -173,6 +173,44 @@ void allClose(std::shared_ptr<Tensor> actual_, std::shared_ptr<Tensor> expected_
} }
} }
// Bit-exact comparison of two tensors. Both tensors are copied to host
// memory first; their strides may differ (e.g. one may be a transposed
// view), so elements are compared by walking per-element byte offsets.
// Throws std::runtime_error on dtype/shape mismatch or any unequal value,
// reporting the failure count and the first failing flat index.
void allEqual(std::shared_ptr<Tensor> actual_, std::shared_ptr<Tensor> expected_) {
    auto actual = actual_->to(INFINI_DEVICE_CPU);
    auto expected = expected_->to(INFINI_DEVICE_CPU);
    auto ggml_type = actual->ggml_type();
    auto shape = actual->shape();
    if (ggml_type != expected->ggml_type()) {
        throw std::runtime_error("Data type mismatch.");
    }
    if (shape != expected->shape()) {
        throw std::runtime_error("Shape mismatch.");
    }
    auto ndim = shape.size();
    size_t total = std::accumulate(shape.begin(), shape.end(), (size_t)1, std::multiplies<size_t>());
    auto counter = std::vector<size_t>(ndim, 0);
    // Element sizes are loop-invariant (types already checked equal above);
    // hoist them out of the per-element loop instead of recomputing each pass.
    const size_t elem_size = ggmlSizeOf(ggml_type);
    const auto actual_unit = ggmlTypeSize(actual->ggml_type());
    const auto expected_unit = ggmlTypeSize(expected->ggml_type());
    ptrdiff_t actual_offset = 0,
              expected_offset = 0;
    size_t num_failed = 0;
    std::string first_failed_msg;
    for (size_t i = 0; i < total; i++) {
        char *a_ = (char *)actual->data() + actual_offset,
             *e_ = (char *)expected->data() + expected_offset;
        if (std::memcmp(a_, e_, elem_size)) {
            // Remember only the first mismatch for the error message.
            if (num_failed == 0) {
                first_failed_msg = "First failed at index " + std::to_string(i);
            }
            num_failed++;
        }
        incrementOffset(actual_offset, actual->strides(), actual_unit,
                        expected_offset, expected->strides(), expected_unit,
                        counter, shape);
    }
    if (num_failed > 0) {
        throw std::runtime_error(std::to_string(num_failed) + " out of " + std::to_string(total) + " values failed. " + first_failed_msg);
    }
}
double benchmark(std::function<void()> func, size_t warmups, size_t iterations) { double benchmark(std::function<void()> func, size_t warmups, size_t iterations) {
if (iterations == 0) { if (iterations == 0) {
return 0.0; return 0.0;
......
from ast import List
import numpy as np import numpy as np
import gguf
from typing import List
from .. import InfiniopTestWriter, InfiniopTestCase, np_dtype_to_ggml, gguf_strides from .. import InfiniopTestWriter, InfiniopTestCase, np_dtype_to_ggml
def random_tensor(voc, topk, dtype): def random_tensor(voc, topk, dtype):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment