Commit f8dbf0fe authored by xgqdut2016's avatar xgqdut2016
Browse files

issue/103: 添加并修补matmul测例

parent 86c701a8
...@@ -70,7 +70,7 @@ std::shared_ptr<infiniop_test::Result> Test::run( ...@@ -70,7 +70,7 @@ std::shared_ptr<infiniop_test::Result> Test::run(
return TEST_FAILED(OP_EXECUTION_FAILED, "Failed during execution.")); return TEST_FAILED(OP_EXECUTION_FAILED, "Failed during execution."));
try { try {
allClose(c, _attributes->ans); allClose(c, _attributes->ans, _rtol, _atol);
} catch (const std::exception &e) { } catch (const std::exception &e) {
return TEST_FAILED(RESULT_INCORRECT, e.what()); return TEST_FAILED(RESULT_INCORRECT, e.what());
} }
......
...@@ -110,7 +110,7 @@ Tensor::Tensor(const GGUFTensorInfo *info, ...@@ -110,7 +110,7 @@ Tensor::Tensor(const GGUFTensorInfo *info,
if (i == 0) { if (i == 0) {
contiguous_strides[ndim - 1] = (ptrdiff_t)1; contiguous_strides[ndim - 1] = (ptrdiff_t)1;
} else { } else {
contiguous_strides[ndim - 1 - i] = (ptrdiff_t)_shape[ndim - i] * contiguous_strides[ndim - i]; contiguous_strides[ndim - 1 - i] = (ptrdiff_t)info->shape[i - 1] * contiguous_strides[ndim - i];
} }
} }
......
...@@ -153,7 +153,7 @@ void allClose(std::shared_ptr<Tensor> actual_, std::shared_ptr<Tensor> expected_ ...@@ -153,7 +153,7 @@ void allClose(std::shared_ptr<Tensor> actual_, std::shared_ptr<Tensor> expected_
for (size_t i = 0; i < total; i++) { for (size_t i = 0; i < total; i++) {
double a_ = getVal((char *)actual->data() + actual_offset, actual->ggml_type()); double a_ = getVal((char *)actual->data() + actual_offset, actual->ggml_type());
double e_ = getVal((char *)expected->data() + expected_offset, expected->ggml_type()); double e_ = getVal((char *)expected->data() + expected_offset, expected->ggml_type());
if (std::fabs(a_ - e_) > atol || std::fabs(a_ - e_) > rtol * std::fmax(std::fabs(a_), std::fabs(e_))) { if (std::fabs(a_ - e_) > atol && std::fabs(a_ - e_) > rtol * std::fmax(std::fabs(a_), std::fabs(e_))) {
if (num_failed == 0) { if (num_failed == 0) {
first_failed_msg = "First failed at index " + std::to_string(i) + " with value " + std::to_string(a_) + " but should be " + std::to_string(e_) + "."; first_failed_msg = "First failed at index " + std::to_string(i) + " with value " + std::to_string(a_) + " but should be " + std::to_string(e_) + ".";
} }
......
...@@ -18,6 +18,12 @@ def matmul( ...@@ -18,6 +18,12 @@ def matmul(
return alpha * np.matmul(a, b) + beta * c return alpha * np.matmul(a, b) + beta * c
def random_tensor(shape, dtype):
    """Draw a uniform random tensor with small magnitudes.

    Samples are uniform in [-5e-4, 5e-4), i.e. a [0, 1) draw scaled by
    `rate` and re-centered around zero. Args: `shape` — tuple of array
    dimensions; `dtype` — numpy dtype the samples are cast to before
    scaling. Returns an ndarray of that shape and dtype.
    """
    scale = 1e-3  # NOTE: a scale of 1e-2 was found to exceed the test tolerances
    half_span = 0.5 * scale  # shifting by this centers [0, scale) on zero
    samples = np.random.rand(*shape).astype(dtype)
    return scale * samples - half_span
class MatmulTestCase(InfiniopTestCase): class MatmulTestCase(InfiniopTestCase):
def __init__( def __init__(
self, self,
...@@ -76,25 +82,115 @@ if __name__ == "__main__": ...@@ -76,25 +82,115 @@ if __name__ == "__main__":
# a, stride_a, b, stride_b, c, stride_c, alpha, beta # a, stride_a, b, stride_b, c, stride_c, alpha, beta
test_cases = [ test_cases = [
MatmulTestCase( MatmulTestCase(
np.random.rand(4, 5).astype(np.float32), random_tensor((4, 5), np.float32),
None, None,
np.random.rand(5, 6).astype(np.float32), random_tensor((5, 6), np.float32),
None, None,
np.random.rand(4, 6).astype(np.float32), random_tensor((4, 6), np.float32),
None, None,
1.0, 1.0,
0.0, 0.0,
), ),
MatmulTestCase( MatmulTestCase(
np.random.rand(4, 5).astype(np.float32), random_tensor((4, 5), np.float32),
gguf_strides(1, 4), gguf_strides(1, 4),
np.random.rand(5, 6).astype(np.float32), random_tensor((5, 6), np.float32),
gguf_strides(1, 5), gguf_strides(1, 5),
np.random.rand(4, 6).astype(np.float32), random_tensor((4, 6), np.float32),
gguf_strides(1, 4), gguf_strides(1, 4),
1.0, 1.0,
1.0, 1.0,
), ),
MatmulTestCase(
random_tensor((4, 5), np.float16),
None,
random_tensor((5, 6), np.float16),
None,
random_tensor((4, 6), np.float16),
None,
1.0,
0.0,
),
MatmulTestCase(
random_tensor((4, 5), np.float16),
gguf_strides(1, 4),
random_tensor((5, 6), np.float16),
gguf_strides(1, 5),
random_tensor((4, 6), np.float16),
gguf_strides(1, 4),
1.0,
1.0,
),
MatmulTestCase(
random_tensor((1, 2048), np.float16),
gguf_strides(1, 2048),
random_tensor((2048, 2048), np.float16),
gguf_strides(1, 2048),
random_tensor((1, 2048), np.float16),
gguf_strides(1, 2048),
1.0,
0.0,
),
MatmulTestCase(
random_tensor((1, 2048), np.float32),
None,
random_tensor((2048, 2048), np.float32),
None,
random_tensor((1, 2048), np.float32),
None,
1.0,
0.0,
),
MatmulTestCase(
random_tensor((2, 4, 2048), np.float16),
None,
random_tensor((2, 2048, 2048), np.float16),
None,
random_tensor((2, 4, 2048), np.float16),
None,
1.0,
0.0,
),
MatmulTestCase(
random_tensor((2, 4, 2048), np.float32),
None,
random_tensor((2, 2048, 2048), np.float32),
None,
random_tensor((2, 4, 2048), np.float32),
None,
1.0,
0.0,
),
MatmulTestCase(
random_tensor((6, 2048), np.float32),
gguf_strides(1, 2048),
random_tensor((2048, 2560), np.float32),
gguf_strides(1, 2560),
random_tensor((6, 2560), np.float32),
gguf_strides(1, 2560),
1.0,
1.0,
),
MatmulTestCase(
random_tensor((4, 48, 64), np.float16),
None,
random_tensor((4, 64, 6), np.float16),
None,
random_tensor((4, 48, 6), np.float16),
None,
1.0 / 8,
1.0,
),
MatmulTestCase(
random_tensor((4, 48, 64), np.float32),
None,
random_tensor((4, 64, 6), np.float32),
None,
random_tensor((4, 48, 6), np.float32),
None,
1.0 / 8,
1.0,
),
] ]
test_writer.add_tests(test_cases) test_writer.add_tests(test_cases)
test_writer.save() test_writer.save()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment