Commit 49486f80 authored by chengshunyan's avatar chengshunyan
Browse files

add init

parents
migraphx-auto-test @ 2bd545c3
Subproject commit 2bd545c35b5674e4844acc32843213b57471835e
#!/bin/bash
# DCU Performance Analyzer test script.
# Verifies the functional correctness and stability of the tool.

# Abort on the first unhandled command failure.
set -e

# ANSI color codes used by the log_* helpers below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Paths to the tool under test: the packaged binary and the Python source.
# NOTE(review): assumes the binary is built into ./dist — confirm build layout.
ANALYZER="./dist/dcu_analyzer"
PYTHON_ANALYZER="./dcu_performance_analyzer.py"

# Running tally of test outcomes, maintained by run_test.
TOTAL_TESTS=0
PASSED_TESTS=0
FAILED_TESTS=0

# All console output is mirrored into this log file via tee.
TEST_LOG="test_results.log"

# --- Helper functions ---
# Print a green [INFO] message to stdout and append it to the test log.
# Uses printf: %b expands the ANSI escapes stored in GREEN/NC while %s keeps
# the caller's message literal (the previous 'echo -e' also expanded
# backslash escapes inside $1 and misparsed messages starting with -n/-e).
log_info() {
  printf '%b[INFO]%b %s\n' "$GREEN" "$NC" "$1" | tee -a "$TEST_LOG"
}
# Print a red [ERROR] message to stdout and append it to the test log.
# printf keeps the caller's message literal, unlike the previous 'echo -e'
# which also expanded backslash escapes inside $1.
log_error() {
  printf '%b[ERROR]%b %s\n' "$RED" "$NC" "$1" | tee -a "$TEST_LOG"
}
# Print a yellow [WARNING] message to stdout and append it to the test log.
# printf keeps the caller's message literal, unlike the previous 'echo -e'
# which also expanded backslash escapes inside $1.
log_warning() {
  printf '%b[WARNING]%b %s\n' "$YELLOW" "$NC" "$1" | tee -a "$TEST_LOG"
}
# Execute one named test command and record the outcome in the global
# counters.
#   $1 - human-readable test name
#   $2 - shell command to run (passed to eval)
#   $3 - expected result: "pass" or "fail"
# Returns 0 when the outcome matches the expectation, 1 otherwise.
run_test() {
  local test_name="$1"
  local test_cmd="$2"
  local expected_result="$3"
  local actual="pass"

  TOTAL_TESTS=$((TOTAL_TESTS + 1))
  log_info "运行测试: $test_name"
  echo "命令: $test_cmd" | tee -a "$TEST_LOG"

  # Capture the command's outcome without letting 'set -e' interfere.
  eval "$test_cmd" || actual="fail"

  case "${actual}:${expected_result}" in
    pass:pass)
      log_info "✓ $test_name 通过"
      PASSED_TESTS=$((PASSED_TESTS + 1))
      return 0
      ;;
    fail:fail)
      log_info "✓ $test_name 通过 (期望失败)"
      PASSED_TESTS=$((PASSED_TESTS + 1))
      return 0
      ;;
    pass:*)
      log_error "✗ $test_name 失败 (期望失败但通过了)"
      FAILED_TESTS=$((FAILED_TESTS + 1))
      return 1
      ;;
    *)
      log_error "✗ $test_name 失败"
      FAILED_TESTS=$((FAILED_TESTS + 1))
      return 1
      ;;
  esac
}
# Verify that a regular file exists, logging the outcome.
#   $1 - path to check
#   $2 - human-readable description used in the log line
# Returns 0 when the file exists, 1 otherwise.
check_file_exists() {
  local file="$1"
  local description="$2"

  # Guard clause: report and bail out when the file is missing.
  if [ ! -f "$file" ]; then
    log_error "✗ $description 文件不存在: $file"
    return 1
  fi

  log_info "✓ $description 文件存在: $file"
  return 0
}
# Verify that a directory exists, logging the outcome.
#   $1 - path to check
#   $2 - human-readable description used in the log line
# Returns 0 when the directory exists, 1 otherwise.
check_directory_exists() {
  local dir="$1"
  local description="$2"

  # Guard clause: report and bail out when the directory is missing.
  if [ ! -d "$dir" ]; then
    log_error "✗ $description 目录不存在: $dir"
    return 1
  fi

  log_info "✓ $description 目录存在: $dir"
  return 0
}
# Best-effort removal of artifacts produced by earlier runs; never fails
# (errors are discarded so it is safe under 'set -e').
# NOTE(review): test_quiet/test_debug/test_package*/test_error/perf_test
# created by main() are not removed here — confirm whether that is intended.
cleanup() {
    log_info "清理测试文件..."
    rm -rf dcu_analysis_* test_output 2>/dev/null || true
}
# --- Main test sequence ---
# Runs every functional test, prints summary statistics, cleans up, and
# exits 0 only when all tests passed (so CI can gate on this script).
#
# Bug fixes relative to the original:
#   * each run_test/check_* invocation carries '|| true' — these helpers
#     return 1 on a failed check, which previously aborted the whole suite
#     via 'set -e' before the statistics were printed;
#   * the performance test now increments TOTAL_TESTS, so the pass-rate
#     calculation is no longer skewed.
main() {
  echo "=========================================" | tee "$TEST_LOG"
  echo "DCU Performance Analyzer 测试套件" | tee -a "$TEST_LOG"
  echo "=========================================" | tee -a "$TEST_LOG"
  echo "测试开始时间: $(date)" | tee -a "$TEST_LOG"
  echo "" | tee -a "$TEST_LOG"

  # Remove leftovers from previous runs.
  cleanup

  # Test 1: Python version --help
  run_test "Python版本帮助信息" \
    "python3 $PYTHON_ANALYZER --help > /dev/null 2>&1" \
    "pass" || true

  # Test 2: Python version --version
  run_test "Python版本版本信息" \
    "python3 $PYTHON_ANALYZER --version > /dev/null 2>&1" \
    "pass" || true

  # Tests 3-4 only make sense when the packaged binary has been built.
  if [ -f "$ANALYZER" ]; then
    run_test "可执行文件帮助信息" \
      "$ANALYZER --help > /dev/null 2>&1" \
      "pass" || true
    run_test "可执行文件版本信息" \
      "$ANALYZER --version > /dev/null 2>&1" \
      "pass" || true
  else
    log_warning "可执行文件不存在,跳过相关测试"
  fi

  # Test 5: basic end-to-end run (Python version)
  run_test "基本功能测试 (Python)" \
    "python3 $PYTHON_ANALYZER -c system performance -o test_output > /dev/null 2>&1" \
    "pass" || true

  # Verify the output tree produced by test 5.
  if check_directory_exists "test_output" "输出目录"; then
    check_file_exists "test_output/reports/analysis_report.json" "JSON报告" || true
    check_file_exists "test_output/reports/analysis_summary.txt" "文本摘要" || true
    check_file_exists "test_output/data/system_info.json" "系统信息" || true
    check_file_exists "test_output/data/performance_metrics.json" "性能指标" || true
    check_file_exists "test_output/logs/analysis.log" "分析日志" || true
  fi

  # Test 6: data packaging
  if [ -d "test_output" ]; then
    run_test "数据打包功能" \
      "python3 $PYTHON_ANALYZER -c system -o test_package > /dev/null 2>&1 && ls test_package.tar.gz > /dev/null 2>&1" \
      "pass" || true
  fi

  # Test 7: quiet mode
  run_test "静默模式" \
    "python3 $PYTHON_ANALYZER -c system -q -o test_quiet > /dev/null 2>&1" \
    "pass" || true

  # Test 8: debug mode
  run_test "调试模式" \
    "python3 $PYTHON_ANALYZER -c system -d -o test_debug > /dev/null 2>&1" \
    "pass" || true

  # Test 9: error handling for an invalid module name.
  # NOTE(review): expected_result="fail" means the grep must NOT find
  # 'error' in the output; if the analyzer prints an 'error' message for
  # invalid modules this expectation looks inverted — confirm against the
  # analyzer's actual wording.
  run_test "错误处理 (无效模块)" \
    "python3 $PYTHON_ANALYZER -c invalid_module -o test_error 2>&1 | grep -q 'error'" \
    "fail" || true

  # Test 10: unit tests (pytest when available, plain runner otherwise)
  if [ -f "test_analyzer.py" ]; then
    run_test "单元测试" \
      "python3 -m pytest test_analyzer.py -v > /dev/null 2>&1 || python3 test_analyzer.py" \
      "pass" || true
  else
    log_warning "单元测试文件不存在,跳过单元测试"
  fi

  # Test 11: wall-clock budget for a full analysis run.
  log_info "运行性能测试..."
  TOTAL_TESTS=$((TOTAL_TESTS + 1))
  start_time=$(date +%s)
  # '|| true' keeps 'set -e' from aborting before statistics are reported.
  python3 "$PYTHON_ANALYZER" -c system performance -o perf_test > /dev/null 2>&1 || true
  end_time=$(date +%s)
  execution_time=$((end_time - start_time))
  if [ "$execution_time" -lt 30 ]; then
    log_info "✓ 性能测试通过 (执行时间: ${execution_time}秒)"
    PASSED_TESTS=$((PASSED_TESTS + 1))
  else
    log_error "✗ 性能测试失败 (执行时间过长: ${execution_time}秒)"
    FAILED_TESTS=$((FAILED_TESTS + 1))
  fi

  # Summary statistics (pass rate needs bc; falls back to N/A without it).
  echo "" | tee -a "$TEST_LOG"
  echo "=========================================" | tee -a "$TEST_LOG"
  echo "测试完成时间: $(date)" | tee -a "$TEST_LOG"
  echo "" | tee -a "$TEST_LOG"
  echo "测试结果统计:" | tee -a "$TEST_LOG"
  echo " 总测试数: $TOTAL_TESTS" | tee -a "$TEST_LOG"
  echo " 通过: $PASSED_TESTS" | tee -a "$TEST_LOG"
  echo " 失败: $FAILED_TESTS" | tee -a "$TEST_LOG"
  echo " 通过率: $(echo "scale=1; $PASSED_TESTS * 100 / $TOTAL_TESTS" | bc -l 2>/dev/null || echo "N/A")%" | tee -a "$TEST_LOG"
  echo "=========================================" | tee -a "$TEST_LOG"

  # Clean up test artifacts.
  cleanup

  # Propagate an overall pass/fail exit status.
  if [ "$FAILED_TESTS" -eq 0 ]; then
    log_info "所有测试通过!"
    exit 0
  else
    log_error "部分测试失败!"
    exit 1
  fi
}
# --- Dependency checks & entry point ---
# python3 is mandatory; bc is optional (only used for the pass-rate figure).
log_info "检查测试依赖..."
if ! command -v python3 >/dev/null 2>&1; then
  log_error "Python3 未安装"
  exit 1
fi
if ! command -v bc >/dev/null 2>&1; then
  log_warning "bc 计算器未安装,无法显示精确通过率"
fi

# Run the full test suite.
main
\ No newline at end of file
#!/usr/bin/env python3
"""
DCU Performance Analyzer - 测试套件
用于验证工具的功能正确性和稳定性
"""
import json
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from unittest.mock import MagicMock, patch
# 添加父目录到路径
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from dcu_performance_analyzer import DCUPerformanceAnalyzer, CheckResult
class TestDCUPerformanceAnalyzer(unittest.TestCase):
    """Unit tests for the DCUPerformanceAnalyzer collection/reporting API."""

    def setUp(self):
        """Create a throwaway output directory and a debug-mode analyzer."""
        self.test_dir = tempfile.mkdtemp()
        self.analyzer = DCUPerformanceAnalyzer(output_dir=self.test_dir, debug=True)

    def tearDown(self):
        """Best-effort removal of the temporary output directory."""
        shutil.rmtree(self.test_dir, ignore_errors=True)

    @patch('dcu_performance_analyzer.subprocess.run')
    def test_run_command_success(self, mock_run):
        """A successful command yields (0, stdout, stderr)."""
        mock_result = MagicMock()
        mock_result.returncode = 0
        mock_result.stdout = "test output"
        mock_result.stderr = ""
        mock_run.return_value = mock_result
        returncode, stdout, stderr = self.analyzer.run_command("echo test")
        self.assertEqual(returncode, 0)
        self.assertEqual(stdout, "test output")
        self.assertEqual(stderr, "")

    @patch('dcu_performance_analyzer.subprocess.run')
    def test_run_command_failure(self, mock_run):
        """A failing command propagates its return code and stderr."""
        mock_result = MagicMock()
        mock_result.returncode = 1
        mock_result.stdout = ""
        mock_result.stderr = "error message"
        mock_run.return_value = mock_result
        returncode, stdout, stderr = self.analyzer.run_command("false")
        self.assertEqual(returncode, 1)
        self.assertEqual(stdout, "")
        self.assertEqual(stderr, "error message")

    @patch('dcu_performance_analyzer.subprocess.run')
    def test_run_command_timeout(self, mock_run):
        """A timed-out command maps to (-1, '', 'Command timeout')."""
        # Bug fix: 'subprocess' was referenced here without ever being
        # imported, so this test died with NameError instead of exercising
        # the timeout path ('import subprocess' added at the top of file).
        mock_run.side_effect = subprocess.TimeoutExpired("cmd", 1)
        returncode, stdout, stderr = self.analyzer.run_command("sleep 10", timeout=1)
        self.assertEqual(returncode, -1)
        self.assertEqual(stdout, "")
        self.assertEqual(stderr, "Command timeout")

    @patch('dcu_performance_analyzer.subprocess.run')
    def test_check_system_info(self, mock_run):
        """check_system_info returns a well-formed CheckResult."""
        mock_result = MagicMock()
        mock_result.returncode = 0
        mock_result.stdout = "CPU info test"
        mock_result.stderr = ""
        mock_run.return_value = mock_result
        result = self.analyzer.check_system_info()
        self.assertIsInstance(result, CheckResult)
        self.assertEqual(result.module, "System Information")
        self.assertIn(result.status, ["PASS", "FAIL"])
        self.assertIsInstance(result.execution_time, float)
        self.assertGreater(result.execution_time, 0)

    @patch('dcu_performance_analyzer.subprocess.run')
    def test_check_pcie_devices(self, mock_run):
        """check_pcie_devices parses lspci output and counts DCU devices."""
        # Simulated lspci output: one host bridge plus one DCU (vendor 1d94).
        mock_result = MagicMock()
        mock_result.returncode = 0
        mock_result.stdout = "00:00.0 Host bridge: Intel test\n01:00.0 VGA compatible controller: 1d94:1234 DCU"
        mock_result.stderr = ""
        mock_run.return_value = mock_result
        result = self.analyzer.check_pcie_devices()
        self.assertIsInstance(result, CheckResult)
        self.assertEqual(result.module, "PCIe Devices")
        self.assertIn("dcu_devices", result.details)
        self.assertGreaterEqual(result.details["dcu_devices"], 0)

    @patch('dcu_performance_analyzer.subprocess.run')
    def test_check_driver_status(self, mock_run):
        """check_driver_status reports loaded kernel modules as a list."""
        # Simulated lsmod output showing the hydcu module loaded.
        mock_result = MagicMock()
        mock_result.returncode = 0
        mock_result.stdout = "hydcu 12345 1 - Live 0xffffffffa0000000"
        mock_result.stderr = ""
        mock_run.return_value = mock_result
        result = self.analyzer.check_driver_status()
        self.assertIsInstance(result, CheckResult)
        self.assertEqual(result.module, "Driver Status")
        self.assertIn("loaded_modules", result.details)
        self.assertIsInstance(result.details["loaded_modules"], list)

    @patch('dcu_performance_analyzer.subprocess.run')
    def test_check_performance_metrics(self, mock_run):
        """check_performance_metrics extracts CPU/mem/disk/load figures."""

        def side_effect(cmd, **kwargs):
            # Return canned output per tool so each metric parser is hit.
            mock_result = MagicMock()
            if "top" in cmd:
                mock_result.returncode = 0
                mock_result.stdout = "%Cpu(s): 5.7 us"
            elif "free" in cmd:
                mock_result.returncode = 0
                mock_result.stdout = "Mem: 1542486 54998 1487488 0 1234 23456"
            elif "df" in cmd:
                mock_result.returncode = 0
                mock_result.stdout = "/dev/sda1 50G 45G 5G 90% /"
            elif "uptime" in cmd:
                mock_result.returncode = 0
                mock_result.stdout = "14:30:00 up 1 day, 0:30, 1 user, load average: 2.17, 3.73, 4.72"
            else:
                mock_result.returncode = 0
                mock_result.stdout = ""
            mock_result.stderr = ""
            return mock_result

        mock_run.side_effect = side_effect
        result = self.analyzer.check_performance_metrics()
        self.assertIsInstance(result, CheckResult)
        self.assertEqual(result.module, "Performance Metrics")
        self.assertIn("cpu_usage", result.details)
        self.assertIn("memory_usage_percent", result.details)
        self.assertIn("disk_usage_percent", result.details)
        self.assertIn("load_average_1m", result.details)

    def test_generate_report(self):
        """generate_report writes a JSON report with metadata/summary/results."""
        test_result = CheckResult(
            module="Test Module",
            status="PASS",
            message="Test passed",
            details={"test": "data"},
            timestamp="2026-02-25T12:00:00",
            execution_time=1.5
        )
        self.analyzer.results.append(test_result)
        report_file = self.analyzer.generate_report()
        self.assertTrue(os.path.exists(report_file))
        # Validate the JSON structure and the aggregated counts.
        with open(report_file, 'r') as f:
            report = json.load(f)
        self.assertIn("metadata", report)
        self.assertIn("summary", report)
        self.assertIn("results", report)
        self.assertEqual(report["summary"]["total_checks"], 1)
        self.assertEqual(report["summary"]["status_distribution"]["PASS"], 1)

    def test_create_package(self):
        """create_package produces a non-empty .tar.gz archive."""
        test_file = os.path.join(self.test_dir, "test.txt")
        with open(test_file, 'w') as f:
            f.write("test content")
        package_file = self.analyzer.create_package()
        self.assertTrue(os.path.exists(package_file))
        self.assertTrue(package_file.endswith(".tar.gz"))
        # The archive must contain at least one member.
        import tarfile
        with tarfile.open(package_file, 'r:gz') as tar:
            members = tar.getnames()
        self.assertGreater(len(members), 0)

    def test_result_to_dict(self):
        """_result_to_dict converts a CheckResult to a plain dict faithfully."""
        result = CheckResult(
            module="Test",
            status="PASS",
            message="OK",
            details={"key": "value"},
            timestamp="2026-02-25T12:00:00",
            execution_time=1.0
        )
        result_dict = self.analyzer._result_to_dict(result)
        self.assertIsInstance(result_dict, dict)
        self.assertEqual(result_dict["module"], "Test")
        self.assertEqual(result_dict["status"], "PASS")
        self.assertEqual(result_dict["message"], "OK")
        self.assertEqual(result_dict["details"]["key"], "value")
        self.assertEqual(result_dict["timestamp"], "2026-02-25T12:00:00")
        self.assertEqual(result_dict["execution_time"], 1.0)
class TestCLI(unittest.TestCase):
    """Tests for the command-line entry points (main / create_cli)."""

    @patch('dcu_performance_analyzer.DCUPerformanceAnalyzer')
    def test_main_success(self, analyzer_cls):
        """main() exits with status 0 after a successful default run."""
        analyzer_stub = MagicMock()
        analyzer_stub.run_analysis.return_value = "test_package.tar.gz"
        analyzer_stub.results = []
        analyzer_cls.return_value = analyzer_stub
        with patch('sys.argv', ['dcu_performance_analyzer.py']), \
                patch('sys.exit') as exit_mock:
            from dcu_performance_analyzer import main
            main()
            exit_mock.assert_called_with(0)

    @patch('dcu_performance_analyzer.DCUPerformanceAnalyzer')
    def test_main_with_checks(self, analyzer_cls):
        """main() forwards the -c module names to run_analysis."""
        analyzer_stub = MagicMock()
        analyzer_stub.run_analysis.return_value = "test_package.tar.gz"
        analyzer_stub.results = []
        analyzer_cls.return_value = analyzer_stub
        argv = ['dcu_performance_analyzer.py', '-c', 'system', 'pcie']
        with patch('sys.argv', argv), patch('sys.exit') as exit_mock:
            from dcu_performance_analyzer import main
            main()
            analyzer_stub.run_analysis.assert_called_with(['system', 'pcie'])
            exit_mock.assert_called_with(0)

    def test_create_cli(self):
        """create_cli() builds a parser that handles -c, -o and -d."""
        from dcu_performance_analyzer import create_cli
        parser = create_cli()
        args = parser.parse_args(['-c', 'system', '-o', '/tmp/test', '-d'])
        self.assertEqual(args.checks, ['system'])
        self.assertEqual(args.output, '/tmp/test')
        self.assertTrue(args.debug)
# Run the full suite with per-test output when executed directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment