# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

"""SuperBench CLI command and scenario tests."""

import io
import contextlib
from functools import wraps
from pathlib import Path
from unittest import mock

from knack.testsdk import ScenarioTest, StringContainCheck, NoneCheck, JMESPathCheck

import superbench
from superbench.cli import SuperBenchCLI
from superbench.benchmarks import BenchmarkRegistry


def capture_system_exit(func):
    """Decorator to capture SystemExit in testing.

    The wrapped test body is expected to terminate via ``SystemExit`` with
    exit code 2 (argparse usage errors); whatever was printed to stderr is
    stored on the test instance as ``self.stderr`` for later inspection.

    Args:
        func (Callable): Decorated function.

    Returns:
        Callable: Decorator.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        captured = io.StringIO()
        # Capture stderr while asserting that the call exits with code 2.
        with contextlib.redirect_stderr(captured):
            with self.assertRaises(SystemExit) as ctx:
                func(self, *args, **kwargs)
        self.assertEqual(ctx.exception.code, 2)
        self.stderr = captured.getvalue()

    return wrapper


class SuperBenchCLIScenarioTest(ScenarioTest):
    """A class whose instances are CLI single test cases.

    Args:
        ScenarioTest (knack.testsdk.ScenarioTest): Test class for knack.
    """
    def __init__(self, method_name):
        """Override __init__ method for ScenarioTest.

        Args:
            method_name (str): ScenarioTest method_name.
        """
        super().__init__(SuperBenchCLI.get_cli(), method_name)

    def test_sb_version(self):
        """Test sb version, output must contain the package version string."""
        self.cmd('sb version', checks=[StringContainCheck(superbench.__version__)])

    @mock.patch('superbench.runner.SuperBenchRunner.get_failure_count')
    def test_sb_deploy(self, mocked_failure_count):
        """Test sb deploy."""
        mocked_failure_count.return_value = 0
        self.cmd('sb deploy --host-list localhost', checks=[NoneCheck()])

    @mock.patch('superbench.runner.SuperBenchRunner.get_failure_count')
    def test_sb_deploy_skippull(self, mocked_failure_count):
        """Test sb deploy without docker pull."""
        mocked_failure_count.return_value = 0
        self.cmd('sb deploy --host-list localhost --no-image-pull', checks=[NoneCheck()])

    def test_sb_deploy_no_host(self):
        """Test sb deploy, no host_file or host_list provided, should fail."""
        self.cmd('sb deploy', expect_failure=True)

    def test_sb_exec(self):
        """Test sb exec."""
        self.cmd('sb exec --config-override superbench.enable=["none"]', checks=[NoneCheck()])

    @mock.patch('superbench.runner.SuperBenchRunner.get_failure_count')
    def test_sb_run(self, mocked_failure_count):
        """Test sb run."""
        mocked_failure_count.return_value = 0
        self.cmd('sb run --host-list localhost --config-override superbench.enable=none', checks=[NoneCheck()])

    @mock.patch('superbench.runner.SuperBenchRunner.get_failure_count')
    def test_sb_run_skipdocker(self, mocked_failure_count):
        """Test sb run without docker."""
        mocked_failure_count.return_value = 0
        self.cmd('sb run -l localhost -C superbench.enable=none --no-docker', checks=[NoneCheck()])

    def test_sb_run_no_docker_auth(self):
        """Test sb run, only --docker-username argument, should fail."""
        result = self.cmd('sb run --docker-username test-user', expect_failure=True)
        self.assertEqual(result.exit_code, 1)

    def test_sb_run_no_host(self):
        """Test sb run, no --host-file or --host-list, should fail."""
        result = self.cmd('sb run --docker-image test:cuda11.1', expect_failure=True)
        self.assertEqual(result.exit_code, 1)

    def test_sb_run_nonexist_host_file(self):
        """Test sb run, --host-file does not exist, should fail."""
        result = self.cmd('sb run --host-file ./nonexist.yaml', expect_failure=True)
        self.assertEqual(result.exit_code, 1)

    def test_sb_benchmark_list(self):
        """Test sb benchmark list, should report every registered benchmark."""
        self.cmd('sb benchmark list', checks=[JMESPathCheck('length(@)', len(BenchmarkRegistry.benchmarks))])

    def test_sb_benchmark_list_nonexist(self):
        """Test sb benchmark list, give a non-exist benchmark name, should fail."""
        result = self.cmd('sb benchmark list -n non-exist-name', expect_failure=True)
        self.assertEqual(result.exit_code, 1)

    def test_sb_benchmark_list_parameters(self):
        """Test sb benchmark list-parameters, with and without a name filter regex."""
        self.cmd('sb benchmark list-parameters', checks=[NoneCheck()])
        self.cmd('sb benchmark list-parameters -n pytorch-[a-z]+', checks=[NoneCheck()])

    def test_sb_node_info(self):
        """Test sb node info."""
        self.cmd('sb node info', expect_failure=False)

    def test_sb_result_diagnosis(self):
        """Test sb result diagnosis."""
        data_dir = str(Path(__file__).parent.resolve() / '../analyzer/')
        # test positive case
        self.cmd(
            f'sb result diagnosis -d {data_dir}/test_results.jsonl -r {data_dir}/test_rules.yaml '
            f'-b {data_dir}/test_baseline.json --output-dir outputs/test-diagnosis/'
        )
        self.cmd(
            f'sb result diagnosis -d {data_dir}/test_results.jsonl -r {data_dir}/test_rules.yaml '
            f'-b {data_dir}/test_baseline.json --output-dir outputs/test-diagnosis/ --output-all'
        )
        self.cmd(
            f'sb result diagnosis -d {data_dir}/test_results.jsonl -r {data_dir}/test_rules_without_baseline.yaml '
            f'--output-dir outputs/test-diagnosis/ --output-all --output-file-format json'
        )
        # test invalid output format
        self.cmd(
            f'sb result diagnosis -d {data_dir}/test_results.jsonl -r {data_dir}/test_rules.yaml '
            f'-b {data_dir}/test_baseline.json --output-dir outputs/test-diagnosis/ --output-file-format abb',
            expect_failure=True
        )

    def test_sb_result_summary(self):
        """Test sb result summary."""
        data_dir = str(Path(__file__).parent.resolve() / '../analyzer/')
        # test positive case
        self.cmd(
            f'sb result summary -d {data_dir}/test_results.jsonl -r {data_dir}/test_summary_rules.yaml '
            f'--output-dir /tmp/outputs/test-summary/'
        )
        self.cmd(
            f'sb result summary -d {data_dir}/test_results.jsonl -r {data_dir}/test_summary_rules.yaml '
            f'--output-dir /tmp/outputs/test-summary/ --decimal-place-value 4'
        )
        # test invalid output format
        self.cmd(
            f'sb result summary -d {data_dir}/test_results.jsonl -r {data_dir}/test_rules.yaml '
            f'--output-dir /tmp/outputs/test-summary/ --output-file-format abb',
            expect_failure=True
        )